repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
FPConv | FPConv-master/fpconv/pointnet2/setup.py | from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='pointnet2',
ext_modules=[
CUDAExtension('pointnet2_cuda', [
'src/pointnet2_api.cpp',
'src/ball_query.cpp',
'src/ball_query_gpu.cu',
'src/group_points.cpp',
'src/group_points_gpu.cu',
'src/interpolate.cpp',
'src/interpolate_gpu.cu',
'src/sampling.cpp',
'src/sampling_gpu.cu',
],
extra_compile_args={'cxx': ['-g'],
'nvcc': ['-O2']})
],
cmdclass={'build_ext': BuildExtension}
)
| 679 | 27.333333 | 67 | py |
FPConv | FPConv-master/fpconv/pointnet2/pointnet2_utils.py | import torch
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn as nn
from typing import Tuple
import pointnet2_cuda as pointnet2
class FurthestPointSampling(Function):
@staticmethod
def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor:
"""
Uses iterative furthest point sampling to select a set of npoint features that have the largest
minimum distance
:param ctx:
:param xyz: (B, N, 3) where N > npoint
:param npoint: int, number of features in the sampled set
:return:
output: (B, npoint) tensor containing the set
"""
assert xyz.is_contiguous()
B, N, _ = xyz.size()
output = torch.cuda.IntTensor(B, npoint)
temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
pointnet2.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, output)
return output
@staticmethod
def backward(xyz, a=None):
return None, None
furthest_point_sample = FurthestPointSampling.apply
class GatherOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param features: (B, C, N)
:param idx: (B, npoint) index tensor of the features to gather
:return:
output: (B, C, npoint)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
B, npoint = idx.size()
_, C, N = features.size()
output = torch.cuda.FloatTensor(B, C, npoint)
pointnet2.gather_points_wrapper(B, C, N, npoint, features, idx, output)
ctx.for_backwards = (idx, C, N)
return output
@staticmethod
def backward(ctx, grad_out):
idx, C, N = ctx.for_backwards
B, npoint = idx.size()
grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
grad_out_data = grad_out.data.contiguous()
pointnet2.gather_points_grad_wrapper(B, C, N, npoint, grad_out_data, idx, grad_features.data)
return grad_features, None
gather_operation = GatherOperation.apply
class ThreeNN(Function):
@staticmethod
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Find the three nearest neighbors of unknown in known
:param ctx:
:param unknown: (B, N, 3)
:param known: (B, M, 3)
:return:
dist: (B, N, 3) l2 distance to the three nearest neighbors
idx: (B, N, 3) index of 3 nearest neighbors
"""
assert unknown.is_contiguous()
assert known.is_contiguous()
B, N, _ = unknown.size()
m = known.size(1)
dist2 = torch.cuda.FloatTensor(B, N, 3)
idx = torch.cuda.IntTensor(B, N, 3)
pointnet2.three_nn_wrapper(B, N, m, unknown, known, dist2, idx)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
three_nn = ThreeNN.apply
class ThreeInterpolate(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""
Performs weight linear interpolation on 3 features
:param ctx:
:param features: (B, C, M) Features descriptors to be interpolated from
:param idx: (B, n, 3) three nearest neighbors of the target features in features
:param weight: (B, n, 3) weights
:return:
output: (B, C, N) tensor of the interpolated features
"""
assert features.is_contiguous()
assert idx.is_contiguous()
assert weight.is_contiguous()
B, c, m = features.size()
n = idx.size(1)
ctx.three_interpolate_for_backward = (idx, weight, m)
output = torch.cuda.FloatTensor(B, c, n)
pointnet2.three_interpolate_wrapper(B, c, m, n, features, idx, weight, output)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
:param ctx:
:param grad_out: (B, C, N) tensor with gradients of outputs
:return:
grad_features: (B, C, M) tensor with gradients of features
None:
None:
"""
idx, weight, m = ctx.three_interpolate_for_backward
B, c, n = grad_out.size()
grad_features = Variable(torch.cuda.FloatTensor(B, c, m).zero_())
grad_out_data = grad_out.data.contiguous()
pointnet2.three_interpolate_grad_wrapper(B, c, n, m, grad_out_data, idx, weight, grad_features.data)
return grad_features, None, None
three_interpolate = ThreeInterpolate.apply
class GroupingOperation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param features: (B, C, N) tensor of features to group
:param idx: (B, npoint, nsample) tensor containing the indicies of features to group with
:return:
output: (B, C, npoint, nsample) tensor
"""
assert features.is_contiguous()
assert idx.is_contiguous()
B, nfeatures, nsample = idx.size()
_, C, N = features.size()
output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)
pointnet2.group_points_wrapper(B, C, N, nfeatures, nsample, features, idx, output)
ctx.for_backwards = (idx, N)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
:param ctx:
:param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward
:return:
grad_features: (B, C, N) gradient of the features
"""
idx, N = ctx.for_backwards
B, C, npoint, nsample = grad_out.size()
grad_features = Variable(torch.cuda.FloatTensor(B, C, N).zero_())
grad_out_data = grad_out.data.contiguous()
pointnet2.group_points_grad_wrapper(B, C, N, npoint, nsample, grad_out_data, idx, grad_features.data)
return grad_features, None
grouping_operation = GroupingOperation.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param radius: float, radius of the balls
:param nsample: int, maximum number of features in the balls
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: (B, npoint, 3) centers of the ball query
:return:
idx: (B, npoint, nsample) tensor with the indicies of the features that form the query balls
"""
assert new_xyz.is_contiguous()
assert xyz.is_contiguous()
B, N, _ = xyz.size()
npoint = new_xyz.size(1)
idx = torch.cuda.IntTensor(B, npoint, nsample).zero_()
pointnet2.ball_query_wrapper(B, N, npoint, radius, nsample, new_xyz, xyz, idx)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ball_query = BallQuery.apply
class QueryAndGroup(nn.Module):
def __init__(self, radius: float, nsample: int, use_xyz: bool = True):
"""
:param radius: float, radius of ball
:param nsample: int, maximum number of features to gather in the ball
:param use_xyz:
"""
super().__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: (B, npoint, 3) centroids
:param features: (B, C, N) descriptors of the features
:return:
new_features: (B, 3 + C, npoint, nsample)
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping_operation(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, C + 3, npoint, nsample)
else:
new_features = grouped_features
else:
assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
return new_features
class QueryAndGroupLocal(nn.Module):
def __init__(self, radius: float, nsample: int):
"""
:param radius: float, radius of ball
:param nsample: int, maximum number of features to gather in the ball
:param use_xyz:
"""
super().__init__()
self.radius, self.nsample = radius, nsample
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: (B, npoint, 3) centroids
:param features: (B, C, N) descriptors of the features
:return:
grouped_xyz: B, 3, npoint, nsample <local coordinates>
new_features: (B, C, npoint, nsample)
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
# print(idx[0, 50, :])
# print(idx[0, 51, :])
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) # local xyz
grouped_features = grouping_operation(features, idx)
return grouped_xyz, grouped_features
# if features is not None:
# grouped_features = grouping_operation(features, idx)
# if self.use_xyz:
# new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, C + 3, npoint, nsample)
# else:
# new_features = grouped_features
# else:
# assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
# new_features = grouped_xyz
# return new_features
class QueryAndGroupXYZ(nn.Module):
def __init__(self, radius: float, nsample: int):
"""
:param radius: float, radius of ball
:param nsample: int, maximum number of features to gather in the ball
:param use_xyz:
"""
super().__init__()
self.radius, self.nsample = radius, nsample
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: (B, npoint, 3) centroids
:return:
grouped_xyz: B, 3, npoint, nsample <local coordinates>
"""
idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) # local xyz
return grouped_xyz
class GroupAll(nn.Module):
def __init__(self, use_xyz: bool = True):
super().__init__()
self.use_xyz = use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None):
"""
:param xyz: (B, N, 3) xyz coordinates of the features
:param new_xyz: ignored
:param features: (B, C, N) descriptors of the features
:return:
new_features: (B, C + 3, 1, N)
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (B, 3 + C, 1, N)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
| 12,424 | 33.803922 | 118 | py |
FPConv | FPConv-master/fpconv/pointnet2/__init__.py | 0 | 0 | 0 | py | |
FPConv | FPConv-master/fpconv/pointnet2/pointnet2_modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from . import pointnet2_utils
from . import pytorch_utils as pt_utils
from typing import List
class _PointnetSAModuleBase(nn.Module):
def __init__(self):
super().__init__()
self.npoint = None
self.groupers = None
self.mlps = None
self.pool_method = 'max_pool'
def forward(self, xyz: torch.Tensor, features: torch.Tensor = None, new_xyz=None) -> (torch.Tensor, torch.Tensor):
"""
:param xyz: (B, N, 3) tensor of the xyz coordinates of the features
:param features: (B, N, C) tensor of the descriptors of the the features
:param new_xyz:
:return:
new_xyz: (B, npoint, 3) tensor of the new features' xyz
new_features: (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
"""
new_features_list = []
xyz_flipped = xyz.transpose(1, 2).contiguous()
if new_xyz is None:
new_xyz = pointnet2_utils.gather_operation(
xyz_flipped,
pointnet2_utils.furthest_point_sample(xyz, self.npoint)
).transpose(1, 2).contiguous() if self.npoint is not None else None
for i in range(len(self.groupers)):
new_features = self.groupers[i](xyz, new_xyz, features) # (B, C, npoint, nsample)
new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample)
if self.pool_method == 'max_pool':
new_features = F.max_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
elif self.pool_method == 'avg_pool':
new_features = F.avg_pool2d(
new_features, kernel_size=[1, new_features.size(3)]
) # (B, mlp[-1], npoint, 1)
else:
raise NotImplementedError
new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint)
new_features_list.append(new_features)
return new_xyz, torch.cat(new_features_list, dim=1)
class PointnetSAModuleMSG(_PointnetSAModuleBase):
"""Pointnet set abstraction layer with multiscale grouping"""
def __init__(self, *, npoint: int, radii: List[float], nsamples: List[int], mlps: List[List[int]], bn: bool = True,
use_xyz: bool = True, pool_method='max_pool', instance_norm=False):
"""
:param npoint: int
:param radii: list of float, list of radii to group with
:param nsamples: list of int, number of samples in each ball query
:param mlps: list of list of int, spec of the pointnet before the global pooling for each scale
:param bn: whether to use batchnorm
:param use_xyz:
:param pool_method: max_pool / avg_pool
:param instance_norm: whether to use instance_norm
"""
super().__init__()
assert len(radii) == len(nsamples) == len(mlps)
self.npoint = npoint
self.groupers = nn.ModuleList()
self.mlps = nn.ModuleList()
for i in range(len(radii)):
radius = radii[i]
nsample = nsamples[i]
self.groupers.append(
pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz)
if npoint is not None else pointnet2_utils.GroupAll(use_xyz)
)
mlp_spec = mlps[i]
if use_xyz:
mlp_spec[0] += 3
self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn, instance_norm=instance_norm))
self.pool_method = pool_method
class PointnetSAModule(PointnetSAModuleMSG):
"""Pointnet set abstraction layer"""
def __init__(self, *, mlp: List[int], npoint: int = None, radius: float = None, nsample: int = None,
bn: bool = True, use_xyz: bool = True, pool_method='max_pool', instance_norm=False):
"""
:param mlp: list of int, spec of the pointnet before the global max_pool
:param npoint: int, number of features
:param radius: float, radius of ball
:param nsample: int, number of samples in the ball query
:param bn: whether to use batchnorm
:param use_xyz:
:param pool_method: max_pool / avg_pool
:param instance_norm: whether to use instance_norm
"""
super().__init__(
mlps=[mlp], npoint=npoint, radii=[radius], nsamples=[nsample], bn=bn, use_xyz=use_xyz,
pool_method=pool_method, instance_norm=instance_norm
)
class PointnetFPModule(nn.Module):
r"""Propigates the features of one set to another"""
def __init__(self, *, mlp: List[int], bn: bool = True):
"""
:param mlp: list of int
:param bn: whether to use batchnorm
"""
super().__init__()
self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
def forward(
self, unknown: torch.Tensor, known: torch.Tensor, unknow_feats: torch.Tensor, known_feats: torch.Tensor
) -> torch.Tensor:
"""
:param unknown: (B, n, 3) tensor of the xyz positions of the unknown features
:param known: (B, m, 3) tensor of the xyz positions of the known features
:param unknow_feats: (B, C1, n) tensor of the features to be propigated to
:param known_feats: (B, C2, m) tensor of features to be propigated
:return:
new_features: (B, mlp[-1], n) tensor of the features of the unknown features
"""
if known is not None:
dist, idx = pointnet2_utils.three_nn(unknown, known)
dist_recip = 1.0 / (dist + 1e-8)
norm = torch.sum(dist_recip, dim=2, keepdim=True)
weight = dist_recip / norm
interpolated_feats = pointnet2_utils.three_interpolate(known_feats, idx, weight)
else:
interpolated_feats = known_feats.expand(*known_feats.size()[0:2], unknown.size(1))
if unknow_feats is not None:
new_features = torch.cat([interpolated_feats, unknow_feats], dim=1) # (B, C2 + C1, n)
else:
new_features = interpolated_feats
new_features = new_features.unsqueeze(-1)
new_features = self.mlp(new_features)
return new_features.squeeze(-1)
if __name__ == "__main__":
pass
| 6,338 | 38.61875 | 119 | py |
FPConv | FPConv-master/fpconv/pointnet2/pytorch_utils.py | import torch.nn as nn
from typing import List, Tuple
class SharedMLP(nn.Sequential):
def __init__(
self,
args: List[int],
*,
bn: bool = False,
activation=nn.ReLU(inplace=True),
preact: bool = False,
first: bool = False,
name: str = "",
instance_norm: bool = False,):
super().__init__()
for i in range(len(args) - 1):
self.add_module(
name + 'layer{}'.format(i),
Conv2d(
args[i],
args[i + 1],
bn=(not first or not preact or (i != 0)) and bn,
activation=activation
if (not first or not preact or (i != 0)) else None,
preact=preact,
instance_norm=instance_norm
)
)
class _ConvBase(nn.Sequential):
def __init__(
self,
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=None,
batch_norm=None,
bias=True,
preact=False,
name="",
instance_norm=False,
instance_norm_func=None
):
super().__init__()
bias = bias and (not bn)
conv_unit = conv(
in_size,
out_size,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias=bias
)
init(conv_unit.weight)
if bias:
nn.init.constant_(conv_unit.bias, 0)
if bn:
if not preact:
bn_unit = batch_norm(out_size)
else:
bn_unit = batch_norm(in_size)
if instance_norm:
if not preact:
in_unit = instance_norm_func(out_size, affine=False, track_running_stats=False)
else:
in_unit = instance_norm_func(in_size, affine=False, track_running_stats=False)
if preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
if not bn and instance_norm:
self.add_module(name + 'in', in_unit)
self.add_module(name + 'conv', conv_unit)
if not preact:
if bn:
self.add_module(name + 'bn', bn_unit)
if activation is not None:
self.add_module(name + 'activation', activation)
if not bn and instance_norm:
self.add_module(name + 'in', in_unit)
class _BNBase(nn.Sequential):
def __init__(self, in_size, batch_norm=None, name=""):
super().__init__()
self.add_module(name + "bn", batch_norm(in_size))
nn.init.constant_(self[0].weight, 1.0)
nn.init.constant_(self[0].bias, 0)
class BatchNorm1d(_BNBase):
def __init__(self, in_size: int, *, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
class BatchNorm2d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
class BatchNorm3d(_BNBase):
def __init__(self, in_size: int, name: str = ""):
super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
class Conv1d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: int = 1,
stride: int = 1,
padding: int = 0,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = "",
instance_norm=False
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv1d,
batch_norm=BatchNorm1d,
bias=bias,
preact=preact,
name=name,
instance_norm=instance_norm,
instance_norm_func=nn.InstanceNorm1d
)
class Conv2d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int] = (1, 1),
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = "",
instance_norm=False
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv2d,
batch_norm=BatchNorm2d,
bias=bias,
preact=preact,
name=name,
instance_norm=instance_norm,
instance_norm_func=nn.InstanceNorm2d
)
class Conv3d(_ConvBase):
def __init__(
self,
in_size: int,
out_size: int,
*,
kernel_size: Tuple[int, int, int] = (1, 1, 1),
stride: Tuple[int, int, int] = (1, 1, 1),
padding: Tuple[int, int, int] = (0, 0, 0),
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=nn.init.kaiming_normal_,
bias: bool = True,
preact: bool = False,
name: str = "",
instance_norm=False
):
super().__init__(
in_size,
out_size,
kernel_size,
stride,
padding,
activation,
bn,
init,
conv=nn.Conv3d,
batch_norm=BatchNorm3d,
bias=bias,
preact=preact,
name=name,
instance_norm=instance_norm,
instance_norm_func=nn.InstanceNorm3d
)
class FC(nn.Sequential):
def __init__(
self,
in_size: int,
out_size: int,
*,
activation=nn.ReLU(inplace=True),
bn: bool = False,
init=None,
preact: bool = False,
name: str = ""
):
super().__init__()
fc = nn.Linear(in_size, out_size, bias=not bn)
if init is not None:
init(fc.weight)
if not bn:
nn.init.constant(fc.bias, 0)
if preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(in_size))
if activation is not None:
self.add_module(name + 'activation', activation)
self.add_module(name + 'fc', fc)
if not preact:
if bn:
self.add_module(name + 'bn', BatchNorm1d(out_size))
if activation is not None:
self.add_module(name + 'activation', activation)
| 7,312 | 25.305755 | 95 | py |
FPConv | FPConv-master/datasets/s3dis_dataset.py | import os
import numpy as np
import sys
from torch.utils.data import Dataset
class S3DIS(Dataset):
def __init__(self, split='train', data_root='trainval_fullarea', num_point=4096, test_area=5, block_size=1.0, sample_rate=1.0, transform=None, if_normal=True):
super().__init__()
print('Initiating DataLoader....')
self.if_normal = if_normal
self.num_point = num_point
self.block_size = block_size
self.transform = transform
rooms = sorted(os.listdir(data_root))
rooms = [room for room in rooms if 'Area_' in room]
if split == 'train':
rooms_split = [
room for room in rooms if not 'Area_{}'.format(test_area) in room]
else:
rooms_split = [
room for room in rooms if 'Area_{}'.format(test_area) in room]
self.room_points, self.room_labels = [], []
self.room_coord_min, self.room_coord_max = [], []
num_point_all = []
for room_name in rooms_split:
room_path = os.path.join(data_root, room_name)
room_data = np.load(room_path) # xyzrgbl, N*7
# xyzrgb, N*6; l, N
points, labels = room_data[:, 0:6], room_data[:, 6]
points[:, 0:3] -= np.amin(points, axis=0)[0:3]
coord_min, coord_max = np.amin(points, axis=0)[
:3], np.amax(points, axis=0)[:3]
self.room_points.append(points), self.room_labels.append(labels)
self.room_coord_min.append(
coord_min), self.room_coord_max.append(coord_max)
num_point_all.append(labels.size)
# Generate label weights
self.labelweights = self.__gen_labelweights(self.room_labels)
sample_prob = num_point_all / np.sum(num_point_all)
num_iter = int(np.sum(num_point_all) * sample_rate / num_point)
room_idxs = []
for index in range(len(rooms_split)):
room_idxs.extend(
[index] * int(round(sample_prob[index] * num_iter)))
self.room_idxs = np.array(room_idxs)
np.random.seed(123)
np.random.shuffle(self.room_idxs)
print('Num of labels: ', len(self.room_labels))
print("Totally {} samples in {} set.".format(len(self.room_idxs), split))
def __gen_labelweights(self, labels):
labelweights = np.zeros(13)
for seg in labels:
tmp, _ = np.histogram(seg, range(14))
labelweights += tmp
labelweights = labelweights.astype(np.float32)
labelweights = labelweights / np.sum(labelweights)
# self.labelweights = 1/np.log(1.2+labelweights)
return np.power(np.amax(labelweights) / labelweights, 1 / 3.0)
def __getitem__(self, idx):
room_idx = self.room_idxs[idx]
points = self.room_points[room_idx] # N * 6
labels = self.room_labels[room_idx] # N
N_points = points.shape[0]
while True:
center = points[np.random.choice(N_points)][:3]
block_min = center - [self.block_size /
2.0, self.block_size / 2.0, 0]
block_max = center + [self.block_size /
2.0, self.block_size / 2.0, 0]
point_idxs = np.where((points[:, 0] >= block_min[0]) & (points[:, 0] <= block_max[0]) & (
points[:, 1] >= block_min[1]) & (points[:, 1] <= block_max[1]))[0]
if point_idxs.size > 1024:
break
if point_idxs.size >= self.num_point:
selected_point_idxs = np.random.choice(
point_idxs, self.num_point, replace=False)
else:
selected_point_idxs = np.random.choice(
point_idxs, self.num_point, replace=True)
# normalize
selected_points = points[selected_point_idxs, :] # num_point * 6
selected_points[:, 0] = selected_points[:, 0] - center[0]
selected_points[:, 1] = selected_points[:, 1] - center[1]
selected_points[:, 3:6] /= 255.0
if self.if_normal:
current_points = np.zeros((self.num_point, 9)) # num_point * 9
current_points[:, 6] = selected_points[:, 0] / \
self.room_coord_max[room_idx][0]
current_points[:, 7] = selected_points[:, 1] / \
self.room_coord_max[room_idx][1]
current_points[:, 8] = selected_points[:, 2] / \
self.room_coord_max[room_idx][2]
current_points[:, 0:6] = selected_points
else:
current_points = selected_points
current_labels = labels[selected_point_idxs]
if self.transform is not None:
current_points, current_labels = self.transform(
current_points, current_labels)
sampleweights = self.labelweights[current_labels.astype(np.uint8)]
return current_points, current_labels, sampleweights
def __len__(self):
return len(self.room_idxs)
if __name__ == '__main__':
data_root = '/home/zizheng/data/s3dis/stanford_indoor3d_all_classes'
num_point, test_area, block_size, sample_rate = 4096, 5, 1.0, 0.01
import psutil
print("Before loading, the memory usage is ", psutil.virtual_memory())
point_data = S3DIS(split='train', data_root=data_root, num_point=num_point,
test_area=test_area, block_size=block_size, sample_rate=sample_rate, transform=None)
print('point data size:', point_data.__len__())
print('point data 0 shape:', point_data.__getitem__(0)[0].shape)
print('point label 0 shape:', point_data.__getitem__(0)[1].shape)
import torch
import time
import random
manual_seed = 123
random.seed(manual_seed)
np.random.seed(manual_seed)
torch.manual_seed(manual_seed)
torch.cuda.manual_seed_all(manual_seed)
print("After loading, the memory usage is ", psutil.virtual_memory())
def worker_init_fn(worker_id):
random.seed(manual_seed + worker_id)
train_loader = torch.utils.data.DataLoader(
point_data, batch_size=32, shuffle=True, num_workers=16, pin_memory=True, worker_init_fn=worker_init_fn)
for idx in range(4):
end = time.time()
for i, (points, target, weight) in enumerate(train_loader):
print('time: {}/{}--{}'.format(i + 1,
len(train_loader), time.time() - end))
print('Size of points: ', points.size())
points_np = points.cpu().data.numpy()
points_np_block1 = points_np[0, ...]
minp = points_np_block1[:, 0].min()
maxp = points_np_block1[:, 0].max()
print('weight is ', weight)
print('Min in x is {}, Max in x is {}'.format(minp, maxp))
print('Min in y is {}, Max in y is {}'.format(
points_np_block1[:, 1].min(), points_np_block1[:, 1].max()))
print("In loop, the memory usage is ", psutil.virtual_memory())
sys.exit(0)
| 7,008 | 42.265432 | 163 | py |
FPConv | FPConv-master/datasets/scannet_dataset_rgb_test.py | import pickle
import os
import sys
import numpy as np
import torch.utils.data as torch_data
class ScannetDatasetWholeScene_evaluation(torch_data.IterableDataset):
#prepare to give prediction on each points
def __init__(self, root=None, scene_list_dir=None, split='test', num_class=21, block_points=10240, with_norm=True, with_rgb=True):
super().__init__()
print(' ---- load data from', root)
self.block_points = block_points
self.indices = [0, 1, 2]
if with_norm: self.indices += [3, 4, 5]
if with_rgb: self.indices += [6, 7, 8]
print('load scannet <TEST> dataset <{}> with npoint {}, indices: {}.'.format(split, block_points, self.indices))
self.point_num = []
self.temp_data = []
self.temp_index = 0
self.now_index = 0
data_filename = os.path.join(root, 'scannet_%s_rgb21c_pointid.pickle' % (split))
with open(data_filename, 'rb') as fp:
self.scene_points_list = pickle.load(fp)
self.semantic_labels_list = pickle.load(fp)
self.scene_points_id = pickle.load(fp)
self.scene_points_num = pickle.load(fp)
file_path = os.path.join(scene_list_dir, 'scannetv2_{}.txt'.format(split))
num_class = 21
if split == 'test' or split == 'eval' or split == 'train':
self.labelweights = np.ones(num_class)
for seg in self.semantic_labels_list:
self.point_num.append(seg.shape[0])
with open(file_path) as fl:
self.scene_list = fl.read().splitlines()
else:
raise ValueError('split must be test or eval, {}'.format(split))
def reset(self):
self.temp_data = []
self.temp_index = 0
self.now_index = 0
def __iter__(self):
if self.now_index >= len(self.scene_points_list):
print(' ==== reset dataset index ==== ')
self.reset()
self.gen_batch_data()
return self
def chunks(self, l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def split_data(self, data, idx):
new_data = []
for i in range(len(idx)):
new_data += [data[idx[i]]]
return new_data
def nearest_dist(self, block_center, block_center_list):
num_blocks = len(block_center_list)
dist = np.zeros(num_blocks)
for i in range(num_blocks):
dist[i] = np.linalg.norm(block_center_list[i] - block_center, ord = 2) #i->j
return np.argsort(dist)[0]
def gen_batch_data(self):
index = self.now_index
self.now_index += 1
self.temp_data = []
self.temp_index = 0
print(' ==== generate batch data of {} ==== '.format(self.scene_list[index]))
delta = 0.5
# if self.with_rgb:
point_set_ini = self.scene_points_list[index]
# else:
# point_set_ini = self.scene_points_list[index][:, 0:3]
semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
coordmax = np.max(point_set_ini[:, 0:3],axis=0)
coordmin = np.min(point_set_ini[:, 0:3],axis=0)
nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/delta).astype(np.int32)
nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/delta).astype(np.int32)
point_sets = []
semantic_segs = []
sample_weights = []
point_idxs = []
block_center = []
for i in range(nsubvolume_x):
for j in range(nsubvolume_y):
curmin = coordmin+[i*delta,j*delta,0]
curmax = curmin+[2,2,coordmax[2]-coordmin[2]]
curchoice = np.sum((point_set_ini[:,0:3]>=(curmin-0.2))*(point_set_ini[:,0:3]<=(curmax+0.2)),axis=1)==3
curchoice_idx = np.where(curchoice)[0]
cur_point_set = point_set_ini[curchoice,:]
cur_semantic_seg = semantic_seg_ini[curchoice]
if len(cur_semantic_seg)==0:
continue
mask = np.sum((cur_point_set[:,0:3]>=(curmin-0.001))*(cur_point_set[:,0:3]<=(curmax+0.001)),axis=1)==3
sample_weight = self.labelweights[cur_semantic_seg]
sample_weight *= mask # N
point_sets.append(cur_point_set) # 1xNx3/6
semantic_segs.append(cur_semantic_seg) # 1xN
sample_weights.append(sample_weight) # 1xN
point_idxs.append(curchoice_idx) #1xN
block_center.append((curmin[0:2] + curmax[0:2]) / 2.0)
# merge small blocks
num_blocks = len(point_sets)
block_idx = 0
while block_idx < num_blocks:
if point_sets[block_idx].shape[0] > self.block_points // 2:
block_idx += 1
continue
small_block_data = point_sets[block_idx].copy()
small_block_seg = semantic_segs[block_idx].copy()
small_block_smpw = sample_weights[block_idx].copy()
small_block_idxs = point_idxs[block_idx].copy()
small_block_center = block_center[block_idx].copy()
point_sets.pop(block_idx)
semantic_segs.pop(block_idx)
sample_weights.pop(block_idx)
point_idxs.pop(block_idx)
block_center.pop(block_idx)
nearest_block_idx = self.nearest_dist(small_block_center, block_center)
point_sets[nearest_block_idx] = np.concatenate((point_sets[nearest_block_idx], small_block_data), axis = 0)
semantic_segs[nearest_block_idx] = np.concatenate((semantic_segs[nearest_block_idx], small_block_seg), axis = 0)
sample_weights[nearest_block_idx] = np.concatenate((sample_weights[nearest_block_idx], small_block_smpw), axis = 0)
point_idxs[nearest_block_idx] = np.concatenate((point_idxs[nearest_block_idx], small_block_idxs), axis = 0)
num_blocks = len(point_sets)
#divide large blocks
num_blocks = len(point_sets)
div_blocks = []
div_blocks_seg = []
div_blocks_smpw = []
div_blocks_idxs = []
div_blocks_center = []
for block_idx in range(num_blocks):
cur_num_pts = point_sets[block_idx].shape[0]
point_idx_block = np.array([x for x in range(cur_num_pts)])
if point_idx_block.shape[0]%self.block_points != 0:
makeup_num = self.block_points - point_idx_block.shape[0]%self.block_points
np.random.shuffle(point_idx_block)
point_idx_block = np.concatenate((point_idx_block,point_idx_block[0:makeup_num].copy()))
np.random.shuffle(point_idx_block)
sub_blocks = list(self.chunks(point_idx_block, self.block_points))
div_blocks += self.split_data(point_sets[block_idx], sub_blocks)
div_blocks_seg += self.split_data(semantic_segs[block_idx], sub_blocks)
div_blocks_smpw += self.split_data(sample_weights[block_idx], sub_blocks)
div_blocks_idxs += self.split_data(point_idxs[block_idx], sub_blocks)
div_blocks_center += [block_center[block_idx].copy() for i in range(len(sub_blocks))]
for i in range(len(div_blocks)):
selected_points = div_blocks[i]
point_set = np.zeros([self.block_points, 9])
point_set[:, :3] = selected_points[:, :3] # xyz
for k in range(3): # normalized_xyz
point_set[:, 3 + k] = (selected_points[:, k] - coordmin[k]) / (coordmax[k] - coordmin[k])
point_set[:, 6:] = selected_points[:, 3:] / 255.0 # rgb
point_set = point_set[:, self.indices]
self.temp_data.append((point_set, div_blocks_seg[i], div_blocks_smpw[i], div_blocks_idxs[i]))
def __next__(self):
if self.temp_index >= len(self.temp_data):
raise StopIteration()
else:
idx = self.temp_index
self.temp_index += 1
return self.temp_data[idx]
| 8,118 | 42.417112 | 134 | py |
FPConv | FPConv-master/datasets/s3dis_dataset_test.py | import pickle
import os
import sys
import numpy as np
import torch.utils.data as torch_data
class S3DISWholeScene_evaluation(torch_data.IterableDataset):
    """Iterable dataset serving every point of each S3DIS room in a test area.

    Each call to ``iter()`` loads ONE room, cuts it into xy blocks on a
    sliding grid (step ``stride``), merges undersized blocks into their
    nearest neighbour, and splits the rest into chunks of exactly
    ``block_points`` points so that a prediction can be produced for every
    point of the room.  The dataset is stateful: iterate it once per room.
    """
    # prepare to give prediction on each points
    def __init__(self, root=None, split='test', test_area=5, num_class=13, block_points=8192, block_size=1.5, stride=0.5, with_rgb=True):
        """Load all rooms of ``test_area`` from ``root``.

        Each room file is an ``.npy`` array of shape (N, 7): xyz, rgb, label.
        ``root`` must be supplied (os.listdir(None) would fail); ``num_class``
        is accepted for API symmetry but not stored.
        """
        print('test area:', test_area)
        self.root = root
        self.split = split
        self.with_rgb = with_rgb
        self.block_points = block_points
        self.block_size = block_size
        self.stride = stride
        self.point_num = []   # per-room point counts
        self.temp_data = []   # cached block tuples for the current room
        self.temp_index = 0   # cursor into temp_data
        self.now_index = 0    # index of the next room to load
        self.scene_points_list = []
        self.semantic_labels_list = []
        rooms = sorted(os.listdir(root))
        # keep only the rooms belonging to the held-out test area
        rooms = [room for room in rooms if 'Area_{}'.format(test_area) in room]
        for room_name in rooms:
            room_path = os.path.join(root, room_name)
            room_data = np.load(room_path)  # xyzrgbl, N*7
            # xyzrgb, N*6; l, N
            points, labels = room_data[:, 0:6], room_data[:, 6]
            # shift the room so its minimum corner sits at the origin
            points[:, 0:3] -= np.amin(points, axis=0)[0:3]
            self.scene_points_list.append(points)
            self.semantic_labels_list.append(labels)
        self.scene_list = [i.replace('.npy', '') for i in rooms]
        for seg in self.semantic_labels_list:
            self.point_num.append(seg.shape[0])
    def reset(self):
        """Clear the block cache and rewind to the first room."""
        self.temp_data = []
        self.temp_index = 0
        self.now_index = 0
    def __iter__(self):
        """Prepare the block cache for the next room (wraps after the last)."""
        if self.now_index >= len(self.scene_points_list):
            print(' ==== reset dataset index ==== ')
            self.reset()
        self.gen_batch_data()
        return self
    def chunks(self, l, n):
        """Yield successive n-sized chunks from l."""
        for i in range(0, len(l), n):
            yield l[i:i + n]
    def split_data(self, data, idx):
        """Return [data[idx[0]], data[idx[1]], ...] for a list of index arrays."""
        new_data = []
        for i in range(len(idx)):
            new_data += [data[idx[i]]]
        return new_data
    def nearest_dist(self, block_center, block_center_list):
        """Index of the centre in block_center_list closest to block_center."""
        num_blocks = len(block_center_list)
        dist = np.zeros(num_blocks)
        for i in range(num_blocks):
            dist[i] = np.linalg.norm(
                block_center_list[i] - block_center, ord=2)  # i->j
        return np.argsort(dist)[0]
    def gen_batch_data(self):
        """Cut the current room into fixed-size blocks and cache them.

        Fills ``self.temp_data`` with (points, labels, point_indices)
        tuples, each holding exactly ``block_points`` points with 9
        channels (block-centred xyz, rgb, normalized xyz).
        """
        index = self.now_index
        self.now_index += 1
        self.temp_data = []
        self.temp_index = 0
        print(' ==== generate batch data of {} ==== '.format(
            self.scene_list[index]))
        delta = self.stride
        if self.with_rgb:
            point_set_ini = self.scene_points_list[index]
        else:
            # NOTE(review): with_rgb=False yields (N, 3) points, but the
            # assignment to current_points[:, 0:6] below expects 6 columns
            # -- looks like this path would fail to broadcast; confirm
            # before using it.
            point_set_ini = self.scene_points_list[index][:, 0:3]
        semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
        coordmax = np.max(point_set_ini[:, 0:3], axis=0)
        coordmin = np.min(point_set_ini[:, 0:3], axis=0)
        nsubvolume_x = np.ceil(
            (coordmax[0] - coordmin[0]) / delta).astype(np.int32)
        nsubvolume_y = np.ceil(
            (coordmax[1] - coordmin[1]) / delta).astype(np.int32)
        point_sets = []
        semantic_segs = []
        sample_weights = []  # NOTE(review): never filled nor returned here
        point_idxs = []
        block_center = []
        for i in range(nsubvolume_x):
            for j in range(nsubvolume_y):
                curmin = coordmin + [i * delta, j * delta, 0]
                # z extent is 0, but z is never tested below, so each block
                # spans the full room height
                curmax = curmin + [self.block_size, self.block_size, 0]
                curchoice = np.where((point_set_ini[:, 0] >= curmin[0]) & (point_set_ini[:, 0] <= curmax[0]) & (
                    point_set_ini[:, 1] >= curmin[1]) & (point_set_ini[:, 1] <= curmax[1]))[0]
                cur_point_set = point_set_ini[curchoice, :]
                cur_semantic_seg = semantic_seg_ini[curchoice]
                bc = (curmin[0:2] + curmax[0:2]) / 2.0
                # centre the block in xy (fancy indexing above made a copy,
                # so the room itself is not modified)
                cur_point_set[:, 0] -= bc[0]
                cur_point_set[:, 1] -= bc[1]
                current_points = np.zeros((cur_point_set.shape[0], 9))
                # NOTE(review): cols 6-8 are computed from the already
                # block-centred x/y -- confirm this matches the
                # normalized-xyz channels the trained model expects.
                current_points[:, 6] = cur_point_set[:, 0] / coordmax[0]
                current_points[:, 7] = cur_point_set[:, 1] / coordmax[1]
                current_points[:, 8] = cur_point_set[:, 2] / coordmax[2]
                current_points[:, 0:6] = cur_point_set
                if len(cur_semantic_seg) == 0:
                    continue
                point_sets.append(current_points)  # 1xNx3/6
                semantic_segs.append(cur_semantic_seg)  # 1xN
                point_idxs.append(curchoice)  # 1xN
                block_center.append(bc)
        # merge small blocks (<= block_points/2) into their nearest neighbour
        num_blocks = len(point_sets)
        block_idx = 0
        while block_idx < num_blocks:
            if point_sets[block_idx].shape[0] > (self.block_points / 2):
                block_idx += 1
                continue
            small_block_data = point_sets[block_idx].copy()
            small_block_seg = semantic_segs[block_idx].copy()
            small_block_idxs = point_idxs[block_idx].copy()
            small_block_center = block_center[block_idx].copy()
            point_sets.pop(block_idx)
            semantic_segs.pop(block_idx)
            point_idxs.pop(block_idx)
            block_center.pop(block_idx)
            nearest_block_idx = self.nearest_dist(
                small_block_center, block_center)
            point_sets[nearest_block_idx] = np.concatenate(
                (point_sets[nearest_block_idx], small_block_data), axis=0)
            semantic_segs[nearest_block_idx] = np.concatenate(
                (semantic_segs[nearest_block_idx], small_block_seg), axis=0)
            point_idxs[nearest_block_idx] = np.concatenate(
                (point_idxs[nearest_block_idx], small_block_idxs), axis=0)
            num_blocks = len(point_sets)
        # divide large blocks into chunks of exactly block_points points
        num_blocks = len(point_sets)
        div_blocks = []
        div_blocks_seg = []
        # div_blocks_smpw = []
        div_blocks_idxs = []
        div_blocks_center = []
        for block_idx in range(num_blocks):
            cur_num_pts = point_sets[block_idx].shape[0]
            point_idx_block = np.array([x for x in range(cur_num_pts)])
            if point_idx_block.shape[0] % self.block_points != 0:
                # pad with duplicated points so the count divides evenly
                makeup_num = self.block_points - \
                    point_idx_block.shape[0] % self.block_points
                np.random.shuffle(point_idx_block)
                point_idx_block = np.concatenate(
                    (point_idx_block, point_idx_block[0:makeup_num].copy()))
            np.random.shuffle(point_idx_block)
            sub_blocks = list(self.chunks(point_idx_block, self.block_points))
            div_blocks += self.split_data(point_sets[block_idx], sub_blocks)
            div_blocks_seg += self.split_data(
                semantic_segs[block_idx], sub_blocks)
            div_blocks_idxs += self.split_data(
                point_idxs[block_idx], sub_blocks)
            div_blocks_center += [block_center[block_idx].copy()
                                  for i in range(len(sub_blocks))]
        for i in range(len(div_blocks)):
            point_set = div_blocks[i]
            if self.with_rgb:
                point_set[:, 3:6] /= 255.0  # scale rgb to [0, 1]
            self.temp_data.append(
                (point_set, div_blocks_seg[i], div_blocks_idxs[i]))
    def __next__(self):
        """Return the next cached (points, labels, indices) tuple."""
        if self.temp_index >= len(self.temp_data):
            raise StopIteration()
        else:
            idx = self.temp_index
            self.temp_index += 1
            return self.temp_data[idx]
if __name__ == '__main__':
    # Smoke test: iterate the whole-scene eval dataset through a DataLoader
    # twice and print batch shapes plus the x-extent of the first sample.
    test_dst = S3DISWholeScene_evaluation(root='/home/zizheng/data/s3dis/stanford_indoor3d_all_classes',
                                          split='test',
                                          test_area=5,
                                          block_points=8192,
                                          with_rgb=True)
    # NOTE(review): CUDA_VISIBLE_DEVICES is set after the dataset is built;
    # it only affects later CUDA initialisation.
    os.environ["CUDA_VISIBLE_DEVICES"] = '3'
    loader = torch_data.DataLoader(
        test_dst, batch_size=12, shuffle=False, pin_memory=True, num_workers=0)
    for i, data in enumerate(loader):
        a, b, d = data  # points, labels, point indices
        print(a.shape)
        print(np.max(a[0, :, 0].data.cpu().numpy()) -
              np.min(a[0, :, 0].data.cpu().numpy()))
    # second pass: each iter() advances the stateful dataset to the next room
    for i, data in enumerate(loader):
        a, b, d = data
        print(a.shape)
| 8,454 | 38.143519 | 137 | py |
FPConv | FPConv-master/datasets/__init__.py | 0 | 0 | 0 | py | |
FPConv | FPConv-master/datasets/scannet_dataset_rgb.py | import pickle
import os
import sys
import numpy as np
import torch.utils.data as torch_data
class ScannetDataset(torch_data.Dataset):
    """Map-style ScanNet dataset serving randomly cropped 2 m x 2 m columns.

    ``__getitem__`` crops a column (full height) around a random point and
    resamples it to ``npoints`` points with up to 9 channels
    (xyz, normalized xyz, rgb); ``self.indices`` selects which channel
    groups are returned.
    """
    def __init__(self, root=None, npoints=10240, split='train', with_dropout=False, with_norm=False, with_rgb=False, sample_rate=None):
        """Load the pickled point clouds and compute per-class loss weights.

        Args:
            root: directory containing scannet_<split>_rgb21c_pointid.pickle.
            npoints: number of points returned per sample.
            split: 'train', 'eval' or 'test'.
            with_dropout: enable point-dropout augmentation (train-time).
            with_norm / with_rgb: include normalized-xyz / rgb channels.
            sample_rate: if set, oversample rooms proportionally to size.
        """
        super().__init__()
        print(' ---- load data from', root)
        self.npoints = npoints
        self.with_dropout = with_dropout
        self.indices = [0, 1, 2]
        if with_norm: self.indices += [3, 4, 5]
        if with_rgb: self.indices += [6, 7, 8]
        print('load scannet dataset <{}> with npoint {}, indices: {}.'.format(split, npoints, self.indices))
        data_filename = os.path.join(root, 'scannet_%s_rgb21c_pointid.pickle' % (split))
        with open(data_filename, 'rb') as fp:
            self.scene_points_list = pickle.load(fp)
            self.semantic_labels_list = pickle.load(fp)
            scene_points_id = pickle.load(fp)  # unused here
            num_point_all = pickle.load(fp)    # per-room point counts
        if split == 'train':
            # inverse-frequency class weights, cube-root damped; class 0 is
            # "unannotated" and excluded from the max
            labelweights = np.zeros(21)
            for seg in self.semantic_labels_list:
                tmp,_ = np.histogram(seg,range(22))
                labelweights += tmp
            labelweights = labelweights.astype(np.float32)
            labelweights = labelweights/np.sum(labelweights)
            # self.labelweights = 1/np.log(1.2+labelweights)
            self.labelweights = np.power(np.amax(labelweights[1:]) / labelweights, 1 / 3.0)
        elif split == 'eval' or split == 'test':
            self.labelweights = np.ones(21)
        else:
            raise ValueError('split must be train or eval.')
        if sample_rate is not None:
            # repeat rooms proportionally to their point count so roughly
            # sample_rate of all points are visited per epoch
            num_point = npoints
            sample_prob = num_point_all / np.sum(num_point_all)
            num_iter = int(np.sum(num_point_all) * sample_rate / num_point)
            room_idxs = []
            for index in range(len(self.scene_points_list)):
                repeat_times = round(sample_prob[index] * num_iter)
                repeat_times = int(max(repeat_times, 1))
                room_idxs.extend([index] * repeat_times)
            self.room_idxs = np.array(room_idxs)
            np.random.seed(123)  # fixed shuffle for reproducibility
            np.random.shuffle(self.room_idxs)
        else:
            self.room_idxs = np.arange(len(self.scene_points_list))
        print("Totally {} samples in {} set.".format(len(self.room_idxs), split))
    def __getitem__(self, index):
        """Return (point_set, semantic_seg, sample_weight) for one random crop."""
        index = self.room_idxs[index]
        data_set = self.scene_points_list[index]
        point_set = data_set[:, :3]
        semantic_seg = self.semantic_labels_list[index].astype(np.int32)
        coordmax = np.max(point_set, axis=0)
        coordmin = np.min(point_set, axis=0)
        # NOTE(review): smpmin/smpsz are computed but never used below
        smpmin = np.maximum(coordmax-[2, 2, 3.0], coordmin)
        smpmin[2] = coordmin[2]
        smpsz = np.minimum(coordmax-smpmin,[2,2,3.0])
        smpsz[2] = coordmax[2]-coordmin[2]
        isvalid = False
        # randomly choose a point as center point and sample <n_points> points in the box area of center-point
        for i in range(10):
            curcenter = point_set[np.random.choice(len(semantic_seg),1)[0],:]
            curmin = curcenter - [1, 1, 1.5]
            curmax = curcenter + [1, 1, 1.5]
            curmin[2] = coordmin[2]
            curmax[2] = coordmax[2]
            # crop plus a 0.2 m context fringe
            curchoice = np.sum((point_set >= (curmin - 0.2)) * (point_set <= (curmax + 0.2)), axis=1) == 3
            cur_point_set = point_set[curchoice, :]
            cur_data_set = data_set[curchoice, :]
            cur_semantic_seg = semantic_seg[curchoice]
            if len(cur_semantic_seg) == 0:
                continue
            # only points strictly inside the crop carry loss weight
            mask = np.sum((cur_point_set >= (curmin - 0.01)) * (cur_point_set <= (curmax + 0.01)), axis=1) == 3
            vidx = np.ceil((cur_point_set[mask, :] - curmin) / (curmax - curmin) * [31.0, 31.0, 62.0])
            vidx = np.unique(vidx[:, 0] * 31.0 * 62.0 + vidx[:, 1] * 62.0 + vidx[:, 2])
            # accept the crop if >= 70% of points are annotated and >= 2% of
            # the 31x31x62 voxel grid is occupied
            isvalid = np.sum(cur_semantic_seg > 0) / len(cur_semantic_seg) >= 0.7 and len(vidx) / 31.0 / 31.0 / 62.0 >= 0.02
            if isvalid:
                break
        choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=True)
        semantic_seg = cur_semantic_seg[choice]
        mask = mask[choice]
        sample_weight = self.labelweights[semantic_seg]
        sample_weight *= mask
        selected_points = cur_data_set[choice, :]  # np * 6, xyz + rgb
        point_set = np.zeros((self.npoints, 9))  # xyz, norm_xyz, rgb
        point_set[:, :3] = selected_points[:, :3]  # xyz
        for i in range(3):  # normalized_xyz
            point_set[:, 3 + i] = (selected_points[:, i] - coordmin[i]) / (coordmax[i] - coordmin[i])
        point_set[:, 6:] = selected_points[:, 3:] / 255.0  # rgb
        if self.with_dropout:
            # replace a random fraction of points with the first point and
            # zero their loss weight (point-dropout augmentation)
            dropout_ratio = np.random.random() * 0.875  # 0 ~ 0.875
            drop_idx = np.where(np.random.random((self.npoints)) <= dropout_ratio)[0]
            point_set[drop_idx, :] = point_set[0, :]
            semantic_seg[drop_idx] = semantic_seg[0]
            sample_weight[drop_idx] *= 0
        point_set = point_set[:, self.indices]
        return point_set, semantic_seg, sample_weight
    def __len__(self):
        """Number of (possibly oversampled) room crops per epoch."""
        return len(self.room_idxs)
        # return len(self.scene_points_list)
class ScannetDatasetWholeScene(torch_data.IterableDataset):
    """Iterable ScanNet dataset that tiles each whole scene into 2 m blocks.

    ``__next__`` lazily cuts the next scene into a 2 m x 2 m xy grid,
    caches one ``npoints``-point sample per non-empty cell, then serves the
    cached samples one at a time.
    """
    def __init__(self, root=None, npoints=10240, split='train', with_norm=True, with_rgb=True):
        """Load the pickled scenes and per-class weights (see ScannetDataset)."""
        super().__init__()
        print(' ---- load data from', root)
        self.npoints = npoints
        self.indices = [0, 1, 2]
        if with_norm: self.indices += [3, 4, 5]
        if with_rgb: self.indices += [6, 7, 8]
        print('load scannet <whole scene> dataset <{}> with npoint {}, indices: {}.'.format(split, npoints, self.indices))
        self.temp_data = []   # cached samples of the current scene
        self.temp_index = 0   # cursor into temp_data
        self.now_index = 0    # index of the next scene to tile
        data_filename = os.path.join(root, 'scannet_%s_rgb21c_pointid.pickle' % (split))
        with open(data_filename, 'rb') as fp:
            self.scene_points_list = pickle.load(fp)
            self.semantic_labels_list = pickle.load(fp)
        if split == 'train':
            # cube-root damped inverse-frequency class weights
            labelweights = np.zeros(21)
            for seg in self.semantic_labels_list:
                tmp,_ = np.histogram(seg,range(22))
                labelweights += tmp
            labelweights = labelweights.astype(np.float32)
            labelweights = labelweights/np.sum(labelweights)
            # self.labelweights = 1 / np.log(1.2 + labelweights)
            self.labelweights = np.power(np.amax(labelweights[1:]) / labelweights, 1 / 3.0)
        elif split == 'eval' or split == 'test':
            self.labelweights = np.ones(21)
    def get_data(self):
        """Pop and return the next cached sample."""
        idx = self.temp_index
        self.temp_index += 1
        return self.temp_data[idx]
    def reset(self):
        """Drop the cache and rewind to the first scene."""
        self.temp_data = []
        self.temp_index = 0
        self.now_index = 0
    def __iter__(self):
        self.reset()
        return self
    def __next__(self):
        """Serve the next cached sample, tiling a new scene when the cache is empty.

        NOTE(review): if a scene produced no valid block, get_data() at the
        end would raise IndexError instead of advancing to the next scene --
        confirm every scene yields at least one block.
        """
        if self.now_index >= len(self.scene_points_list) and self.temp_index >= len(self.temp_data):
            raise StopIteration()
        if self.temp_index < len(self.temp_data):
            return self.get_data()
        index = self.now_index
        self.now_index += 1
        self.temp_data = []
        self.temp_index = 0
        data_set_ini = self.scene_points_list[index]
        point_set_ini = data_set_ini[:,:3]
        semantic_seg_ini = self.semantic_labels_list[index].astype(np.int32)
        coordmax = np.max(point_set_ini,axis=0)
        coordmin = np.min(point_set_ini,axis=0)
        nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/2).astype(np.int32)
        nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/2).astype(np.int32)
        # NOTE(review): the three lists and isvalid below are never used
        point_sets = list()
        semantic_segs = list()
        sample_weights = list()
        isvalid = False
        for i in range(nsubvolume_x):
            for j in range(nsubvolume_y):
                curmin = coordmin+[i*2,j*2,0]
                curmax = coordmin+[(i+1)*2,(j+1)*2,coordmax[2]-coordmin[2]]
                # take points within the cell plus a 0.2 m context fringe
                curchoice = np.sum((point_set_ini>=(curmin-0.2))*(point_set_ini<=(curmax+0.2)),axis=1)==3
                cur_point_set = point_set_ini[curchoice,:]
                cur_data_set = data_set_ini[curchoice,:]
                cur_semantic_seg = semantic_seg_ini[curchoice]
                if len(cur_semantic_seg)==0:
                    continue
                # only points strictly inside the cell carry loss weight
                mask = np.sum((cur_point_set >= (curmin - 0.001)) * (cur_point_set <= (curmax + 0.001)), axis=1) == 3
                choice = np.random.choice(len(cur_semantic_seg), self.npoints, replace=len(cur_semantic_seg) < self.npoints)
                semantic_seg = cur_semantic_seg[choice]  # N
                mask = mask[choice]
                if sum(mask) / float(len(mask)) < 0.01:
                    continue
                sample_weight = self.labelweights[semantic_seg]
                sample_weight *= mask  # N
                selected_points = cur_data_set[choice, :]  # Nx6
                point_set = np.zeros([self.npoints, 9])
                point_set[:, :3] = selected_points[:, :3]  # xyz
                for k in range(3):  # normalized_xyz
                    point_set[:, 3 + k] = (selected_points[:, k] - coordmin[k]) / (coordmax[k] - coordmin[k])
                point_set[:, 6:] = selected_points[:, 3:] / 255.0  # rgb
                point_set = point_set[:, self.indices]
                self.temp_data.append((point_set, semantic_seg, sample_weight))
        return self.get_data()
| 9,696 | 43.278539 | 135 | py |
FPConv | FPConv-master/utils/indoor3d_util.py | """
Modified from: https://github.com/charlesq34/pointnet/blob/master/sem_seg/indoor3d_util.py
"""
import numpy as np
import glob
import os
import sys
from plyfile import PlyData, PlyElement
# Label-id lists shared between the two datasets; presumably NYU_CLASS holds
# NYU40 ids (SceneNN/SUNCG) and S3DIS_CLASS the matching S3DIS ids -- verify
# against the conversion scripts.
NYU_CLASS = [1, 2, 22, 9, 7, 5, 10, 6, 30, 8]
S3DIS_CLASS = [2, 1, 0, 5, 7, 8, 10, 9, 11, 6]
# The 13 S3DIS semantic classes; list index == label id.
g_classes = ['ceiling', 'floor', 'wall', 'beam', 'column', 'window', 'door', 'table', 'chair', 'sofa', 'bookcase', 'board', 'clutter']
g_class2label = {cls: i for i, cls in enumerate(g_classes)}  # name -> id
# RGB colour used when visualising each class.
g_class2color = {'ceiling': [0, 255, 0],
                 'floor': [0, 0, 255],
                 'wall': [0, 255, 255],
                 'beam': [255, 255, 0],
                 'column': [255, 0, 255],
                 'window': [100, 100, 255],
                 'door': [200, 200, 100],
                 'table': [170, 120, 200],
                 'chair': [255, 0, 0],
                 'sofa': [200, 100, 100],
                 'bookcase': [10, 200, 100],
                 'board': [200, 200, 200],
                 'clutter': [50, 50, 50]}
g_easy_view_labels = [7, 8, 9, 10, 11, 1]  # furniture + floor (easy_view mode)
g_label2color = {g_classes.index(cls): g_class2color[cls] for cls in g_classes}  # id -> RGB
# -----------------------------------------------------------------------------
# CONVERT ORIGINAL DATA TO OUR DATA_LABEL FILES
# -----------------------------------------------------------------------------
def delete_class(data_label, dataset_class):
    """Keep only the rows of *data_label* whose label is in *dataset_class*.

    Args:
        data_label: (N, C) array whose last column is the class label.
        dataset_class: iterable of label values to keep.

    Returns:
        (M, C) array (M <= N) containing only the kept rows, in their
        original order.

    Note:
        The previous implementation looped in Python, wrote a 998 sentinel
        into the label column of dropped rows (mutating the caller's array)
        and then filtered on the sentinel.  A vectorized ``np.isin`` mask
        returns the same rows without the magic number or the side effect.
    """
    keep = np.isin(data_label[:, -1], list(dataset_class))
    return data_label[keep]
def collect_point_label(anno_path, out_filename, file_format='txt', del_class=False):
    """ Convert original dataset files to a data_label file (each line XYZRGBL),
        aggregating the points of every instance in the room.
    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save collected points and labels (each line is XYZRGBL)
        file_format: 'txt' or 'numpy', determines what file format to save.
        del_class: if True, drop points whose label is not in S3DIS_CLASS.
    Returns:
        None
    Note:
        the points are shifted before save, the most negative point is now at origin.
    """
    points_list = []
    print('anno_path is ', anno_path)
    file_list = glob.glob(os.path.join(anno_path, '*.txt'))
    file_list = list(np.sort(file_list))  # deterministic instance order
    for f in file_list:
        print('f is ', f)
        # instance files are named "<class>_<n>.txt"
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes:  # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f)
        labels = np.ones((points.shape[0], 1)) * g_class2label[cls]
        points_list.append(np.concatenate([points, labels], 1))  # Nx7
    data_label = np.concatenate(points_list, 0)
    # shift so the most negative corner is at the origin
    xyz_min = np.amin(data_label, axis=0)[0:3]
    data_label[:, 0:3] -= xyz_min
    if del_class:
        data_label = delete_class(data_label, S3DIS_CLASS)
    if file_format == 'txt':
        fout = open(out_filename, 'w')
        for i in range(data_label.shape[0]):
            fout.write('%f %f %f %d %d %d %d\n' %
                       (data_label[i, 0], data_label[i, 1], data_label[i, 2],
                        data_label[i, 3], data_label[i,
                                                     4], data_label[i, 5],
                        data_label[i, 6]))
        fout.close()
    elif file_format == 'numpy':
        np.save(out_filename, data_label)
    else:
        print('ERROR!! Unknown file format: %s, please use txt or numpy.' %
              (file_format))
        exit()
def point_label_to_obj(input_filename, out_filename, label_color=True, easy_view=False, no_wall=False):
    """ For visualization of a room from a data_label file.
        input_filename: each line is X Y Z R G B L
        out_filename: OBJ filename; points are written as 'v x y z r g b'
        label_color: if True colour each point by its class, else keep its RGB
        easy_view: only visualize furniture and floor
        no_wall: additionally skip wall (label 2) and ceiling (label 0) points
    """
    data_label = np.loadtxt(input_filename)
    data = data_label[:, 0:6]
    label = data_label[:, -1].astype(int)
    fout = open(out_filename, 'w')
    for i in range(data.shape[0]):
        color = g_label2color[label[i]]
        if easy_view and (label[i] not in g_easy_view_labels):
            continue
        if no_wall and ((label[i] == 2) or (label[i] == 0)):
            continue
        if label_color:
            # colour by class label
            fout.write('v %f %f %f %d %d %d\n' %
                       (data[i, 0], data[i, 1], data[i, 2], color[0], color[1], color[2]))
        else:
            # keep the original point colour
            fout.write('v %f %f %f %d %d %d\n' %
                       (data[i, 0], data[i, 1], data[i, 2], data[i, 3], data[i, 4], data[i, 5]))
    fout.close()
# -----------------------------------------------------------------------------
# PREPARE BLOCK DATA FOR DEEPNETS TRAINING/TESTING
# -----------------------------------------------------------------------------
def sample_data(data, num_sample):
    """Resample *data* along axis 0 to exactly *num_sample* rows.

    - N == num_sample: data is returned unchanged.
    - N >  num_sample: keep num_sample randomly chosen rows (drawn WITH
      replacement, matching the original behaviour).
    - N <  num_sample: keep all rows and randomly duplicate the remainder.

    Args:
        data: (N, ...) array.
        num_sample: desired number of rows.

    Returns:
        (sampled_data, indices): indices is always a 1-D integer ndarray
        into the original rows.  (The original returned a ``range`` / plain
        list in some branches, making the return type inconsistent.)
    """
    N = data.shape[0]
    if N == num_sample:
        return data, np.arange(N)
    elif N > num_sample:
        sample = np.random.choice(N, num_sample)
        return data[sample, ...], sample
    else:
        # duplicate num_sample - N randomly chosen rows
        sample = np.random.choice(N, num_sample - N)
        dup_data = data[sample, ...]
        return np.concatenate([data, dup_data], 0), \
            np.concatenate([np.arange(N), sample], 0)
def sample_data_label(data, label, num_sample):
    """Jointly resample *data* rows and their per-row *label*s to num_sample."""
    sampled, indices = sample_data(data, num_sample)
    return sampled, label[indices]
def room2blocks(data, label, num_point, block_size=1.0, stride=1.0,
                random_sample=False, sample_num=None, sample_aug=1):
    """ Prepare block training data by sweeping fixed-size xy blocks.
    Args:
        data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1]
            assumes the data is shifted (min point is origin) and aligned
            (aligned with XYZ axis)
        label: N size uint8 numpy array from 0-12
        num_point: int, how many points to sample in each block
        block_size: float, physical size of the block in meters
        stride: float, stride for block sweeping
        random_sample: bool, if True, we will randomly sample blocks in the room
        sample_num: int, if random sample, how many blocks to sample
            [default: room area]
        sample_aug: if random sample, how much aug
    Returns:
        block_datas: K x num_point x 6 np array of XYZRGB, RGB is in [0,1]
        block_labels: K x num_point x 1 np array of uint8 labels
    Note: blocks containing fewer than 100 points are discarded.
    """
    assert(stride <= block_size)
    limit = np.amax(data, 0)[0:3]
    # Get the corner location for our sampling blocks
    xbeg_list = []
    ybeg_list = []
    if not random_sample:
        # regular grid of block corners with the requested stride
        num_block_x = int(np.ceil((limit[0] - block_size) / stride)) + 1
        num_block_y = int(np.ceil((limit[1] - block_size) / stride)) + 1
        for i in range(num_block_x):
            for j in range(num_block_y):
                xbeg_list.append(i * stride)
                ybeg_list.append(j * stride)
    else:
        # uniformly random block corners (count scaled by sample_aug)
        num_block_x = int(np.ceil(limit[0] / block_size))
        num_block_y = int(np.ceil(limit[1] / block_size))
        if sample_num is None:
            sample_num = num_block_x * num_block_y * sample_aug
        for _ in range(sample_num):
            xbeg = np.random.uniform(-block_size, limit[0])
            ybeg = np.random.uniform(-block_size, limit[1])
            xbeg_list.append(xbeg)
            ybeg_list.append(ybeg)
    # Collect blocks
    block_data_list = []
    block_label_list = []
    idx = 0
    for idx in range(len(xbeg_list)):
        xbeg = xbeg_list[idx]
        ybeg = ybeg_list[idx]
        xcond = (data[:, 0] <= xbeg + block_size) & (data[:, 0] >= xbeg)
        ycond = (data[:, 1] <= ybeg + block_size) & (data[:, 1] >= ybeg)
        cond = xcond & ycond
        if np.sum(cond) < 100:  # discard block if there are less than 100 pts.
            continue
        # if np.sum(cond) < 50: # alternative, looser threshold
        #     continue
        block_data = data[cond, :]
        block_label = label[cond]
        # randomly subsample data
        block_data_sampled, block_label_sampled = \
            sample_data_label(block_data, block_label, num_point)
        block_data_list.append(np.expand_dims(block_data_sampled, 0))
        block_label_list.append(np.expand_dims(block_label_sampled, 0))
    return np.concatenate(block_data_list, 0), \
        np.concatenate(block_label_list, 0)
def room2blocks_plus(data_label, num_point, block_size, stride,
                     random_sample, sample_num, sample_aug):
    """Scale RGB to [0, 1], then split the XYZRGBL array into blocks.

    Note: the RGB scaling happens in place on a view of *data_label*, so
    the caller's array is modified (original behaviour, preserved).
    """
    xyzrgb = data_label[:, 0:6]
    xyzrgb[:, 3:6] /= 255.0
    point_labels = data_label[:, -1].astype(np.uint8)
    return room2blocks(xyzrgb, point_labels, num_point, block_size, stride,
                       random_sample, sample_num, sample_aug)
def room2blocks_wrapper(data_label_filename, num_point, block_size=1.0, stride=1.0,
                        random_sample=False, sample_num=None, sample_aug=1):
    """Load an XYZRGBL room file (.txt or .npy) and split it into blocks."""
    if data_label_filename.endswith('txt'):
        data_label = np.loadtxt(data_label_filename)
    elif data_label_filename.endswith('npy'):
        data_label = np.load(data_label_filename)
    else:
        print('Unknown file type! exiting.')
        exit()
    return room2blocks_plus(data_label, num_point, block_size, stride,
                            random_sample, sample_num, sample_aug)
def room2blocks_plus_normalized(data_label, num_point, block_size, stride,
                                random_sample, sample_num, sample_aug):
    """ room2block, with RGB preprocessing;
        for each block centralize XYZ and add normalized XYZ as channels 6-8.
        Note: the RGB division mutates data_label in place (view).
    """
    data = data_label[:, 0:6]
    data[:, 3:6] /= 255.0
    label = data_label[:, -1].astype(np.uint8)
    max_room_x = max(data[:, 0])
    max_room_y = max(data[:, 1])
    max_room_z = max(data[:, 2])
    data_batch, label_batch = room2blocks(data, label, num_point, block_size, stride,
                                          random_sample, sample_num, sample_aug)
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for b in range(data_batch.shape[0]):
        # Normalized position (computed BEFORE the block is centred below)
        new_data_batch[b, :, 6] = data_batch[b, :, 0] / max_room_x
        new_data_batch[b, :, 7] = data_batch[b, :, 1] / max_room_y
        new_data_batch[b, :, 8] = data_batch[b, :, 2] / max_room_z
        # centre the block on its xy midpoint
        minx = min(data_batch[b, :, 0])
        miny = min(data_batch[b, :, 1])
        data_batch[b, :, 0] -= (minx + block_size / 2)
        data_batch[b, :, 1] -= (miny + block_size / 2)
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch
def load_scenenn(data_label_filename):
    """Read a SceneNN .ply file into an (N, 7) XYZRGBL float32 array.

    The raw files store y and z swapped, so they are exchanged while
    copying.  Labels outside the shared NYU class set are dropped and the
    cloud is shifted so its minimum corner sits at the origin.
    """
    print('Loading ', data_label_filename)
    plydata = PlyData.read(data_label_filename)
    verts = plydata['vertex'].data
    data_label = np.zeros(shape=[plydata['vertex'].count, 7], dtype=np.float32)
    # column order fixes the raw data's y/z misplacement
    for col, field in enumerate(('x', 'z', 'y', 'red', 'green', 'blue', 'nyu_class')):
        data_label[:, col] = verts[field]
    # delete non-shared classes
    data_label = delete_class(data_label, NYU_CLASS)
    # make each axis larger than 0
    data_label[:, 0:3] -= np.amin(data_label, axis=0)[0:3]
    print('Loaded ', data_label_filename)
    return data_label
def delete_outdoor(data_label):
    """Drop points lying outside the floor's xy bounding box (+1 m margin).

    Points with label 1 (floor) define the indoor extent; everything more
    than 1 m outside that xy rectangle is discarded.
    """
    floor = data_label[data_label[:, -1] == 1]
    lo = np.amin(floor, axis=0)[0:2]
    hi = np.amax(floor, axis=0)[0:2]
    inside = np.ones(data_label.shape[0], dtype=bool)
    for axis in (0, 1):
        inside &= (data_label[:, axis] >= lo[axis] - 1) & \
                  (data_label[:, axis] <= hi[axis] + 1)
    return data_label[inside, :]
def load_suncg(data_label_filename):
    """Read a SUNCG .txt room into an (N, 7) XYZRGBL array.

    Swaps the y/z axes, keeps only the shared NYU classes, clips points
    far outside the floor extent and shifts the cloud so its minimum
    corner sits at the origin.
    """
    print('Loading ', data_label_filename)
    data_label = np.loadtxt(data_label_filename)
    data_label[:, [1, 2]] = data_label[:, [2, 1]]  # stored xzy -> xyz
    # drop non-shared classes, then clip to the indoor (floor) extent
    data_label = delete_outdoor(delete_class(data_label, NYU_CLASS))
    # make each axis larger than 0
    data_label[:, 0:3] -= np.amin(data_label, axis=0)[0:3]
    print('Loaded ', data_label_filename)
    return data_label
def room2blocks_wrapper_normalized(data_label_filename, num_point, block_size=1.0, stride=1.0,
                                   random_sample=False, sample_num=None, sample_aug=1):
    """Load a room (.txt SUNCG / .npy / .ply SceneNN) and split it into
    blocks with normalized-XYZ channels appended."""
    ext = data_label_filename[-3:]
    if ext == 'txt':
        # .txt rooms come from SUNCG (plain np.loadtxt is not enough here)
        data_label = load_suncg(data_label_filename)
    elif ext == 'npy':
        data_label = np.load(data_label_filename)
    elif ext == 'ply':
        data_label = load_scenenn(data_label_filename)
    else:
        print('Unknown file type! exiting.')
        exit()
    return room2blocks_plus_normalized(data_label, num_point, block_size, stride,
                                       random_sample, sample_num, sample_aug)
def room2samples(data, label, sample_num_point):
    """ Prepare whole room samples.
    Args:
        data: N x 6 numpy array, 012 are XYZ in meters, 345 are RGB in [0,1]
            assumes the data is shifted (min point is origin) and
            aligned (aligned with XYZ axis)
        label: N size uint8 numpy array from 0-12
        sample_num_point: int, how many points to sample in each sample
    Returns:
        sample_datas: K x sample_num_point x 6
            numpy array of XYZRGB, RGB is in [0,1]
        sample_labels: K x sample_num_point x 1 np array of uint8 labels
    """
    N = data.shape[0]
    # shuffle the whole room once so each chunk is a random subset
    order = np.arange(N)
    np.random.shuffle(order)
    data = data[order, :]
    label = label[order]
    batch_num = int(np.ceil(N / float(sample_num_point)))
    sample_datas = np.zeros((batch_num, sample_num_point, 6))
    sample_labels = np.zeros((batch_num, sample_num_point, 1))
    for i in range(batch_num):
        beg_idx = i * sample_num_point
        end_idx = min((i + 1) * sample_num_point, N)
        num = end_idx - beg_idx
        sample_datas[i, 0:num, :] = data[beg_idx:end_idx, :]
        sample_labels[i, 0:num, 0] = label[beg_idx:end_idx]
        if num < sample_num_point:
            # last chunk: pad with randomly re-drawn points from the room
            makeup_indices = np.random.choice(N, sample_num_point - num)
            sample_datas[i, num:, :] = data[makeup_indices, :]
            sample_labels[i, num:, 0] = label[makeup_indices]
    return sample_datas, sample_labels
def room2samples_plus_normalized(data_label, num_point):
    """ room2sample, with RGB preprocessing;
        adds normalized XYZ as channels 6-8 (no block centring here).
        Note: the RGB division mutates data_label in place (view).
    """
    data = data_label[:, 0:6]
    data[:, 3:6] /= 255.0
    label = data_label[:, -1].astype(np.uint8)
    max_room_x = max(data[:, 0])
    max_room_y = max(data[:, 1])
    max_room_z = max(data[:, 2])
    #print(max_room_x, max_room_y, max_room_z)
    data_batch, label_batch = room2samples(data, label, num_point)
    new_data_batch = np.zeros((data_batch.shape[0], num_point, 9))
    for b in range(data_batch.shape[0]):
        # normalized position in [0, 1] per axis
        new_data_batch[b, :, 6] = data_batch[b, :, 0] / max_room_x
        new_data_batch[b, :, 7] = data_batch[b, :, 1] / max_room_y
        new_data_batch[b, :, 8] = data_batch[b, :, 2] / max_room_z
        #minx = min(data_batch[b, :, 0])
        #miny = min(data_batch[b, :, 1])
        #data_batch[b, :, 0] -= (minx+block_size/2)
        #data_batch[b, :, 1] -= (miny+block_size/2)
    new_data_batch[:, :, 0:6] = data_batch
    return new_data_batch, label_batch
def room2samples_wrapper_normalized(data_label_filename, num_point):
    """Load an XYZRGBL room (.txt or .npy) and cut it into whole-room samples."""
    if data_label_filename.endswith('txt'):
        data_label = np.loadtxt(data_label_filename)
    elif data_label_filename.endswith('npy'):
        data_label = np.load(data_label_filename)
    else:
        print('Unknown file type! exiting.')
        exit()
    return room2samples_plus_normalized(data_label, num_point)
# -----------------------------------------------------------------------------
# EXTRACT INSTANCE BBOX FROM ORIGINAL DATA (for detection evaluation)
# -----------------------------------------------------------------------------
def collect_bounding_box(anno_path, out_filename):
    """ Compute bounding boxes from each instance in original dataset files on
    one room. **We assume the bbox is aligned with XYZ coordinate.**
    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save instance bounding boxes for that room.
            each line is x1 y1 z1 x2 y2 z2 label,
            where (x1,y1,z1) is the point on the diagonal closer to origin
    Returns:
        None
    Note:
        room points are shifted, the most negative point is now at origin.
    """
    bbox_label_list = []
    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        # instance files are named "<class>_<n>.txt"
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes:  # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f)
        label = g_class2label[cls]
        # Compute tightest axis aligned bounding box
        xyz_min = np.amin(points[:, 0:3], axis=0)
        xyz_max = np.amax(points[:, 0:3], axis=0)
        ins_bbox_label = np.expand_dims(
            np.concatenate([xyz_min, xyz_max, np.array([label])], 0), 0)
        bbox_label_list.append(ins_bbox_label)
    bbox_label = np.concatenate(bbox_label_list, 0)
    # shift both corners so the room's minimum corner is at the origin
    room_xyz_min = np.amin(bbox_label[:, 0:3], axis=0)
    bbox_label[:, 0:3] -= room_xyz_min
    bbox_label[:, 3:6] -= room_xyz_min
    fout = open(out_filename, 'w')
    for i in range(bbox_label.shape[0]):
        fout.write('%f %f %f %f %f %f %d\n' %
                   (bbox_label[i, 0], bbox_label[i, 1], bbox_label[i, 2],
                    bbox_label[i, 3], bbox_label[i, 4], bbox_label[i, 5],
                    bbox_label[i, 6]))
    fout.close()
def bbox_label_to_obj(input_filename, out_filename_prefix, easy_view=False):
    """ Visualization of bounding boxes: one OBJ/MTL file pair per instance.
    Args:
        input_filename: each line is x1 y1 z1 x2 y2 z2 label
        out_filename_prefix: OBJ filename prefix,
            visualize object by g_label2color
        easy_view: if True, only visualize furniture and floor
    Returns:
        output a list of OBJ file and MTL files with the same prefix
    """
    bbox_label = np.loadtxt(input_filename)
    bbox = bbox_label[:, 0:6]
    label = bbox_label[:, -1].astype(int)
    ins_cnt = 0  # count instance
    for i in range(bbox.shape[0]):
        if easy_view and (label[i] not in g_easy_view_labels):
            continue
        obj_filename = out_filename_prefix + '_' + \
            g_classes[label[i]] + '_' + str(ins_cnt) + '.obj'
        mtl_filename = out_filename_prefix + '_' + \
            g_classes[label[i]] + '_' + str(ins_cnt) + '.mtl'
        # Box extents (a, b, c) measured from the min corner (x, y, z).
        length = bbox[i, 3:6] - bbox[i, 0:3]
        a, b, c = length[0], length[1], length[2]
        x, y, z = bbox[i, 0], bbox[i, 1], bbox[i, 2]
        color = np.array(g_label2color[label[i]], dtype=float) / 255.0
        material = 'material%d' % (ins_cnt)
        with open(obj_filename, 'w') as fout_obj, \
                open(mtl_filename, 'w') as fout_mtl:
            fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename)))
            fout_obj.write('usemtl %s\n' % (material))
            # 8 cuboid corners: top face (z + c) first, then bottom face (z).
            fout_obj.write('v %f %f %f\n' % (x, y, z + c))
            fout_obj.write('v %f %f %f\n' % (x, y + b, z + c))
            fout_obj.write('v %f %f %f\n' % (x + a, y + b, z + c))
            fout_obj.write('v %f %f %f\n' % (x + a, y, z + c))
            fout_obj.write('v %f %f %f\n' % (x, y, z))
            fout_obj.write('v %f %f %f\n' % (x, y + b, z))
            fout_obj.write('v %f %f %f\n' % (x + a, y + b, z))
            fout_obj.write('v %f %f %f\n' % (x + a, y, z))
            fout_obj.write('g default\n')
            # Each OBJ file holds exactly one box, so face indices are the
            # fixed vertex ids 1..8. (The original kept a ``v_cnt`` offset
            # that was reset to 0 every iteration and only incremented after
            # the file was closed -- dead bookkeeping, removed.)
            fout_obj.write('f %d %d %d %d\n' % (4, 3, 2, 1))
            fout_obj.write('f %d %d %d %d\n' % (1, 2, 6, 5))
            fout_obj.write('f %d %d %d %d\n' % (7, 6, 2, 3))
            fout_obj.write('f %d %d %d %d\n' % (4, 8, 7, 3))
            fout_obj.write('f %d %d %d %d\n' % (5, 8, 4, 1))
            fout_obj.write('f %d %d %d %d\n' % (5, 6, 7, 8))
            fout_obj.write('\n')
            fout_mtl.write('newmtl %s\n' % (material))
            fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2]))
            fout_mtl.write('\n')
        ins_cnt += 1
def bbox_label_to_obj_room(input_filename, out_filename_prefix, easy_view=False, permute=None, center=False, exclude_table=False):
    """ Visualization of bounding boxes.
    Args:
        input_filename: each line is x1 y1 z1 x2 y2 z2 label
        out_filename_prefix: OBJ filename prefix,
            visualize object by g_label2color
        easy_view: if True, only visualize furniture and floor
        permute: if not None, permute XYZ for rendering, e.g. [0 2 1]
        center: if True, move obj to have zero origin
        exclude_table: if True, skip boxes labeled as 'table'
    Returns:
        output a list of OBJ file and MTL files with the same prefix
    """
    bbox_label = np.loadtxt(input_filename)
    bbox = bbox_label[:, 0:6]
    if permute is not None:
        assert(len(permute) == 3)
        permute = np.array(permute)
        # Reorder min-corner and max-corner columns consistently.
        bbox[:, 0:3] = bbox[:, permute]
        bbox[:, 3:6] = bbox[:, permute + 3]
    if center:
        # Recenter around the room midpoint and scale to roughly [-1, 1].
        xyz_max = np.amax(bbox[:, 3:6], 0)
        bbox[:, 0:3] -= (xyz_max / 2.0)
        bbox[:, 3:6] -= (xyz_max / 2.0)
        bbox /= np.max(xyz_max / 2.0)
    label = bbox_label[:, -1].astype(int)
    # Unlike bbox_label_to_obj, all boxes of the room share ONE OBJ/MTL pair,
    # so a running vertex offset (v_cnt) is required for the face indices.
    obj_filename = out_filename_prefix + '.obj'
    mtl_filename = out_filename_prefix + '.mtl'
    fout_obj = open(obj_filename, 'w')
    fout_mtl = open(mtl_filename, 'w')
    fout_obj.write('mtllib %s\n' % (os.path.basename(mtl_filename)))
    v_cnt = 0  # count vertex
    ins_cnt = 0  # count instance
    for i in range(bbox.shape[0]):
        if easy_view and (label[i] not in g_easy_view_labels):
            continue
        if exclude_table and label[i] == g_classes.index('table'):
            continue
        # Box extents (a, b, c) measured from the min corner (x, y, z).
        length = bbox[i, 3:6] - bbox[i, 0:3]
        a = length[0]
        b = length[1]
        c = length[2]
        x = bbox[i, 0]
        y = bbox[i, 1]
        z = bbox[i, 2]
        color = np.array(g_label2color[label[i]], dtype=float) / 255.0
        material = 'material%d' % (ins_cnt)
        fout_obj.write('usemtl %s\n' % (material))
        # 8 cuboid corners: top face (z + c) first, then bottom face (z).
        fout_obj.write('v %f %f %f\n' % (x, y, z + c))
        fout_obj.write('v %f %f %f\n' % (x, y + b, z + c))
        fout_obj.write('v %f %f %f\n' % (x + a, y + b, z + c))
        fout_obj.write('v %f %f %f\n' % (x + a, y, z + c))
        fout_obj.write('v %f %f %f\n' % (x, y, z))
        fout_obj.write('v %f %f %f\n' % (x, y + b, z))
        fout_obj.write('v %f %f %f\n' % (x + a, y + b, z))
        fout_obj.write('v %f %f %f\n' % (x + a, y, z))
        fout_obj.write('g default\n')
        # Faces index into the shared vertex list, hence the v_cnt offset.
        fout_obj.write('f %d %d %d %d\n' %
                       (4 + v_cnt, 3 + v_cnt, 2 + v_cnt, 1 + v_cnt))
        fout_obj.write('f %d %d %d %d\n' %
                       (1 + v_cnt, 2 + v_cnt, 6 + v_cnt, 5 + v_cnt))
        fout_obj.write('f %d %d %d %d\n' %
                       (7 + v_cnt, 6 + v_cnt, 2 + v_cnt, 3 + v_cnt))
        fout_obj.write('f %d %d %d %d\n' %
                       (4 + v_cnt, 8 + v_cnt, 7 + v_cnt, 3 + v_cnt))
        fout_obj.write('f %d %d %d %d\n' %
                       (5 + v_cnt, 8 + v_cnt, 4 + v_cnt, 1 + v_cnt))
        fout_obj.write('f %d %d %d %d\n' %
                       (5 + v_cnt, 6 + v_cnt, 7 + v_cnt, 8 + v_cnt))
        fout_obj.write('\n')
        fout_mtl.write('newmtl %s\n' % (material))
        fout_mtl.write('Kd %f %f %f\n' % (color[0], color[1], color[2]))
        fout_mtl.write('\n')
        v_cnt += 8
        ins_cnt += 1
    fout_obj.close()
    fout_mtl.close()
def collect_point_bounding_box(anno_path, out_filename, file_format):
    """ Compute bounding boxes from each instance in original dataset files on
    one room. **We assume the bbox is aligned with XYZ coordinate.**
    Save both the point XYZRGB and the bounding box for the point's
    parent element.
    Args:
        anno_path: path to annotations. e.g. Area_1/office_2/Annotations/
        out_filename: path to save instance bounding boxes for each point,
            plus the point's XYZRGBL
            each line is XYZRGBL offsetX offsetY offsetZ a b c,
            where cx = X+offsetX, cy=X+offsetY, cz=Z+offsetZ
            where (cx,cy,cz) is center of the box, a,b,c are distances from center
            to the surfaces of the box, i.e. x1 = cx-a, x2 = cx+a, y1=cy-b etc.
        file_format: output file format, txt or numpy
    Returns:
        None
    Note:
        room points are shifted, the most negative point is now at origin.
    """
    point_bbox_list = []
    for f in glob.glob(os.path.join(anno_path, '*.txt')):
        # Instance files are named "<class>_<idx>.txt"; unknown class names
        # fall back to 'clutter'.
        cls = os.path.basename(f).split('_')[0]
        if cls not in g_classes:  # note: in some room there is 'staris' class..
            cls = 'clutter'
        points = np.loadtxt(f)  # Nx6
        label = g_class2label[cls]  # N,
        # Compute tightest axis aligned bounding box
        xyz_min = np.amin(points[:, 0:3], axis=0)  # 3,
        xyz_max = np.amax(points[:, 0:3], axis=0)  # 3,
        xyz_center = (xyz_min + xyz_max) / 2
        dimension = (xyz_max - xyz_min) / 2
        # Per-point vector from the point to its parent box center.
        xyz_offsets = xyz_center - points[:, 0:3]  # Nx3
        # Broadcast the (half-)dimensions and label to every point.
        dimensions = np.ones((points.shape[0], 3)) * dimension  # Nx3
        labels = np.ones((points.shape[0], 1)) * label  # N
        point_bbox_list.append(np.concatenate([points, labels,
                                               xyz_offsets, dimensions], 1))  # Nx13
    point_bbox = np.concatenate(point_bbox_list, 0)  # KxNx13
    # Shift the room so its most negative corner sits at the origin.
    room_xyz_min = np.amin(point_bbox[:, 0:3], axis=0)
    point_bbox[:, 0:3] -= room_xyz_min
    if file_format == 'txt':
        fout = open(out_filename, 'w')
        for i in range(point_bbox.shape[0]):
            fout.write('%f %f %f %d %d %d %d %f %f %f %f %f %f\n' %
                       (point_bbox[i, 0], point_bbox[i, 1], point_bbox[i, 2],
                        point_bbox[i, 3], point_bbox[i, 4], point_bbox[i, 5],
                        point_bbox[i, 6],
                        point_bbox[i, 7], point_bbox[i, 8], point_bbox[i, 9],
                        point_bbox[i, 10], point_bbox[i, 11], point_bbox[i, 12]))
        fout.close()
    elif file_format == 'numpy':
        np.save(out_filename, point_bbox)
    else:
        print('ERROR!! Unknown file format: %s, please use txt or numpy.' %
              (file_format))
        exit()
| 28,031 | 39.160458 | 134 | py |
FPConv | FPConv-master/utils/saver.py | import os
import torch
class Saver():
    """Manage model checkpoints on disk, keeping at most ``max_files`` files.

    A plain-text log (``.saver_log`` inside ``save_dir``) records the saved
    checkpoint paths, most recent first, so pruning survives process restarts.
    """

    def __init__(self, save_dir, max_files=10):
        """
        Args:
            save_dir: directory for checkpoints (created if missing).
            max_files: maximum number of checkpoint files kept on disk.
        """
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        self.log_list = []
        self.save_dir = save_dir
        self.max_files = max_files
        self.saver_log_path = os.path.join(save_dir, '.saver_log')
        if os.path.isfile(self.saver_log_path):
            with open(self.saver_log_path, 'r') as f:
                self.log_list = f.read().splitlines()

    def save_checkpoint(self, model, epoch, ckpt_name, best=False):
        """Save ``model`` weights (unwrapping DataParallel) and prune old files.

        Args:
            model: model, possibly wrapped in (Distributed)DataParallel.
            epoch: epoch number stored alongside the weights.
            ckpt_name: checkpoint file name without the ``.pth`` suffix.
            best: unused; kept for interface compatibility.
        """
        if isinstance(model, (torch.nn.DataParallel,
                              torch.nn.parallel.DistributedDataParallel)):
            model_state = model.module.state_dict()
        else:
            model_state = model.state_dict()
        state = {'epoch': epoch, 'model_state': model_state}
        ckpt_name = '{}.pth'.format(ckpt_name)
        save_path = os.path.join(self.save_dir, ckpt_name)
        torch.save(state, save_path)
        self.log_list.insert(0, save_path)
        if len(self.log_list) > self.max_files:
            pop_file = self.log_list.pop()
            # Guard: never delete the file we just wrote (happens when the
            # same ckpt_name is reused and resurfaces as the oldest entry).
            if pop_file != save_path and os.path.isfile(pop_file):
                os.remove(pop_file)
        with open(self.saver_log_path, 'w') as f:
            for log in self.log_list:
                f.write(log + '\n')

    def load_checkpoint(self, model, filename):
        """Load weights from ``filename`` into ``model``; return stored epoch.

        Raises:
            FileNotFoundError: if ``filename`` does not exist.
        """
        if not os.path.isfile(filename):
            raise FileNotFoundError(filename)
        # Bugfix: the original called an undefined ``log_str`` helper here,
        # which raised NameError on every load; use print instead.
        print("==> Loading from checkpoint %s" % filename)
        checkpoint = torch.load(filename)
        epoch = checkpoint['epoch']
        model.load_state_dict(checkpoint['model_state'])
        print("==> Done")
        return epoch
FPConv | FPConv-master/utils/switchnorm.py | import torch
import torch.nn as nn
def convert_sn(module, momentum=0.95):
    """Recursively replace BatchNorm1d/2d/3d layers with SwitchNorm1d/2d/3d.

    Args:
        module: root ``nn.Module``; its children are rebuilt in place.
        momentum: moving-average momentum for the new SwitchNorm layers.
            Bugfix: the original accepted (and recursed with) ``momentum``
            but never forwarded it to the SwitchNorm constructors.

    Returns:
        The converted module tree.
    """
    module_output = module
    if isinstance(module, torch.nn.BatchNorm3d):
        module_output = SwitchNorm3d(module.num_features, momentum=momentum)
    elif isinstance(module, torch.nn.BatchNorm2d):
        module_output = SwitchNorm2d(module.num_features, momentum=momentum)
    elif isinstance(module, torch.nn.BatchNorm1d):
        module_output = SwitchNorm1d(module.num_features, momentum=momentum)
    for name, child in module.named_children():
        module_output.add_module(name, convert_sn(child, momentum))
    del module
    return module_output
class SwitchNorm1d(nn.Module):
    """Switchable Normalization for 3D inputs of shape (N, C, L).

    Blends Instance-Norm, Layer-Norm and (optionally) Batch-Norm statistics
    with learned softmax weights (Luo et al., "Differentiable Learning-to-
    Normalize via Switchable Normalization", 2018).
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.95, using_moving_average=True, using_bn=True,
                 last_gamma=False):
        super(SwitchNorm1d, self).__init__()
        self.eps = eps
        self.momentum = momentum  # decay for the BN running-stat moving average
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.last_gamma = last_gamma
        self.weight = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        if self.using_bn:
            # Three candidate statistics to switch between: IN / LN / BN.
            self.mean_weight = nn.Parameter(torch.ones(3))
            self.var_weight = nn.Parameter(torch.ones(3))
        else:
            self.mean_weight = nn.Parameter(torch.ones(2))
            self.var_weight = nn.Parameter(torch.ones(2))
        if self.using_bn:
            self.register_buffer('running_mean', torch.zeros(1, num_features, 1))
            self.register_buffer('running_var', torch.zeros(1, num_features, 1))
        self.reset_parameters()
    def reset_parameters(self):
        """Zero running stats; gamma init 0 (last_gamma) or 1, beta init 0."""
        if self.using_bn:
            self.running_mean.zero_()
            self.running_var.zero_()
        if self.last_gamma:
            self.weight.data.fill_(0)
        else:
            self.weight.data.fill_(1)
        self.bias.data.zero_()
    def _check_input_dim(self, input):
        if input.dim() != 3:
            # Bugfix: the original message claimed "expected 4D input"
            # although this layer checks for (and consumes) 3D input.
            raise ValueError('expected 3D input (got {}D input)'
                             .format(input.dim()))
    def forward(self, x):
        self._check_input_dim(x)
        x = x.unsqueeze(-1)  # (N, C, L) -> (N, C, L, 1): reuse the 2D math
        N, C, H, W = x.size()
        x = x.view(N, C, -1)
        # Instance-norm stats: per (sample, channel).
        mean_in = x.mean(-1, keepdim=True)
        var_in = x.var(-1, keepdim=True)
        # Layer-norm stats: per sample, derived from the IN moments.
        mean_ln = mean_in.mean(1, keepdim=True)
        temp = var_in + mean_in ** 2
        var_ln = temp.mean(1, keepdim=True) - mean_ln ** 2
        if self.using_bn:
            if self.training:
                # Batch-norm stats: per channel, across the batch.
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2
                if self.using_moving_average:
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * mean_bn.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * var_bn.data)
                else:
                    self.running_mean.add_(mean_bn.data)
                    self.running_var.add_(mean_bn.data ** 2 + var_bn.data)
            else:
                mean_bn = torch.autograd.Variable(self.running_mean)
                var_bn = torch.autograd.Variable(self.running_var)
        # Softmax over the statistic axis yields the switch weights.
        softmax = nn.Softmax(0)
        mean_weight = softmax(self.mean_weight)
        var_weight = softmax(self.var_weight)
        if self.using_bn:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln + mean_weight[2] * mean_bn
            var = var_weight[0] * var_in + var_weight[1] * var_ln + var_weight[2] * var_bn
        else:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln
            var = var_weight[0] * var_in + var_weight[1] * var_ln
        x = (x-mean) / (var+self.eps).sqrt()
        x = x.view(N, C, H, W)
        x = x * self.weight + self.bias
        return x.squeeze(-1)
class SwitchNorm2d(nn.Module):
    """Switchable Normalization for 4D inputs (N, C, H, W).

    Learns softmax weights that blend Instance-Norm, Layer-Norm and
    (optionally) Batch-Norm statistics (Luo et al., 2018).
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.95, using_moving_average=True, using_bn=True,
                 last_gamma=False):
        super(SwitchNorm2d, self).__init__()
        self.eps = eps
        self.momentum = momentum  # decay for the BN running-stat moving average
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.last_gamma = last_gamma
        self.weight = nn.Parameter(torch.ones(1, num_features, 1, 1))
        self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1))
        if self.using_bn:
            # Three candidate statistics to switch between: IN / LN / BN.
            self.mean_weight = nn.Parameter(torch.ones(3))
            self.var_weight = nn.Parameter(torch.ones(3))
        else:
            self.mean_weight = nn.Parameter(torch.ones(2))
            self.var_weight = nn.Parameter(torch.ones(2))
        if self.using_bn:
            self.register_buffer('running_mean', torch.zeros(1, num_features, 1))
            self.register_buffer('running_var', torch.zeros(1, num_features, 1))
        self.reset_parameters()
    def reset_parameters(self):
        """Zero running stats; gamma init 0 (last_gamma) or 1, beta init 0."""
        if self.using_bn:
            self.running_mean.zero_()
            self.running_var.zero_()
        if self.last_gamma:
            self.weight.data.fill_(0)
        else:
            self.weight.data.fill_(1)
        self.bias.data.zero_()
    def _check_input_dim(self, input):
        if input.dim() != 4:
            raise ValueError('expected 4D input (got {}D input)'
                             .format(input.dim()))
    def forward(self, x):
        self._check_input_dim(x)
        N, C, H, W = x.size()
        x = x.view(N, C, -1)
        # Instance-norm stats: per (sample, channel).
        mean_in = x.mean(-1, keepdim=True)
        var_in = x.var(-1, keepdim=True)
        # Layer-norm stats: per sample, derived from the IN moments.
        mean_ln = mean_in.mean(1, keepdim=True)
        temp = var_in + mean_in ** 2
        var_ln = temp.mean(1, keepdim=True) - mean_ln ** 2
        if self.using_bn:
            if self.training:
                # Batch-norm stats: per channel, across the batch.
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2
                if self.using_moving_average:
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * mean_bn.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * var_bn.data)
                else:
                    self.running_mean.add_(mean_bn.data)
                    self.running_var.add_(mean_bn.data ** 2 + var_bn.data)
            else:
                mean_bn = torch.autograd.Variable(self.running_mean)
                var_bn = torch.autograd.Variable(self.running_var)
        # Softmax over the statistic axis yields the switch weights.
        softmax = nn.Softmax(0)
        mean_weight = softmax(self.mean_weight)
        var_weight = softmax(self.var_weight)
        if self.using_bn:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln + mean_weight[2] * mean_bn
            var = var_weight[0] * var_in + var_weight[1] * var_ln + var_weight[2] * var_bn
        else:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln
            var = var_weight[0] * var_in + var_weight[1] * var_ln
        x = (x-mean) / (var+self.eps).sqrt()
        x = x.view(N, C, H, W)
        return x * self.weight + self.bias
class SwitchNorm3d(nn.Module):
    """Switchable Normalization for 5D inputs (N, C, D, H, W).

    Same scheme as SwitchNorm1d/2d but over volumetric features.
    NOTE(review): default momentum here is 0.997 while the 1d/2d variants
    use 0.95 -- presumably intentional, verify against the training configs.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.997, using_moving_average=True, using_bn=True,
                 last_gamma=False):
        super(SwitchNorm3d, self).__init__()
        self.eps = eps
        self.momentum = momentum  # decay for the BN running-stat moving average
        self.using_moving_average = using_moving_average
        self.using_bn = using_bn
        self.last_gamma = last_gamma
        self.weight = nn.Parameter(torch.ones(1, num_features, 1, 1, 1))
        self.bias = nn.Parameter(torch.zeros(1, num_features, 1, 1, 1))
        if self.using_bn:
            # Three candidate statistics to switch between: IN / LN / BN.
            self.mean_weight = nn.Parameter(torch.ones(3))
            self.var_weight = nn.Parameter(torch.ones(3))
        else:
            self.mean_weight = nn.Parameter(torch.ones(2))
            self.var_weight = nn.Parameter(torch.ones(2))
        if self.using_bn:
            self.register_buffer('running_mean', torch.zeros(1, num_features, 1))
            self.register_buffer('running_var', torch.zeros(1, num_features, 1))
        self.reset_parameters()
    def reset_parameters(self):
        """Zero running stats; gamma init 0 (last_gamma) or 1, beta init 0."""
        if self.using_bn:
            self.running_mean.zero_()
            self.running_var.zero_()
        if self.last_gamma:
            self.weight.data.fill_(0)
        else:
            self.weight.data.fill_(1)
        self.bias.data.zero_()
    def _check_input_dim(self, input):
        if input.dim() != 5:
            raise ValueError('expected 5D input (got {}D input)'
                             .format(input.dim()))
    def forward(self, x):
        self._check_input_dim(x)
        N, C, D, H, W = x.size()
        x = x.view(N, C, -1)
        # Instance-norm stats: per (sample, channel).
        mean_in = x.mean(-1, keepdim=True)
        var_in = x.var(-1, keepdim=True)
        # Layer-norm stats: per sample, derived from the IN moments.
        mean_ln = mean_in.mean(1, keepdim=True)
        temp = var_in + mean_in ** 2
        var_ln = temp.mean(1, keepdim=True) - mean_ln ** 2
        if self.using_bn:
            if self.training:
                # Batch-norm stats: per channel, across the batch.
                mean_bn = mean_in.mean(0, keepdim=True)
                var_bn = temp.mean(0, keepdim=True) - mean_bn ** 2
                if self.using_moving_average:
                    self.running_mean.mul_(self.momentum)
                    self.running_mean.add_((1 - self.momentum) * mean_bn.data)
                    self.running_var.mul_(self.momentum)
                    self.running_var.add_((1 - self.momentum) * var_bn.data)
                else:
                    self.running_mean.add_(mean_bn.data)
                    self.running_var.add_(mean_bn.data ** 2 + var_bn.data)
            else:
                mean_bn = torch.autograd.Variable(self.running_mean)
                var_bn = torch.autograd.Variable(self.running_var)
        # Softmax over the statistic axis yields the switch weights.
        softmax = nn.Softmax(0)
        mean_weight = softmax(self.mean_weight)
        var_weight = softmax(self.var_weight)
        if self.using_bn:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln + mean_weight[2] * mean_bn
            var = var_weight[0] * var_in + var_weight[1] * var_ln + var_weight[2] * var_bn
        else:
            mean = mean_weight[0] * mean_in + mean_weight[1] * mean_ln
            var = var_weight[0] * var_in + var_weight[1] * var_ln
        x = (x - mean) / (var + self.eps).sqrt()
        x = x.view(N, C, D, H, W)
        return x * self.weight + self.bias
| 10,468 | 38.958015 | 104 | py |
FPConv | FPConv-master/utils/collect_indoor3d_data.py | """
https://github.com/charlesq34/pointnet/blob/master/sem_seg/collect_indoor3d_data.py
"""
import os, sys
import indoor3d_util
import argparse
import json
# Script body: convert raw aligned S3DIS annotation folders into per-room
# .npy files using paths taken from a JSON config.
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--config", type=str, default='../config.json')
args = parser.parse_args()
with open(args.config, 'r') as f:
    _cfg = json.load(f)
print(_cfg)
# Resolve meta/raw/output directories relative to this file and the config.
UTILS_DIR = os.path.dirname(os.path.abspath(__file__))
META_ROOT = os.path.join(UTILS_DIR, 's3dis_meta')
RAW_ROOT = _cfg['s3dis_aligned_raw']
OUTPUT_ROOT = _cfg['s3dis_data_root']
print('meta root:', META_ROOT)
print('raw root:', RAW_ROOT)
print('output root:', OUTPUT_ROOT)
# annotations.txt lists the relative Annotations/ directory of every room.
anno_paths = os.path.join(META_ROOT, 'annotations.txt')
anno_paths = [line.rstrip() for line in open(anno_paths)]
output_folder = OUTPUT_ROOT
if not os.path.exists(output_folder):
    os.mkdir(output_folder)
# NOTE: there is an extra character in the v1.2 data in Area_5/hallway_6. It's fixed manually.
for anno_path in anno_paths:
    print(anno_path)
    # Output name is "<Area>_<room>.npy"; skip rooms already converted so the
    # script can be safely re-run after an interruption.
    elements = anno_path.split('/')
    out_filename = elements[-3] + '_' + \
        elements[-2] + '.npy'
    if os.path.exists(os.path.join(output_folder, out_filename)):
        continue
    indoor3d_util.collect_point_label(os.path.join(RAW_ROOT, anno_path),
                                      os.path.join(output_folder, out_filename),
                                      'numpy', False)
| 1,424 | 31.386364 | 94 | py |
FPConv | FPConv-master/utils/__init__.py | 0 | 0 | 0 | py | |
FPConv | FPConv-master/utils/collect_scannet_pickle.py | """
Modified from https://github.com/DylanWusee/pointconv/blob/master/scannet/scannetv2_seg_dataset_rgb21c_pointid.py
"""
import os
import sys
import numpy as np
import pickle
from plyfile import PlyData, PlyElement
import json
import argparse
parser = argparse.ArgumentParser(description="Arg parser")
parser.add_argument("--config", type=str, default='../config.json')
args = parser.parse_args()
with open(args.config, 'r') as f:
_cfg = json.load(f)
print(_cfg)
UTILS_DIR = os.path.dirname(os.path.abspath(__file__))
LIST_ROOT = os.path.join(UTILS_DIR, 'scannet_datalist')
def remove_unano(scene_data, scene_label, scene_data_id):
    """Drop points whose label is outside (0, 41), i.e. unannotated/invalid.

    Returns the filtered (data, label, point_id) triple, keeping row order.
    """
    valid = (scene_label > 0) & (scene_label < 41)  # 0: unanotated
    return scene_data[valid], scene_label[valid], scene_data_id[valid]
# Raw ScanNet label ids that are evaluated (21 classes incl. "unannotated").
test_class = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
def gen_label_map():
    """Map raw ScanNet label ids (0..40) to the 21 evaluated class indices.

    Ids listed in ``test_class`` map to their position in that list; every
    other id collapses to 0 (unannotated).
    """
    label_map = np.zeros(41)
    for new_id, raw_id in enumerate(test_class):
        label_map[raw_id] = new_id
    print(label_map)
    return label_map
def gen_pickle(split="eval", root="/data5/yiqun/scannet"):
    """Read ScanNet PLY scans for one split and dump them into a pickle.

    For each scene id listed in scannetv2_<split>.txt this loads XYZRGB
    vertices (and, for train/eval, per-vertex labels), filters unannotated
    points and writes four sequential pickle records:
    data list, label list, point-id list, point-count list.
    Output path comes from _cfg['scannet_pickle'].
    """
    if split == 'test':
        root = os.path.join(root, "scans_test")
    else:
        root = os.path.join(root, "scans")
    file_list = os.path.join(LIST_ROOT, "scannetv2_%s.txt"%(split))
    with open(file_list) as fl:
        scene_id = fl.read().splitlines()
    scene_data = []
    scene_data_labels = []
    scene_data_id = []
    scene_data_num = []
    label_map = gen_label_map()
    for i in range(len(scene_id)): #len(scene_id)
        print('process...', scene_id[i], '...', i+1, '/', len(scene_id))
        # XYZRGB vertices from the cleaned mesh.
        scene_namergb = os.path.join(root, scene_id[i], scene_id[i]+'_vh_clean_2.ply')
        scene_xyzlabelrgb = PlyData.read(scene_namergb)
        scene_vertex_rgb = scene_xyzlabelrgb['vertex']
        scene_data_tmp = np.stack((scene_vertex_rgb['x'], scene_vertex_rgb['y'],
                                   scene_vertex_rgb['z'], scene_vertex_rgb['red'],
                                   scene_vertex_rgb['green'], scene_vertex_rgb['blue']), axis = -1).astype(np.float32)
        scene_points_num = scene_data_tmp.shape[0]
        scene_point_id = np.array([c for c in range(scene_points_num)])
        if split != 'test':
            # Labels live in a separate .labels.ply mesh for train/eval.
            scene_name = os.path.join(root, scene_id[i], scene_id[i]+'_vh_clean_2.labels.ply')
            scene_xyzlabel = PlyData.read(scene_name)
            scene_vertex = scene_xyzlabel['vertex']
            scene_data_label_tmp = scene_vertex['label']
            scene_data_tmp, scene_data_label_tmp, scene_point_id_tmp = remove_unano(scene_data_tmp, scene_data_label_tmp, scene_point_id)
        else:
            # Test split has no ground truth; use all-zero labels.
            scene_data_label_tmp = np.zeros((scene_data_tmp.shape[0])).astype(np.int32)
            scene_point_id_tmp = scene_point_id
        # Remap raw 0..40 label ids onto the 21 evaluated class indices.
        scene_data_label_tmp = label_map[scene_data_label_tmp]
        scene_data.append(scene_data_tmp)
        scene_data_labels.append(scene_data_label_tmp)
        scene_data_id.append(scene_point_id_tmp)
        scene_data_num.append(scene_points_num)
    save_path = os.path.join(_cfg['scannet_pickle'], "scannet_%s_rgb21c_pointid.pickle"%(split))
    pickle_out = open(save_path, "wb")
    pickle.dump(scene_data, pickle_out, protocol=0)
    pickle.dump(scene_data_labels, pickle_out, protocol=0)
    pickle.dump(scene_data_id, pickle_out, protocol=0)
    pickle.dump(scene_data_num, pickle_out, protocol=0)
    pickle_out.close()
if __name__ =='__main__':
    # Build the train/eval/test pickles from the raw ScanNet root in the config.
    root = _cfg['scannet_raw']
    gen_pickle(split='train', root=root)
    gen_pickle(split='eval', root=root)
    gen_pickle(split='test', root=root)
print('Done!!!') | 3,860 | 39.642105 | 137 | py |
MPMQA | MPMQA-master/parser.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import deepspeed
def _str2bool(v):
    """argparse-friendly boolean parser: accepts true/false, yes/no, 1/0."""
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % (v,))


def get_base_parser():
    """Build the argument parser shared by training/inference scripts.

    Bugfix: boolean options previously used ``type=bool``, which evaluates
    ANY non-empty string (including "False") as True; they now parse via
    ``_str2bool`` while keeping the same defaults.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default=None, help='load args from json type config file. It will override the parser setting')
    # Output
    parser.add_argument('--output_dir', type=str, default='./output')
    # Dist
    parser.add_argument('--local_rank', type=int, default=0)
    parser = deepspeed.add_config_arguments(parser)
    # Model
    parser.add_argument('--pretrained_dir', type=str, default='./pretrained/t5-base')
    parser.add_argument('--checkpoint', type=str, default=None)
    parser.add_argument('--load_module_only', action='store_true', default=False, help='only load model weights not optimizer')
    # Tasks
    ## QA
    parser.add_argument('--text_answer', type=_str2bool, default=True, help='perform question answering')
    ## VA
    parser.add_argument('--visual_answer', action='store_true', default=False)
    parser.add_argument('--va_type', type=str, default='tokenwise', choices=['global', 'tokenwise'], help='predict related region at token/global level')
    parser.add_argument('--va_module_type', type=str, default='map', choices=['map', 'linear', 'mlp'])
    parser.add_argument('--va_label_smoothing', type=float, default=0.0, help='label smoothing value when performing bce loss')
    parser.add_argument('--min_va', type=int, default=1, help='Minimum number of related regions predicted by the saliency detector')
    ## Retrieval
    parser.add_argument('--page_contrast', action='store_true', help='contrastively optimize question and page feature to be similar. loss: nce')
    parser.add_argument('--page_contrast_bidirection', action='store_true', help='whether to calculate nce loss bidirectionally')
    parser.add_argument('--page_contrast_type', default='global', choices=['global', 'tokenwise'], help='page contrast at global/local level')
    parser.add_argument('--page_contrast_t', type=float, default=0.01, help='nce temperature for page contrast')
    parser.add_argument('--page_contrast_module_type', type=str, default=None, choices=[None, 'linear', 'mlp'])
    # Data
    parser.add_argument('--root', type=str, default='data/VRManual', help='data root path')
    parser.add_argument('--mask', action='store_true', default=False, help='add <mask> token')
    # RoI feature extractor
    parser.add_argument('--roi_config', type=str, default='detector/VG-BUA.yaml')
    parser.add_argument('--roi_model', type=str, default='detector/pretrained/bua-d2-frcn-r101.pth')
    parser.add_argument('--roi_bua', type=_str2bool, default=True)
    # Training
    parser.add_argument('--no_cross', action='store_true', help='if set, do not encode question and page jointly.')
    parser.add_argument('--debug', action='store_true', default=False, help='debug mode, only load the first batch')
    parser.add_argument('--start_epoch', type=int, default=0, help='set when resume training')
    parser.add_argument('--epoch', type=int, default=7)
    parser.add_argument('--save_best_last', type=_str2bool, default=True, help='if set, only the best and last ckeckpoint will be saved')
    parser.add_argument('--weight_decay', type=float, default=0)
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--inf_batch_size', type=int, default=3)
    parser.add_argument('--n_workers', type=int, default=4)
    parser.add_argument('--seed', type=int, default=42)
    # Inference & Evaluate
    parser.add_argument('--use_retrieved_qa2dataid', type=_str2bool, default=False, help='whether to use top-1 retrieved page to perform QA/sd')
    parser.add_argument('--retrieved_qa2dataid', default=None, help='Use retrieved top-1 page to perform qa/sd. A dict of split2paths')
    parser.add_argument('--val_metric', type=str, default='ROUGE_L')
    parser.add_argument('--val_metric_aggregate', choices=['mean', 'harmonic_mean'], default='harmonic_mean')
    parser.add_argument('--eval_set', type=str, default='test')
    parser.add_argument('--beam_size', type=int, default=4)
    parser.add_argument('--length_penalty', type=float, default=1.0)
    parser.add_argument('--max_dec_len', type=int, default=20)
    parser.add_argument('--max_page_len', type=int, default=1024)
    return parser
| 4,900 | 57.345238 | 153 | py |
MPMQA | MPMQA-master/evaluate.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import time
import torch
from torch.nn.functional import normalize as norm
import deepspeed
import numpy as np
import torch.distributed as dist
from tqdm import tqdm
from math import ceil
# from torch.optim import Adam
from torch.utils.data import DataLoader
from collections import defaultdict
from utils import set_seed, get_logger, obj_to_cuda, load_ckpt, \
boardcast_str, gather_list, remove_repeat_sample, retrieval_eval, merge_recall, \
unique_index_and_value
from parser import get_base_parser
from dataset.mqa_dataset import get_mqa_loader
from dataset.mqa_page_contrast import MQAContrastDataset, mqa_contrast_collate_fn
from models.mqa_model import MQAT5Model
from models.utils import pad_features
from sklearn.metrics import precision_score, recall_score, f1_score
from collections import OrderedDict
from scripts.compute_metrics import compute_visual_answer_by_region_cls, compute_qa_score_by_region_cls
from nlgeval import NLGEval
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
".", "?", "!", ",", ":", "-", "--", "...", ";"]
nlgeval = NLGEval(no_skipthoughts=True, no_glove=True) # loads the models
def remove_punc(line):
    """Strip trailing whitespace, then drop space-delimited punctuation tokens."""
    tokens = line.rstrip().split(' ')
    return ' '.join(tok for tok in tokens if tok not in PUNCTUATIONS)
def compute_visual_answer_metics(pred_related_regions, gt_regions, all_regions):
    """Score predicted related regions against ground truth.

    Args:
        pred_related_regions: per-instance containers of predicted region ids.
        gt_regions: per-instance containers of ground-truth region ids.
        all_regions: per-instance containers of all candidate region ids.

    Returns:
        dict with instance-averaged (macro) and pooled (micro, 'all_*')
        precision/recall/f1 over the binarized region labels.
    """
    all_y_true = []
    all_y_pred = []
    instance_p = 0
    instance_r = 0
    instance_f1 = 0
    no_preds = 0
    for instance_region_ids, instance_preds, instance_gts in zip(
            all_regions, pred_related_regions, gt_regions):
        if len(instance_preds) == 0:
            no_preds += 1
        # Binarize every candidate region against predictions / ground truth.
        y_pred = [1 if _id in instance_preds else 0 for _id in instance_region_ids]
        y_true = [1 if _id in instance_gts else 0 for _id in instance_region_ids]
        instance_p += precision_score(y_true, y_pred, average='binary')
        instance_r += recall_score(y_true, y_pred, average='binary')
        instance_f1 += f1_score(y_true, y_pred, average='binary')
        all_y_true.extend(y_true)
        all_y_pred.extend(y_pred)
    n_instances = len(all_regions)
    metrics = {
        'instance_precision': instance_p / n_instances,
        'instance_recall': instance_r / n_instances,
        'instance_f1': instance_f1 / n_instances,
        'all_precision': precision_score(all_y_true, all_y_pred, average='binary'),
        'all_recall': recall_score(all_y_true, all_y_pred, average='binary'),
        'all_f1': f1_score(all_y_true, all_y_pred, average='binary'),
    }
    if no_preds > 0:
        print('#########################################')
        print(f'{no_preds}/{len(all_regions)} instances has no predictions!!')
        print('#########################################')
    return metrics
def evaluate_page_contrast(args, model, page_contrast_dataset, logger, save_fn="temp.json"):
    """Evaluate question<->page retrieval, one manual at a time.

    Encodes questions and pages with the model's two-stream encoder on all
    ranks, gathers features to rank 0, computes a similarity matrix
    ('global' pooled features or 'tokenwise' late interaction), scores
    retrieval per manual, dumps per-manual artifacts, and finally merges
    recall metrics across manuals.  Only rank 0 returns real metrics; other
    ranks return an all-zero defaultdict.
    """
    model.eval()
    manuals = page_contrast_dataset.manuals
    all_metrics = []
    save_fn = ''.join(save_fn.split('.')[:-1])  # strip extension for the output dir name
    qa2topimgs = OrderedDict()
    for i, manual_name in tqdm(enumerate(manuals), total=len(manuals)):
        torch.cuda.empty_cache()
        page_contrast_dataset.set_manual(manual_name)
        # Distributed sampler without shuffling so gathered features align with ids.
        sampler = torch.utils.data.DistributedSampler(
            page_contrast_dataset,
            num_replicas=dist.get_world_size(),
            rank=dist.get_rank(),
            shuffle=False
        )
        sampler = torch.utils.data.BatchSampler(
            sampler,
            batch_size=args.inf_batch_size,
            drop_last=False
        )
        dataloader = DataLoader(
            dataset=page_contrast_dataset,
            shuffle=False,
            batch_sampler=sampler,
            num_workers=args.n_workers,
            collate_fn=mqa_contrast_collate_fn
        )
        qaids = []
        dataids = []
        q_features = []
        page_features = []
        q_mask = []
        page_mask = []
        for j, batch in tqdm(enumerate(dataloader), total=len(dataloader)):
            batch = obj_to_cuda(batch)
            qaids.extend(batch['qaids'])
            dataids.extend(batch['dataids'])
            with torch.no_grad():
                # tokenwise scoring needs per-token hidden states, not pooled vectors
                return_hidden = (args.page_contrast_type == 'tokenwise')
                try:
                    question_feature, context_feature = model.module.two_stream_encoding(return_hidden=return_hidden, **batch)
                except RuntimeError:
                    # Presumably an OOM: free the cache and retry once — TODO confirm.
                    torch.cuda.empty_cache()
                    question_feature, context_feature = model.module.two_stream_encoding(return_hidden=return_hidden, **batch)
                if args.page_contrast_module_type is not None:
                    question_feature = model.module.page_contrast_module(question_feature)
                    context_feature = model.module.page_contrast_module(context_feature)
                # L2-normalize so dot products are cosine similarities
                # (norm is imported at file level; verify it normalizes rather than reduces).
                question_feature = norm(question_feature, dim=-1)
                context_feature = norm(context_feature, dim=-1)
            q_features.append(question_feature.detach().cpu())
            page_features.append(context_feature.detach().cpu())
            q_mask.append(batch['question_attn_mask'])
            page_mask.append(batch['context_attn_mask'])
        # Collect every rank's shard; DistributedSampler pads with repeats,
        # hence the unique_index_and_value deduplication below.
        qaids = gather_list(qaids)
        dataids = gather_list(dataids)
        q_features = gather_list(q_features)
        page_features = gather_list(page_features)
        q_mask = gather_list(q_mask)
        page_mask = gather_list(page_mask)
        qaids, unique_qa_index = unique_index_and_value(qaids)
        dataids, unique_page_index = unique_index_and_value(dataids)
        if dist.get_rank() == 0:
            if args.page_contrast_type == 'global':
                q_features = torch.cat(q_features, dim=0)
                page_features = torch.cat(page_features, dim=0)
                q_features = q_features[unique_qa_index]
                page_features = page_features[unique_page_index]
                sim_matrix = torch.matmul(q_features, page_features.t())
                metrics, qa2topimg = retrieval_eval(sim_matrix.float(), qaids, dataids,
                    page_contrast_dataset.qaid2dataid, page_contrast_dataset.dataid2qaids, return_top_imgs=True)
            elif args.page_contrast_type == 'tokenwise':
                # Variable-length token features: pad before stacking.
                q_features = pad_features(q_features)
                page_features = pad_features(page_features)
                q_mask = pad_features(q_mask)
                page_mask = pad_features(page_mask)
                q_features = q_features[unique_qa_index]
                q_mask = q_mask[unique_qa_index]
                page_features = page_features[unique_page_index]
                page_mask = page_mask[unique_page_index]
                # sim_matrix = torch.matmul(q_features, page_features.t())
                with torch.no_grad():
                    try:
                        sim_matrix_qc, sim_matrix_cq = model.module.similarity_score(q_features.cuda(), page_features.cuda(), q_mask.cuda(), page_mask.cuda())
                    except RuntimeError:
                        # OOM fallback: retry on CPU tensors — TODO confirm intent.
                        torch.cuda.empty_cache()
                        sim_matrix_qc, sim_matrix_cq = model.module.similarity_score(q_features, page_features, q_mask, page_mask)
                sim_matrix_qc, sim_matrix_cq = sim_matrix_qc.float(), sim_matrix_cq.float()
                metrics, qa2topimg = retrieval_eval(sim_matrix_qc, qaids, dataids,
                    page_contrast_dataset.qaid2dataid, page_contrast_dataset.dataid2qaids, sim_matrix_cq, return_top_imgs=True)
                sim_matrix = sim_matrix_qc
                del sim_matrix_cq
            # Sanity: manuals must not share qa ids, and qa2topimg must cover this manual exactly.
            assert len(qa2topimgs.keys() & qa2topimg.keys()) == 0
            assert len(qa2topimg.keys() & set(qaids)) == len(qa2topimg.keys()) == len(qaids)
            qa2topimgs.update(qa2topimg)
            print(f'Manual: {manual_name}')
            for metric, score in metrics.items():
                print(f'{metric}: {score:.3f}')
            all_metrics.append(metrics)
            predict_dir = os.path.join(args.output_dir, 'predict', page_contrast_dataset.split, 'page_contrast', save_fn, manual_name)
            os.makedirs(predict_dir, exist_ok=True)
            with open(os.path.join(predict_dir, 'metrics.json'), 'w') as f:
                json.dump(metrics, f, indent=1)
            with open(os.path.join(predict_dir, 'qaids.json'), 'w') as f:
                json.dump(qaids, f, indent=1)
            with open(os.path.join(predict_dir, 'dataids.json'), 'w') as f:
                json.dump(dataids, f, indent=1)
            with open(os.path.join(predict_dir, 'qaid2dataid.json'), 'w') as f:
                json.dump(page_contrast_dataset.qaid2dataid, f, indent=1)
            with open(os.path.join(predict_dir, 'dataid2qaids.json'), 'w') as f:
                json.dump(page_contrast_dataset.dataid2qaids, f, indent=1)
            np.save(os.path.join(predict_dir, 'score_matrix.npy'), sim_matrix.detach().cpu().numpy())
        else:
            metrics = defaultdict(int)
    if dist.get_rank() == 0:
        merged_metrics = merge_recall(all_metrics)
        predict_dir = os.path.join(args.output_dir, 'predict', page_contrast_dataset.split, 'page_contrast', save_fn)
        os.makedirs(predict_dir, exist_ok=True)
        path = os.path.join(predict_dir, 'all.json')
        with open(path, 'w') as f:
            json.dump(merged_metrics, f, indent=1)
        path = os.path.join(predict_dir, 'qa2topimgs.json')
        with open(path, 'w') as f:
            json.dump(qa2topimgs, f, indent=1)
        logger.info('Average page retrieval performance')
        for metric, score in merged_metrics.items():
            logger.info(f'{metric}: {score:.3f}')
        return merged_metrics
    else:
        return defaultdict(int)
def evaluate_visual_answer(args, model, val_loader, logger, sd_save_fn='temp.json', split='val'):
    """Evaluate region (visual answer) prediction over a DataLoader.

    Runs inference on every rank, trims the samples that DistributedSampler
    repeated to pad the last batch, all-gathers predictions, then on rank 0
    dumps per-sample predictions and computes overall + per-region-class
    metrics.  Non-zero ranks return an all-zero defaultdict.
    """
    model.eval()
    pred_related_regions = []
    gt_regions = []
    qa_ids = []
    all_regions = []
    for step, batch in tqdm(enumerate(val_loader), ncols=50, total=len(val_loader)):
        batch = obj_to_cuda(batch)
        with torch.no_grad():
            pred_related_region = model.module.visual_answer_inference(**batch)
        qa_ids.extend(batch['qa_ids'])
        pred_related_regions.extend(pred_related_region)
        gt_regions.extend(batch['related_regions'])
        all_regions.extend(list(batch['region_positions'][i].keys()) for i in range(len(batch['qa_ids'])))
    # Remove repeat samples
    N_samples = len(val_loader.dataset)
    # Each rank keeps only its genuine share; DistributedSampler pads by repetition.
    samples_per_rank = ceil((N_samples-dist.get_rank())/dist.get_world_size())
    qa_ids = qa_ids[:samples_per_rank]
    pred_related_regions = pred_related_regions[:samples_per_rank]
    gt_regions = gt_regions[:samples_per_rank]
    all_regions = all_regions[:samples_per_rank]
    qa_ids_list = [None] * dist.get_world_size()
    dist.all_gather_object(qa_ids_list, qa_ids)
    pred_related_regions_list = [None] * dist.get_world_size()
    dist.all_gather_object(pred_related_regions_list, pred_related_regions)
    gt_regions_list = [None] * dist.get_world_size()
    dist.all_gather_object(gt_regions_list, gt_regions)
    all_regions_list = [None] * dist.get_world_size()
    dist.all_gather_object(all_regions_list, all_regions)
    if dist.get_rank() == 0:
        qa_ids, pred_related_regions, gt_regions, all_regions = [], [], [], []
        gt_region_types, pred_region_types, all_region_types = [], [], []
        for i in range(dist.get_world_size()):
            qa_ids.extend(qa_ids_list[i])
            pred_related_regions.extend(pred_related_regions_list[i])
            gt_regions.extend(gt_regions_list[i])
            all_regions.extend(all_regions_list[i])
        # Map region ids to semantic classes (ids missing from rid2cls are dropped).
        for rids in gt_regions:
            gt_region_types.append([val_loader.dataset.rid2cls[r] for r in rids if r in val_loader.dataset.rid2cls])
        for rids in pred_related_regions:
            pred_region_types.append([val_loader.dataset.rid2cls[r] for r in rids if r in val_loader.dataset.rid2cls])
        for rids in all_regions:
            all_region_types.append([val_loader.dataset.rid2cls[r] for r in rids if r in val_loader.dataset.rid2cls])
        predict_items = []
        for qa_id, pred_related_region, gt_region, all_region, gt_region_type, pred_region_type, all_region_type in zip(qa_ids, pred_related_regions, gt_regions, all_regions, gt_region_types, pred_region_types, all_region_types):
            predict_items.append({
                'image_id': qa_id,
                'pred_regions': pred_related_region,
                'pred_region_cls': pred_region_type,
                'gt_regions': gt_region,
                'gt_region_cls': gt_region_type,
                'all_regions': all_region,
                'all_region_cls': all_region_type
            })
        predict_dir = os.path.join(args.output_dir, 'predict', split, 'related_regions')
        os.makedirs(predict_dir, exist_ok=True)
        path = os.path.join(predict_dir, sd_save_fn)
        with open(path, 'w') as f:
            json.dump(predict_items, f, indent=1)
        metrics = compute_visual_answer_metics(pred_related_regions, gt_regions, all_regions)
        cls_metrics = compute_visual_answer_by_region_cls(predict_items, is_print=False)
        cls_metrics['All'] = metrics
        path = os.path.join(predict_dir, sd_save_fn.replace('.json', '_metrics.json'))
        with open(path, 'w') as f:
            json.dump(cls_metrics, f, indent=1)
        for metric, score in metrics.items():
            logger.info(f'{metric}: {score:.3f}')
        return metrics
    else:
        return defaultdict(int)
def evaluate_question_answer(args, model, val_loader, logger, save_fn='temp.json', split='val'):
    """Evaluate textual answer generation (greedy or beam search).

    Generates answers on every rank, trims padding repeats, all-gathers to
    rank 0, dumps predictions, and computes NLG metrics overall and split
    by ground-truth region class.  Non-zero ranks return zeros.
    """
    predictions = []
    questions = []
    gts = []
    gt_regions = []
    qa_ids = []
    predict_items = []
    image_paths = []
    model.eval()
    for step, batch in tqdm(enumerate(val_loader), ncols=50, total=len(val_loader)):
        batch = obj_to_cuda(batch)
        with torch.no_grad():
            # beam_size <= 1 (or unset) falls back to greedy decoding
            if args.beam_size is None or args.beam_size <= 1:
                _, prediction = model.module.greedy_inference(**batch)
            else:
                _, prediction = model.module.beam_search(beam_size=args.beam_size, length_penalty=args.length_penalty, **batch)
        predictions.extend(prediction)
        qa_ids.extend(batch['qa_ids'])
        image_paths.extend(batch['image_paths'])
        questions.extend(model.module.tokenizer.batch_decode(batch['question_ids'], skip_special_tokens=True))
        gts.extend(model.module.tokenizer.batch_decode(batch['answer_ids'], skip_special_tokens=True))
        gt_regions.extend(batch['related_regions'])
    for qa_id, image_path, question, predict, gt, gt_region in zip(qa_ids, image_paths, questions, predictions, gts, gt_regions):
        predict_items.append({
            'image_id': qa_id,
            'image_path': image_path,
            'question': question,
            'caption': predict,
            'gt': gt,
            'gt_regions': gt_region,
            'gt_region_cls': [val_loader.dataset.rid2cls[r] for r in gt_region if r in val_loader.dataset.rid2cls]
        })
    # Remove repeat samples
    N_samples = len(val_loader.dataset)
    samples_per_rank = ceil((N_samples-dist.get_rank())/dist.get_world_size())
    predict_items = predict_items[:samples_per_rank]
    predict_list = [None] * dist.get_world_size()
    dist.all_gather_object(predict_list, predict_items)
    if dist.get_rank() == 0:
        candidates = []
        for predict in predict_list:
            candidates.extend(predict)
        # assert len(candidates) == N_samples
        # Stable ordering: numeric sort when ids are ints, lexicographic otherwise.
        try:
            candidates.sort(key=lambda x: int(x['image_id']))
        except:
            candidates.sort(key=lambda x: x['image_id'])
        predict_dir = os.path.join(args.output_dir, 'predict', split)
        os.makedirs(predict_dir, exist_ok=True)
        path = os.path.join(predict_dir, save_fn)
        with open(path, 'w') as f:
            json.dump(candidates, f, indent=1)
        all_predictions = [x['caption'] for x in candidates]
        all_answers = [x['gt'] for x in candidates]
        # Lowercase + strip punctuation before NLG metrics, as in captioning eval.
        all_predictions = [remove_punc(sent).lower() for sent in all_predictions]
        all_answers = [[remove_punc(sent).lower() for sent in all_answers]]
        metrics = nlgeval.compute_metrics(all_answers, all_predictions)
        metrics_divide_by_cls = compute_qa_score_by_region_cls(candidates, is_print=False)
        metrics_divide_by_cls['All'] = metrics
        path = os.path.join(predict_dir, save_fn.replace('.json', '_metrics.json'))
        with open(path, 'w') as f:
            json.dump(metrics_divide_by_cls, f, indent=1)
        for metric, score in metrics.items():
            logger.info(f'{metric}: {score:.3f}')
        return metrics
    else:
        return defaultdict(int)
def evaluate_ds(args, model, val_loader, logger, save_fn='temp.json', split='val'):
    """Run every enabled evaluation task (page retrieval, region prediction,
    answer generation) on *split* and return the merged metric dict.

    Task selection is driven by args.page_contrast / args.visual_answer /
    args.text_answer flags; later tasks may consume the retrieval output
    when args.use_retrieved_qa2dataid is set.
    """
    logger.info(f'Evaluating on {split} split...')
    metrics = defaultdict(int)
    if args.page_contrast:
        dataset = MQAContrastDataset(args, args.root, model.module.tokenizer, split)
        recall_metrics = evaluate_page_contrast(args, model, dataset, logger, save_fn=save_fn)
        metrics.update(recall_metrics)
        torch.cuda.empty_cache()
        if args.use_retrieved_qa2dataid:
            # Sync before switching the loader to retrieved page assignments.
            dist.barrier()
            val_loader.dataset.set_use_retrieved_qa2dataid()
    if args.visual_answer:
        sd_metrics = evaluate_visual_answer(args, model, val_loader, logger, sd_save_fn=save_fn, split=split)
        metrics.update(sd_metrics)
        torch.cuda.empty_cache()
    if args.text_answer:
        qa_metrics = evaluate_question_answer(args, model, val_loader, logger, save_fn=save_fn, split=split)
        metrics.update(qa_metrics)
        torch.cuda.empty_cache()
    if dist.get_rank() == 0:
        for metric, score in metrics.items():
            logger.info(f'{metric}: {score:.3f}')
    return metrics
def main(args):
    """Evaluation entry point: set up logging/distributed state, build the
    model and one loader per requested split, optionally load a checkpoint,
    and run evaluate_ds on each split."""
    set_seed(args.seed)
    torch.cuda.set_device(args.local_rank)
    os.makedirs(args.output_dir, exist_ok=True)
    logger = get_logger(os.path.join(args.output_dir, 'log.txt'))
    logger.info(args)
    if args.deepspeed:
        deepspeed.init_distributed()
    nowtime = None
    # Rank 0 creates the timestamped run directory and snapshots the config.
    if not args.deepspeed or (args.deepspeed and dist.get_rank() == 0):
        nowtime = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
        os.makedirs(os.path.join(args.output_dir, 'eval_opt', nowtime), exist_ok=True)
        with open(os.path.join(args.output_dir, 'eval_opt', nowtime, 'config.json'), 'w') as f:
            json.dump(args.__dict__, f, indent=1, ensure_ascii=False)
        if args.deepspeed_config is not None:
            os.system(f'cp {args.deepspeed_config} {os.path.join(args.output_dir, "eval_opt", nowtime)}')
    if args.deepspeed:
        # Other ranks learn the run directory name from rank 0.
        nowtime = boardcast_str(nowtime, src_rank=0)
        logger = get_logger(os.path.join(args.output_dir, 'eval_opt', nowtime, f'log.{dist.get_rank()}.txt'))
    else:
        logger = get_logger(os.path.join(args.output_dir, 'eval_opt', nowtime, f'log.txt'))
    logger.info(args)
    model = MQAT5Model(args, pretrained_dir=args.pretrained_dir)
    split2loader = OrderedDict()
    if isinstance(args.eval_set, str):
        splits = [args.eval_set]
    else:
        splits = args.eval_set
    for split in splits:
        split2loader[split] = get_mqa_loader(args, root=args.root, tokenizer=model.tokenizer, batch_size=args.batch_size, split=split, num_workers=args.n_workers, eval_on_train=True)
    model.resize_token_embeddings()
    model.cuda()
    if args.deepspeed:
        model, _, _, _ = deepspeed.initialize(
            args=args,
            model=model,
            model_parameters=model.parameters()
        )
    if args.checkpoint:
        load_ckpt(args, args.checkpoint, model, logger=logger, load_module_only=True)
    for split, val_loader in split2loader.items():
        evaluate_ds(args, model, val_loader, logger, save_fn=args.save_fn, split=split)
if __name__ == '__main__':
    parser = get_base_parser()
    parser.add_argument('--save_fn', type=str, default='temp.json')
    parser.add_argument('--sd_save_fn', type=str, default='temp.json')
    args = parser.parse_args()
    # Values from --config override CLI defaults (except local_rank, which
    # must stay as assigned by the distributed launcher).
    if args.config is not None:
        args_dict = json.load(open(args.config, 'r', encoding='utf-8'))
        for key, value in args_dict.items():
            if key == 'local_rank':
                continue
            setattr(args, key, value)
    main(args)
| 21,617 | 42.761134 | 229 | py |
MPMQA | MPMQA-master/utils.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
import random
import logging
import argparse
import deepspeed
import numpy as np
from math import ceil
import torch.distributed as dist
from collections import OrderedDict, defaultdict
def obj_to_cuda(obj):
    """Recursively move every tensor inside *obj* to the current CUDA device.

    Lists, tuples and dicts are rebuilt with their elements converted;
    non-tensor leaves are returned unchanged.
    """
    if isinstance(obj, torch.Tensor):
        return obj.cuda()
    elif isinstance(obj, list):
        return [obj_to_cuda(t) for t in obj]
    elif isinstance(obj, tuple):
        # Bug fix: this branch previously returned a generator expression,
        # so tuple inputs came back as one-shot generators, not tuples.
        return tuple(obj_to_cuda(t) for t in obj)
    elif isinstance(obj, dict):
        return {key: obj_to_cuda(t) for key, t in obj.items()}
    else:
        return obj
def save_ckpt(args, model, optimizer, output_dir, epoch, logger):
    """Save a training checkpoint for *epoch* under output_dir/ckpts/.

    With deepspeed the engine's own save_checkpoint is used (tag is the
    checkpoint directory name); otherwise a plain torch checkpoint with
    model/optimizer state and epoch is written.
    """
    os.makedirs(os.path.join(output_dir, 'ckpts'), exist_ok=True)
    ckpt_path = os.path.join(output_dir, 'ckpts', f'checkpoint.{epoch}')
    logger.info(f'Saving checkpoint {ckpt_path}...')
    if args.deepspeed:
        ckpt_path = ckpt_path.rstrip('/')
        tag = ckpt_path.split('/')[-1]
        load_dir = '/'.join(ckpt_path.split('/')[:-1])
        model.save_checkpoint(load_dir, tag)
    else:
        checkpoint = OrderedDict()
        checkpoint['model'] = model.state_dict()
        checkpoint['optimizer'] = optimizer.state_dict()
        checkpoint['epoch'] = epoch
        # Old-style serialization kept for compatibility with older torch loaders.
        torch.save(checkpoint, ckpt_path, _use_new_zipfile_serialization=False)
def load_ckpt(args, ckpt_path, model, optimizer=None, logger=None, load_module_only=False):
    """Load a checkpoint produced by save_ckpt.

    :param load_module_only: when True, restore only model weights and skip
        optimizer / lr-scheduler state (used when evaluating or fine-tuning
        from a pretrained run).
    """
    if logger is not None:
        logger.info(f'Loading model from {ckpt_path}')
    if args.deepspeed:
        ckpt_path = ckpt_path.rstrip('/')
        tag = ckpt_path.split('/')[-1]
        load_dir = '/'.join(ckpt_path.split('/')[:-1])
        model.load_checkpoint(load_dir, tag, load_module_only=load_module_only,
                load_module_strict=False,
                load_optimizer_states=(not load_module_only),
                load_lr_scheduler_states=(not load_module_only))
    else:
        ckpt = torch.load(ckpt_path, map_location='cpu')
        model.load_state_dict(ckpt['model'])
        if optimizer is not None and not load_module_only:
            if logger is not None:
                logger.info(f'Loading optimizer')
            optimizer.load_state_dict(ckpt['optimizer'])
@torch.no_grad()
def retrieval_eval(score_matrix, txt_ids, img_ids, txt2img, img2txts, score_matrix_2=None, return_top_imgs=False):
    """Score bidirectional text<->image retrieval from a similarity matrix.

    :param score_matrix: (num_txt, num_img) text->image similarity scores
    :param txt2img: ground-truth image id for each text id (one per text)
    :param img2txts: ground-truth text ids for each image id (possibly many)
    :param score_matrix_2: optional separate image->text score matrix
        (transposed before use); defaults to reusing score_matrix
    :param return_top_imgs: also return {txt_id: top-1 img_id}
    :return: dict of R@1/3/5/10 in both directions plus means (and the
        top-image map when requested)
    """
    # image retrieval
    img2j = {i: j for j, i in enumerate(img_ids)}
    _, rank_txt = score_matrix.topk(min(10, score_matrix.size(1)), dim=1)
    txt2topimg = OrderedDict()
    topimgs = rank_txt[:, 0]
    for i, txt_id in enumerate(txt_ids):
        txt2topimg[txt_id] = img_ids[topimgs[i]]
    if score_matrix.size(1) < 10:
        print(f'WARNING: find {score_matrix.size(1)} candidate images, less than 10.')
    # Position of the ground-truth image within each text's top-10 ranking.
    gt_img_j = torch.LongTensor([img2j[txt2img[txt_id]]
                                 for txt_id in txt_ids],
                                ).to(rank_txt.device
                                     ).unsqueeze(1).expand_as(rank_txt)
    rank = (rank_txt == gt_img_j).nonzero()[:,1]
    if rank.numel():
        ir_r1 = (rank < 1).sum().item() / len(txt_ids)
        ir_r3 = (rank < 3).sum().item() / len(txt_ids)
        ir_r5 = (rank < 5).sum().item() / len(txt_ids)
        ir_r10 = (rank < 10).sum().item() / len(txt_ids)
    else:
        ir_r1, ir_r3, ir_r5, ir_r10 = 0, 0, 0, 0
    # text retrieval
    txt2i = {t: i for i, t in enumerate(txt_ids)}
    if score_matrix_2 is not None:
        score_matrix = score_matrix_2.t()
    _, rank_img = score_matrix.topk(min(10, score_matrix.size(0)), dim=0)
    if score_matrix.size(0) < 10:
        print(f'WARNING: find {score_matrix.size(0)} candidate txts, less than 10.')
    tr_r1, tr_r3, tr_r5, tr_r10 = 0, 0, 0, 0
    for j, img_id in enumerate(img_ids):
        gt_is = [txt2i[t] for t in img2txts[img_id]]
        # Best (lowest) rank among all ground-truth texts; 10 = not in top-10.
        ranks = [(rank_img[:, j] == i).nonzero() for i in gt_is]
        rank = min([10] + [r.item() for r in ranks if r.numel()])
        if rank < 1:
            tr_r1 += 1
        if rank < 3:
            tr_r3 += 1
        if rank < 5:
            tr_r5 += 1
        if rank < 10:
            tr_r10 += 1
    tr_r1 /= len(img_ids)
    tr_r3 /= len(img_ids)
    tr_r5 /= len(img_ids)
    tr_r10 /= len(img_ids)
    # tr_mean = (tr_r1 + tr_r5 + tr_r10) / 3
    # ir_mean = (ir_r1 + ir_r5 + ir_r10) / 3
    # Means use R@1/3/5 (not R@10), see commented-out alternative above.
    tr_mean = (tr_r1 + tr_r3 + tr_r5) / 3
    ir_mean = (ir_r1 + ir_r3 + ir_r5) / 3
    r_mean = (tr_mean + ir_mean) / 2
    eval_log = {'qa2page_r1': ir_r1,
                'qa2page_r3': ir_r3,
                'qa2page_r5': ir_r5,
                'qa2page_r10': ir_r10,
                'qa2page_r_mean': ir_mean,
                'qa_nums': len(txt_ids),
                'page2qa_r1': tr_r1,
                'page2qa_r3': tr_r3,
                'page2qa_r5': tr_r5,
                'page2qa_r10': tr_r10,
                'page2qa_r_mean': tr_mean,
                'page_nums': len(img_ids),
                'r_mean': r_mean,
                }
    if return_top_imgs:
        return eval_log, txt2topimg
    else:
        return eval_log
def merge_recall(all_metrics):
    """Aggregate per-manual retrieval metrics into dataset-level metrics.

    qa2page recalls are weighted by each manual's page count and page2qa
    recalls by its QA count, matching how each direction was normalised in
    retrieval_eval.
    """
    merged_metrics = defaultdict(int)
    total_qas = sum(m['qa_nums'] for m in all_metrics)
    total_pages = sum(m['page_nums'] for m in all_metrics)
    merged_metrics['qa_nums'] = total_qas
    merged_metrics['page_nums'] = total_pages
    qa2page_keys = ('qa2page_r1', 'qa2page_r3', 'qa2page_r5', 'qa2page_r10', 'qa2page_r_mean')
    page2qa_keys = ('page2qa_r1', 'page2qa_r3', 'page2qa_r5', 'page2qa_r10', 'page2qa_r_mean')
    for key in qa2page_keys:
        merged_metrics[key] = sum(m[key] * m['page_nums'] for m in all_metrics) / total_pages
    for key in page2qa_keys:
        merged_metrics[key] = sum(m[key] * m['qa_nums'] for m in all_metrics) / total_qas
    merged_metrics['r_mean'] = (merged_metrics['qa2page_r_mean'] + merged_metrics['page2qa_r_mean']) / 2
    return merged_metrics
def harmonic_mean(data):
    """Return the harmonic mean of *data*.

    Returns 0 when *data* contains a zero (the harmonic mean is dominated
    by zeros) and also when *data* is empty — the original raised
    ZeroDivisionError (0/0) on empty input.
    """
    if not data:
        return 0
    total = 0
    for value in data:
        if value == 0:
            return 0
        total += 1 / value
    return len(data) / total
def boardcast_str(s, src_rank=0):
    """Broadcast string *s* from *src_rank* to all ranks and return it.

    On non-source ranks the input value of *s* is ignored; the value held
    by *src_rank* is returned everywhere.  (Name typo 'boardcast' kept for
    caller compatibility.)
    """
    object_list = [s]
    dist.broadcast_object_list(object_list=object_list, src=src_rank)
    return object_list[0]
def gather_list(list_to_gather):
    """All-gather a per-rank list and flatten the shards into one list.

    Every rank receives the same concatenation, ordered by rank.
    """
    results = [None] * dist.get_world_size()
    dist.all_gather_object(results, list_to_gather)
    open_nest_results = []
    for result in results:
        open_nest_results.extend(result)
    return open_nest_results
def gather_tensor(tensor_to_gather):
    """All-gather one object per rank; returns the list of per-rank objects
    (NOT concatenated, unlike gather_list)."""
    results = [None] * dist.get_world_size()
    dist.all_gather_object(results, tensor_to_gather)
    return results
def remove_repeat_sample(list_to_process, N_samples):
    """Trim the padding samples DistributedSampler repeats on the last batch.

    Keeps only this rank's genuine share of the *N_samples* dataset items.
    """
    samples_per_rank = ceil((N_samples-dist.get_rank())/dist.get_world_size())
    return list_to_process[:samples_per_rank]
def unique_index_and_value(dataids):
    """Deduplicate *dataids* preserving first-seen order.

    :return: (unique values, indices of their first occurrence in *dataids*)
    """
    unique_dataids = []
    unique_index = []
    seen = set()
    for index, dataid in enumerate(dataids):
        if dataid not in seen:
            seen.add(dataid)
            unique_dataids.append(dataid)
            unique_index.append(index)
    # (Removed a dead `dataids = unique_dataids` assignment just before return.)
    return unique_dataids, unique_index
def set_seed(seed):
    """Seed every RNG in use (Python, hashing, NumPy, torch CPU + CUDA)
    and force deterministic cuDNN behaviour for reproducibility."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # covers every device in multi-GPU runs
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def get_logger(filename=None):
    """Return the shared 'logger' instance at DEBUG level.

    Console output goes through the root logger configured by basicConfig;
    when *filename* is given, a DEBUG-level FileHandler is also attached to
    the root logger so all records are mirrored to that file.
    """
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO)
    logger = logging.getLogger('logger')
    logger.setLevel(logging.DEBUG)
    if filename is not None:
        file_handler = logging.FileHandler(filename)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
        logging.getLogger().addHandler(file_handler)
    return logger
| 8,686 | 35.965957 | 114 | py |
MPMQA | MPMQA-master/train.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import time
import torch
import deepspeed
import torch.distributed as dist
from tqdm import tqdm
from utils import set_seed, get_logger, obj_to_cuda, save_ckpt, load_ckpt, boardcast_str, harmonic_mean
from parser import get_base_parser
from dataset.mqa_dataset import get_mqa_loader
from models.mqa_model import MQAT5Model
from evaluate import evaluate_ds
def train(args, model, train_loader, val_loader, test_loader, optimizer, logger, val_metric='ROUGE_L'):
    """Main training loop with per-epoch validation and best-checkpoint tracking.

    After every epoch the model is checkpointed and validated; the best
    checkpoint (by *val_metric*, aggregated per args.val_metric_aggregate
    when val_metric is a list) is kept and used for the final test run.

    Bug fix: in the per-epoch validation block the 'mean' aggregation was an
    `elif` on the isinstance chain, so it was unreachable for list metrics
    (harmonic_mean was always used).  It now mirrors the resume-path logic.
    """
    logger.info('Start training')
    total_step = 0
    best_epoch = -1
    best_score = -1
    best_ckpt_path = None
    ckpt_dir = os.path.join(args.output_dir, 'ckpts')
    start_epoch = args.start_epoch
    if args.checkpoint:
        # Resume: reload weights (and optionally optimizer state) and re-score
        # the checkpoint so best-tracking starts from its validation score.
        logger.info(f'Resume training from {args.checkpoint}')
        logger.info(f'Start epoch {start_epoch}')
        # assert args.start_epoch != 0
        load_ckpt(args, args.checkpoint, model, logger, load_module_only=args.load_module_only)
        metrics = evaluate_ds(args, model, val_loader, logger, save_fn=f'epoch_{start_epoch}.json', split='val')
        if isinstance(val_metric, str):
            val_score = metrics[val_metric]
        elif isinstance(val_metric, list):
            scores = [metrics[n] for n in val_metric]
            if args.val_metric_aggregate == 'harmonic_mean':
                val_score = harmonic_mean(scores)
            elif args.val_metric_aggregate == 'mean':
                val_score = sum(scores) / len(scores)
        if ((not args.deepspeed) or dist.get_rank()==0) and val_score > best_score:
            best_score = val_score
            best_epoch = start_epoch
            best_ckpt_path = os.path.join(ckpt_dir, f'checkpoint.{best_epoch}')
            logger.info(f'Epoch {best_epoch} get best {args.val_metric_aggregate} score {val_metric}: {best_score}')
            logger.info(f'Best checkpoint at {best_ckpt_path}')
    # use for schedule sampling
    total_step = (args.epoch - start_epoch) * len(train_loader)
    now_step = start_epoch * len(train_loader)
    for epoch in range(start_epoch, args.epoch):
        model.train()
        # model.eval() # for debug, DO NOT forget to remove
        # The frozen region-feature extractor must stay in eval mode.
        model.module.roi_extractor.eval()
        # Set epoch must be called, otherwise the order of data in each epoch is the same
        train_loader.sampler.set_epoch(epoch)
        for step, batch in tqdm(enumerate(train_loader), ncols=50, total=len(train_loader)):
            batch = obj_to_cuda(batch)
            loss_dict = model(**batch, now_step=now_step, total_step=total_step)
            loss = loss_dict['loss']
            if args.deepspeed:
                model.backward(loss)
                model.step()
            else:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            now_step += 1
            if now_step % 100 == 0:
                if args.deepspeed:
                    logger.info(f'Epoch: {epoch+1}/{args.epoch}, step: {step}/{len(train_loader)}, loss: {float(loss.detach().cpu())}')
                else:
                    logger.info(f'Epoch: {epoch+1}/{args.epoch}, step: {step}/{len(train_loader)}, lr: {min(optimizer.get_lr())}-{max(optimizer.get_lr())}, loss: {float(loss.detach().cpu())}')
                for loss_name, loss_value in loss_dict.items():
                    if loss_name == 'loss':
                        continue
                    logger.info(f'{loss_name}: {float(loss_value.detach().cpu())}')
            if args.debug:
                break
        save_ckpt(args, model, None, args.output_dir, epoch=epoch+1, logger=logger)
        metrics = evaluate_ds(args, model, val_loader, logger, save_fn=f'epoch_{epoch+1}.json', split='val')
        if isinstance(val_metric, str):
            val_score = metrics[val_metric]
        elif isinstance(val_metric, list):
            scores = [metrics[n] for n in val_metric]
            # FIX: aggregate choice now checked inside the list branch,
            # consistent with the resume path above.
            if args.val_metric_aggregate == 'harmonic_mean':
                val_score = harmonic_mean(scores)
            elif args.val_metric_aggregate == 'mean':
                val_score = sum(scores) / len(scores)
        if val_score > best_score and ((not args.deepspeed) or dist.get_rank()==0):
            best_score = val_score
            best_epoch = epoch+1
            best_ckpt_path = os.path.join(ckpt_dir, f'checkpoint.{best_epoch}')
            logger.info(f'Epoch {best_epoch} get best {args.val_metric_aggregate} score {val_metric}: {best_score}')
            logger.info(f'Best checkpoint at {best_ckpt_path}')
        # If the checkpoint of previous epoch did not perform best, remove it.
        if args.save_best_last and ((not args.deepspeed) or dist.get_rank()==0):
            previous_epoch = epoch
            previous_ckpt_path = os.path.join(ckpt_dir, f'checkpoint.{previous_epoch}')
            if previous_epoch > start_epoch and previous_ckpt_path != best_ckpt_path:
                logger.info(f'Remove {previous_ckpt_path} that does not preform best.')
                cmd = f'rm -r {previous_ckpt_path}'
                logger.info(f'Execute command: \n{cmd}')
                os.system(cmd)
    if (not args.deepspeed) or dist.get_rank() == 0:
        logger.info(f'Epoch {best_epoch} get best {args.val_metric_aggregate} mean score {val_metric}: {best_score}')
        logger.info(f'Load checkpoint {best_ckpt_path} to perform testing')
    # All ranks need the same path for the collective checkpoint load.
    best_ckpt_path = boardcast_str(best_ckpt_path, src_rank=0)
    load_ckpt(args, best_ckpt_path, model, logger=logger, load_module_only=True)
    del train_loader
    del val_loader
    del optimizer
    torch.cuda.empty_cache()
    if args.deepspeed:
        metrics = evaluate_ds(args, model, test_loader, logger, save_fn=f'epoch_{best_epoch}.json', split='test')
def get_parameter_group(args, model):
    """Build optimizer parameter groups with and without weight decay.

    Biases and LayerNorm parameters are excluded from weight decay, the
    standard practice for transformer fine-tuning; everything else decays
    by args.weight_decay.
    """
    no_decay_markers = ('bias', 'LayerNorm.bias', 'LayerNorm.weight')
    decay_params = []
    no_decay_params = []
    for name, param in model.named_parameters():
        if any(marker in name for marker in no_decay_markers):
            no_decay_params.append(param)
        else:
            decay_params.append(param)
    return [
        {'params': decay_params, 'weight_decay': args.weight_decay},
        {'params': no_decay_params, 'weight_decay': 0.0},
    ]
def main(args):
    """Training entry point: seed, set up run directory + logging, build the
    model and train/val/test loaders, wrap with deepspeed, and train."""
    set_seed(args.seed)
    torch.cuda.set_device(args.local_rank)
    os.makedirs(args.output_dir, exist_ok=True)
    if args.deepspeed:
        deepspeed.init_distributed()
    nowtime = None
    # Saving arguments
    # Rank 0 creates the timestamped run directory and snapshots the config.
    if not args.deepspeed or (args.deepspeed and dist.get_rank() == 0):
        nowtime = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime())
        os.makedirs(os.path.join(args.output_dir, 'opt', nowtime), exist_ok=True)
        with open(os.path.join(args.output_dir, 'opt', nowtime, 'config.json'), 'w') as f:
            json.dump(args.__dict__, f, indent=1, ensure_ascii=False)
        if args.deepspeed_config is not None:
            os.system(f'cp {args.deepspeed_config} {os.path.join(args.output_dir, "opt", nowtime)}')
    if args.deepspeed:
        # Other ranks learn the run directory name from rank 0.
        nowtime = boardcast_str(nowtime, src_rank=0)
        logger = get_logger(os.path.join(args.output_dir, 'opt', nowtime, f'log.{dist.get_rank()}.txt'))
    else:
        logger = get_logger(os.path.join(args.output_dir, 'opt', nowtime, f'log.txt'))
    logger.info(json.dumps(vars(args), indent=2))
    model = MQAT5Model(args, pretrained_dir=args.pretrained_dir)
    train_loader = get_mqa_loader(args, root=args.root, tokenizer=model.tokenizer, batch_size=args.batch_size, split='train', num_workers=args.n_workers)
    val_loader = get_mqa_loader(args, root=args.root, tokenizer=model.tokenizer, batch_size=args.batch_size, split='val', num_workers=args.n_workers)
    test_loader = get_mqa_loader(args, root=args.root, tokenizer=model.tokenizer, batch_size=args.inf_batch_size, split='test', num_workers=args.n_workers)
    model.resize_token_embeddings()
    model.cuda()
    if args.deepspeed:
        model, optimizer, _, _ = deepspeed.initialize(
            args=args,
            model=model,
            model_parameters=get_parameter_group(args, model)
        )
    train(args, model, train_loader, val_loader, test_loader, optimizer, logger, val_metric=args.val_metric)
if __name__ == '__main__':
    parser = get_base_parser()
    args = parser.parse_args()
    # Values from --config override CLI defaults (except local_rank, which
    # must stay as assigned by the distributed launcher).
    if args.config is not None:
        args_dict = json.load(open(args.config, 'r', encoding='utf-8'))
        for key, value in args_dict.items():
            if key == 'local_rank':
                continue
            setattr(args, key, value)
    if args.debug:
        args.output_dir = "expr/debug"
    main(args)
MPMQA | MPMQA-master/detector/inference.py | import os
import numpy as np
from xml.etree.ElementInclude import default_loader
import cv2
import random
from tqdm import tqdm
from detectron2.utils.visualizer import Visualizer
from pkg_resources import DefaultProvider
from detectron2.engine import DefaultPredictor
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from bua.d2 import (
build_detection_test_loader_with_attributes
)
from bua import add_config
import bua.d2.modeling.roi_heads
from pdfparse.utils import doc2imgs
from evaluation import VGEvaluator
# import dataset.balloon
# from dataset.balloon import balloon_metadata
# from dataset.balloon import get_balloon_dicts
import dataset.publaynet
from dataset.publaynet import publaynet_metadata, get_publaynet_dicts
from dataset.vg import get_vg_dicts, vg_metadata
from detectron2.config import get_cfg
from detectron2.utils.visualizer import ColorMode
import argparse
def inference_doc(cfg, doc_fp, out_dir):
    """Render a PDF document to page images, run the detector on each page,
    and save visualized detections as <page_number>.jpg in *out_dir*."""
    os.makedirs(out_dir, exist_ok=True)
    imgs = doc2imgs(doc_fp)
    predictor = DefaultPredictor(cfg)
    for i, im in tqdm(enumerate(imgs), total=len(imgs), desc='Detecting'):
        outputs = predictor(im)
        # Visualizer expects RGB; cv2 images are BGR, hence the ::-1 flips.
        v = Visualizer(im[:, :, ::-1],
                       metadata=publaynet_metadata,
                       scale=0.5,
                       instance_mode=ColorMode.IMAGE_BW  # remove the colors of unsegmented pixels. This option is only available for segmentation models
                       )
        out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        cv2.imwrite(os.path.join(out_dir, str(i+1)+'.jpg'), out.get_image()[:,:,::-1])
def inference_dir(cfg, root, out_dir):
    """Run the detector on every image in *root* and save visualized
    detections under the same filenames in *out_dir*."""
    # Fix (consistency with inference_doc): create the output directory,
    # otherwise cv2.imwrite silently fails when it does not exist.
    os.makedirs(out_dir, exist_ok=True)
    predictor = DefaultPredictor(cfg)
    for img_name in os.listdir(root):
        fp = os.path.join(root, img_name)
        im = cv2.imread(fp)
        outputs = predictor(im)
        # Visualizer expects RGB; cv2 images are BGR, hence the ::-1 flips.
        v = Visualizer(im[:, :, ::-1],
                       metadata=publaynet_metadata,
                       scale=0.5,
                       instance_mode=ColorMode.IMAGE_BW  # remove the colors of unsegmented pixels. This option is only available for segmentation models
                       )
        out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        cv2.imwrite(os.path.join(out_dir, img_name), out.get_image()[:, :, ::-1])
def save_instances(instance, save_dir):
    """Dump the predicted boxes and class ids of a detectron2 Instances
    object as .npy files under ``save_dir``."""
    pred_boxes = instance.pred_boxes.tensor.cpu().detach().numpy()
    pred_classes = instance.pred_classes.cpu().detach().numpy()
    np.save(save_dir+'/boxes.npy', pred_boxes)
    np.save(save_dir+'/classes.npy', pred_classes)
def inference(cfg):
    """Visualize predictions on one random Visual Genome test image and dump
    its raw boxes/classes under <OUTPUT_DIR>/result_samples/<image stem>/."""
    sample_dir = os.path.join(cfg.OUTPUT_DIR, 'result_samples')
    os.makedirs(sample_dir, exist_ok=True)
    predictor = DefaultPredictor(cfg)
    dataset_dicts = get_vg_dicts("data/VisualGenome", 'test')
    # sample a single random test image
    for d in random.sample(dataset_dicts, 1):
        im = cv2.imread(d["file_name"])
        outputs = predictor(im)  # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
        v = Visualizer(im[:, :, ::-1],
                metadata=vg_metadata,
                scale=1,
                # instance_mode=ColorMode.IMAGE_BW # remove the colors of unsegmented pixels. This option is only available for segmentation models
        )
        # raw .npy dumps go to a per-image sub-directory named after the file stem
        instance_dir = os.path.join(sample_dir, d['file_name'].split('/')[-1].split('.')[0])
        os.makedirs(instance_dir, exist_ok=True)
        save_instances(outputs['instances'], instance_dir)
        out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        cv2.imwrite(os.path.join(sample_dir, d['file_name'].split('/')[-1]), out.get_image()[:, :, ::-1])
def evaluate(cfg, args):
    """Evaluate the model on ``args.dataset`` with the Visual Genome evaluator.

    Uses the attribute-aware test loader when the model has an attribute head.
    """
    # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")  # path to the model we just trained
    evaluator = VGEvaluator(args.dataset, output_dir='./output', cfg=cfg, distributed=False)
    if cfg.MODEL.ATTRIBUTE_ON:
        val_loader = build_detection_test_loader_with_attributes(cfg, args.dataset)
    else:
        val_loader = build_detection_test_loader(cfg, args.dataset)
    predictor = DefaultPredictor(cfg)
    print(inference_on_dataset(predictor.model, val_loader, evaluator))
def get_args():
    """Build and parse the command-line options for inference / evaluation."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--mode', default='d2')
    ap.add_argument('--input', default='./test//manual_s10.pdf')
    ap.add_argument('--output', default='./test//manual_s10_detect/')
    ap.add_argument('--thresh', type=float, default=0.7)
    ap.add_argument('--dataset', default='visual_genome_test')
    ap.add_argument('--config', default='./expr/publaynet-rcnn-3x/faster_rcnn_R_50_FPN_3x.yaml')
    ap.add_argument('--model', default='./expr/publaynet-rcnn-3x/output/ckpts/model_final.pth')
    return ap.parse_args()
if __name__ == '__main__':
    # Build the config from CLI args, load trained weights, then run
    # single-image inference (other entry points left commented below).
    cfg = get_cfg()
    args = get_args()
    add_config(args, cfg)
    cfg.merge_from_file(args.config)
    cfg.MODEL.WEIGHTS = args.model
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.thresh
    # inference_doc(cfg, args.input, args.output)
    inference(cfg)
    # evaluate(cfg, args)
MPMQA | MPMQA-master/detector/setup.py | #!/usr/bin/env python
import glob
import os
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
# Guard on the installed PyTorch version (major, minor).
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 3], "Requires PyTorch >= 1.3"
def get_extensions():
    """Collect the C++/CUDA sources under bua/caffe/modeling/layers/csrc and
    return the list of torch extension modules to build.

    Builds a CUDAExtension when CUDA is available (or FORCE_CUDA=1),
    otherwise a plain CppExtension.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    extensions_dir = os.path.join(this_dir, "bua","caffe", "modeling","layers", "csrc")
    main_source = os.path.join(extensions_dir, "vision.cpp")
    sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp"))
    source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob(
        os.path.join(extensions_dir, "*.cu")
    )
    sources = [main_source] + sources
    extension = CppExtension
    extra_compile_args = {"cxx": []}
    define_macros = []
    if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1":
        extension = CUDAExtension
        sources += source_cuda
        define_macros += [("WITH_CUDA", None)]
        extra_compile_args["nvcc"] = [
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ]
    sources = [os.path.join(extensions_dir, s) for s in sources]
    include_dirs = [extensions_dir]
    ext_modules = [
        extension(
            "bua.caffe.modeling._C",
            sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
    return ext_modules
# Package metadata; the native ops are compiled by torch's BuildExtension.
setup(
    name="bottom-up-attention.pytorch",
    packages=find_packages(exclude=("configs", "tests")),
    python_requires=">=3.6",
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| 1,911 | 27.537313 | 100 | py |
MPMQA | MPMQA-master/detector/train_det.py | import logging
import os
from collections import OrderedDict
import torch
from torch.nn.parallel import DistributedDataParallel
import detectron2.utils.comm as comm
import bua.d2.modeling.roi_heads
from bua import add_config
from detectron2.checkpoint import DetectionCheckpointer, PeriodicCheckpointer
from detectron2.config import get_cfg
from detectron2.data import (
MetadataCatalog,
build_detection_test_loader,
build_detection_train_loader,
)
from bua.d2 import (
build_detection_test_loader_with_attributes,
build_detection_train_loader_with_attributes
)
from detectron2.engine import default_argument_parser, default_setup, launch
from detectron2.evaluation import (
CityscapesInstanceEvaluator,
CityscapesSemSegEvaluator,
COCOEvaluator,
COCOPanopticEvaluator,
DatasetEvaluators,
LVISEvaluator,
PascalVOCDetectionEvaluator,
SemSegEvaluator,
inference_on_dataset,
print_csv_format,
)
from dataset import publaynet, balloon, vg
from detectron2.modeling import build_model
from detectron2.solver import build_lr_scheduler, build_optimizer
from detectron2.utils.events import EventStorage
logger = logging.getLogger("detectron2")
def get_evaluator(cfg, dataset_name, output_folder=None):
    """
    Create evaluator(s) for a given dataset.
    This uses the special metadata "evaluator_type" associated with each builtin dataset.
    For your own dataset, you can simply create an evaluator manually in your
    script and do not have to worry about the hacky if-else logic here.
    """
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    evaluator_list = []
    # dispatch on the evaluator type registered for this dataset
    evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
    if evaluator_type in ["sem_seg", "coco_panoptic_seg"]:
        evaluator_list.append(
            SemSegEvaluator(
                dataset_name,
                distributed=True,
                output_dir=output_folder,
            )
        )
    if evaluator_type in ["coco", "coco_panoptic_seg"]:
        evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
    if evaluator_type == "coco_panoptic_seg":
        evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
    if evaluator_type == "cityscapes_instance":
        return CityscapesInstanceEvaluator(dataset_name)
    if evaluator_type == "cityscapes_sem_seg":
        return CityscapesSemSegEvaluator(dataset_name)
    if evaluator_type == "pascal_voc":
        return PascalVOCDetectionEvaluator(dataset_name)
    if evaluator_type == "lvis":
        return LVISEvaluator(dataset_name, cfg, True, output_folder)
    if len(evaluator_list) == 0:
        raise NotImplementedError(
            "no Evaluator for the dataset {} with the type {}".format(dataset_name, evaluator_type)
        )
    if len(evaluator_list) == 1:
        return evaluator_list[0]
    return DatasetEvaluators(evaluator_list)
def do_test(cfg, model):
    """Run inference on every dataset in cfg.DATASETS.TEST.

    Returns an OrderedDict of per-dataset results; when there is only one
    test dataset, its result dict is returned directly.
    """
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        # attribute-head models need the attribute-aware loader
        if cfg.MODEL.ATTRIBUTE_ON:
            data_loader = build_detection_test_loader_with_attributes(cfg, dataset_name)
        else:
            data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = get_evaluator(
            cfg, dataset_name, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
        )
        results_i = inference_on_dataset(model, data_loader, evaluator)
        results[dataset_name] = results_i
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(dataset_name))
            print_csv_format(results_i)
    if len(results) == 1:
        results = list(results.values())[0]
    return results
def do_train(cfg, model, resume=False):
    """Minimal training loop (adapted from detectron2's plain_train_net).

    Checkpoints go to <OUTPUT_DIR>/ckpts; optionally resumes from the last
    checkpoint, and runs do_test() every cfg.TEST.EVAL_PERIOD iterations.
    """
    model.train()
    optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    os.makedirs(os.path.join(cfg.OUTPUT_DIR, 'ckpts'), exist_ok=True)
    checkpointer = DetectionCheckpointer(
        model, os.path.join(cfg.OUTPUT_DIR, 'ckpts'), optimizer=optimizer, scheduler=scheduler
    )
    # resume_or_load returns the stored iteration; start from the next one
    start_iter = (
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get("iteration", -1) + 1
    )
    max_iter = cfg.SOLVER.MAX_ITER
    periodic_checkpointer = PeriodicCheckpointer(
        checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter
    )
    # writers = default_writers(cfg.OUTPUT_DIR, max_iter) if comm.is_main_process() else []
    # compared to "train_net.py", we do not support accurate timing and
    # precise BN here, because they are not trivial to implement in a small training loop
    if cfg.MODEL.ATTRIBUTE_ON:
        data_loader = build_detection_train_loader_with_attributes(cfg)
    else:
        data_loader = build_detection_train_loader(cfg)
    logger.info("Starting training from iteration {}".format(start_iter))
    with EventStorage(start_iter) as storage:
        for data, iteration in zip(data_loader, range(start_iter, max_iter)):
            # storage.iter = iteration
            loss_dict = model(data)
            losses = sum(loss_dict.values())
            # abort on NaN/Inf losses rather than training on garbage
            assert torch.isfinite(losses).all(), loss_dict
            # reduce losses across workers for logging only
            loss_dict_reduced = {k: v.item() for k, v in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar("lr", optimizer.param_groups[0]["lr"], smoothing_hint=False)
            scheduler.step()
            if (
                cfg.TEST.EVAL_PERIOD > 0
                and (iteration + 1) % cfg.TEST.EVAL_PERIOD == 0
                and iteration != max_iter - 1
            ):
                do_test(cfg, model)
                # Compared to "train_net.py", the test results are not dumped to EventStorage
                comm.synchronize()
            # if iteration - start_iter > 5 and (
            #     (iteration + 1) % 20 == 0 or iteration == max_iter - 1
            # ):
            #     for writer in writers:
            #         writer.write()
            periodic_checkpointer.step(iteration)
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    # register project-specific config keys before merging files/overrides
    add_config(args, cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(
        cfg, args
    )  # if you don't like any of the default setup, write your own setup code
    return cfg
def main(args):
    """Entry point per process: build the model, then either evaluate only
    or train (with DDP when more than one process is launched)."""
    cfg = setup(args)
    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))
    if args.eval_only:
        # load weights and skip straight to evaluation
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume
        )
        return do_test(cfg, model)
    distributed = comm.get_world_size() > 1
    if distributed:
        model = DistributedDataParallel(
            model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
        )
    do_train(cfg, model, resume=args.resume)
    return do_test(cfg, model)
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    # Force d2 mode for add_config(); plain attribute assignment instead of
    # the unidiomatic args.__setattr__('mode', 'd2').
    args.mode = 'd2'
    print("Command Line Args:", args)
    # launch() spawns one process per GPU and calls main(args) in each
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )
MPMQA | MPMQA-master/detector/utils.py | import fitz
import cv2
import numpy as np
from PIL import Image, ImageTk, ImageDraw
from PIL import ImageFont
class UnionFindSet:
    """Disjoint-set (union-find) with path compression.

    Fix: ``union`` now merges the *roots* of both sets. The previous code
    re-parented only node ``j`` itself, so other members of j's set were
    left behind — e.g. after union(0, 1); union(2, 3); union(1, 3), node 2
    remained in a separate set from {0, 1, 3}.
    """
    def __init__(self, max_n=100):
        # parent[i] == i marks a set root
        self.parent = [i for i in range(max_n)]
    def union(self, i, j):
        """Merge the sets containing ``i`` and ``j``."""
        root_i = self.find_parent(i)
        root_j = self.find_parent(j)
        self.parent[root_j] = root_i
    def find_parent(self, i):
        """Return the root of i's set, compressing the path on the way up."""
        if i == self.parent[i]:
            return i
        root = self.find_parent(self.parent[i])
        self.parent[i] = root
        return root
def pil2cv(pil_img):
    """Convert a PIL RGB image to a cv2 BGR numpy array."""
    img = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
    return img
def cv2pil(cv2_img):
    """Convert a cv2 BGR numpy array to a PIL RGB image."""
    img = Image.fromarray(cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB))
    return img
def page2img(page):
    """Render a fitz (PyMuPDF) page to a PIL image."""
    pix = page.get_pixmap()
    # keep the alpha channel only if the pixmap has one
    mode = "RGBA" if pix.alpha else 'RGB'
    img = Image.frombytes(mode, [pix.width, pix.height], pix.samples)
    return img
def doc2imgs(doc_fp, img_type='cv2'):
    """Render every page of a PDF document into a list of images.

    ``img_type='cv2'`` yields BGR numpy arrays; anything else yields PIL images.
    """
    rendered = []
    for page in fitz.open(doc_fp):
        img = page2img(page)
        rendered.append(pil2cv(img) if img_type == 'cv2' else img)
    return rendered
def merge_boxes(boxes):
    """Return the tightest (x0, y0, x1, y1) box enclosing every input box."""
    corners = [(b[0], b[1], b[2], b[3]) for b in boxes]
    x0s, y0s, x1s, y1s = zip(*corners)
    return (min(x0s), min(y0s), max(x1s), max(y1s))
def is_vh_line_interact(line_a, line_b, margin=1):
    """True if two 1-D intervals overlap, allowing a slack of ``margin``.

    Endpoint order within each interval does not matter.
    """
    a_lo, a_hi = sorted(line_a)
    b_lo, b_hi = sorted(line_b)
    # overlap iff neither interval is strictly beyond the other + margin
    return a_lo <= b_hi + margin and b_lo <= a_hi + margin
def equal_margin(a, b, margin=1):
    """True when ``a`` and ``b`` differ by at most ``margin``."""
    return abs(a - b) <= margin
def is_two_rect_share_edge(rect_a, rect_b, margin=1):
    """True when two (x0, y0, x1, y1) rects share (approximately) an edge.

    They "share an edge" when a horizontal edge of one lies within ``margin``
    of a horizontal edge of the other AND their x-extents overlap, or the
    symmetric condition for vertical edges and y-extents.
    """
    xa0,ya0,xa1,ya1 = rect_a[0], rect_a[1], rect_a[2], rect_a[3]
    xb0,yb0,xb1,yb1 = rect_b[0], rect_b[1], rect_b[2], rect_b[3]
    # if is_vh_line_interact((xa0, xa1), (xb0,xb1)) or \
    #     is_vh_line_interact((xa0, xa1), (yb0,yb1)) or \
    #     is_vh_line_interact((ya0, ya1), (xb0,xb1)) or \
    #     is_vh_line_interact((ya0, ya1), (yb0,yb1)):
    #     return True
    # horizontal-edge match + x overlap, OR vertical-edge match + y overlap
    if ((equal_margin(ya0, yb0, margin) or equal_margin(ya0, yb1, margin) or equal_margin(ya1, yb0, margin) or equal_margin(ya1,yb1, margin)) and is_vh_line_interact((xa0, xa1), (xb0, xb1), margin)) or \
        ((equal_margin(xa0, xb0, margin) or equal_margin(xa0, xb1, margin) or equal_margin(xa1, xb0, margin) or equal_margin(xa1, xb1, margin)) and is_vh_line_interact((ya0, ya1), (yb0, yb1), margin)) :
        return True
    else:
        return False
def rect_area(rect):
    """Area of an (x0, y0, x1, y1) rect as a float.

    Keeps the legacy behavior of clamping negative x0/y0 to 0.
    """
    width = rect[2] - max(rect[0], 0)
    height = rect[3] - max(rect[1], 0)
    return width * height * 1.0
def intersect_area(rect_a, rect_b):
    """Area of the overlap between two (x0, y0, x1, y1) rects (0.0 if disjoint).

    Cleanup: removed the dead commented-out alternative implementation and
    the unused width/height locals; the computed result is unchanged.
    """
    ax0, ay0, ax1, ay1 = rect_a
    bx0, by0, bx1, by1 = rect_b
    overlap_w = max(0.0, min(ax1, bx1) - max(ax0, bx0))
    overlap_h = max(0.0, min(ay1, by1) - max(ay0, by0))
    return overlap_w * overlap_h
def union_ratio(rect_a, rect_b):
    """Fraction of ``rect_a``'s area covered by ``rect_b``.

    Rects with area <= 10 are treated as fully covered (returns 1), which
    avoids dividing by a near-zero area.

    Fix: the original computed intersect_area() twice and discarded the
    first result; the intersection and area are now computed once each.
    """
    area_a = rect_area(rect_a)
    if area_a > 10:
        return intersect_area(rect_a, rect_b) / (area_a * 1.0)
    else:
        return 1
def iou(rect_a, rect_b):
    """Intersection-over-union of two (x0, y0, x1, y1) rects.

    Fix: compute the intersection area once instead of twice.
    """
    inter = intersect_area(rect_a, rect_b)
    union = rect_area(rect_a) + rect_area(rect_b) - inter
    return inter / union
def filt_inner_drawing(drawings):
    """Drop every drawing whose 'rect' is (area-wise) fully covered by another
    drawing's rect, keeping the original order of the survivors.

    Note: two identical rects cover each other, so both are dropped — this
    matches the original behavior. Cleanup: removed the unused ``temp = j``
    assignment and leftover debug comments. Quadratic in len(drawings).
    """
    covered = set()
    for i, draw_i in enumerate(drawings):
        rect_i = draw_i['rect']
        for j, draw_j in enumerate(drawings):
            if i == j:
                continue
            # union_ratio >= 1 means rect_i lies entirely inside draw_j's rect
            if union_ratio(rect_i, draw_j['rect']) >= 1:
                covered.add(i)
                break
    return [d for i, d in enumerate(drawings) if i not in covered]
def inside(rect_a, rect_b):
    """True when the smaller of the two rects lies entirely within the larger.

    Argument order does not matter; the larger-area rect is treated as the
    outer one.
    """
    def _area(r):
        # same clamping convention as rect_area(): negative x0/y0 count as 0
        return (r[2] - max(r[0], 0)) * (r[3] - max(r[1], 0)) * 1.0
    if _area(rect_a) < _area(rect_b):
        rect_a, rect_b = rect_b, rect_a
    ox0, oy0, ox1, oy1 = rect_a[0], rect_a[1], rect_a[2], rect_a[3]
    ix0, iy0, ix1, iy1 = rect_b[0], rect_b[1], rect_b[2], rect_b[3]
    return ix0 >= ox0 and iy0 >= oy0 and ox1 >= ix1 and oy1 >= iy1
def pil_draw_bbox(img, xy, color=(255,0,0), label=None):
    """Draw a rectangle outline (and an optional label) on a PIL image in place.

    :param xy: (x0, y0, x1, y1) box coordinates.
    :param label: optional text drawn just above the box's top-left corner.
    """
    draw = ImageDraw.Draw(img)
    draw.rectangle(xy, outline=color)
    if label is not None:
        draw.text((xy[0], xy[1]-10), label, fill=color)
    return img
MPMQA | MPMQA-master/detector/ROIFeatExtractor.py | import numpy as np
import cv2
import torch
import torch.nn as nn
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from detectron2.structures import ImageList, Boxes
from detectron2.checkpoint import DetectionCheckpointer
import sys
sys.path.append('detector')
from bua.d2 import add_attribute_config
class ROIFeatExtractor(nn.Module):
    """Wraps a detectron2 detector to extract per-box ROI features.

    With ``bua=True`` the config is extended with attribute-head keys and the
    bottom-up-attention style shared ROI transform is used for pooling.
    """
    def __init__(self, model_cfg, weights, bua=False):
        """
        :param model_cfg: path to a detectron2 config yaml.
        :param weights: path to the model checkpoint to load.
        :param bua: use the bottom-up-attention ROI head layout.
        """
        super().__init__()
        self.cfg = get_cfg()
        self.bua = bua
        if self.bua:
            add_attribute_config(self.cfg)
        self.cfg.merge_from_file(model_cfg)
        self.cfg.MODEL.WEIGHTS = weights
        self.model = build_model(self.cfg)
        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(self.cfg.MODEL.WEIGHTS)
    def preprocess_images(self, images):
        """Normalize HWC image tensors and batch them into an ImageList."""
        images = [img.to(self.model.device) for img in images]
        # import pdb;pdb.set_trace()
        # HWC -> CHW for the backbone
        images = [img.permute(2, 0, 1) for img in images]
        images = [(x - self.model.pixel_mean) / self.model.pixel_std for x in images]
        images = ImageList.from_tensors(images, self.model.backbone.size_divisibility)
        return images
    def convert_bbox(self, bboxes):
        """Wrap raw N x 4 tensors into detectron2 Boxes on the model device."""
        bboxes = [Boxes(b.to(self.model.device)) for b in bboxes]
        return bboxes
    def roi_align(self, grid_features, bboxes):
        """Pool backbone feature maps over the given boxes.

        Returns a list of per-image feature tensors (split by box count).
        """
        nbbs = [len(b) for b in bboxes]
        if self.bua:
            box_features = self.model.roi_heads._shared_roi_transform(
                [grid_features[f] for f in self.model.roi_heads.in_features],
                bboxes)
            # global average pool over the spatial dims
            box_features = box_features.mean(dim=[2, 3])
        else:
            grid_features = [grid_features[f] for f in self.model.roi_heads.box_in_features]
            box_features = self.model.roi_heads.box_pooler(grid_features, bboxes)
            box_features = self.model.roi_heads.box_head(box_features)
        box_features = box_features.split(nbbs)
        return box_features
    def forward(self, images, bboxes):
        """
        args:
            image - BGR image list [H x W x C, ]
            bboxes - Boxes of each image, list [N x 4,]
        """
        images = self.preprocess_images(images)
        bboxes = self.convert_bbox(bboxes)
        grid_features = self.model.backbone(images.tensor)
        roi_features = self.roi_align(grid_features, bboxes)
        return roi_features
    def predict(self, roi_features):
        """Classify pooled ROI features.

        Returns (predictions, logits): per-image argmax class ids (background
        column excluded) and the raw classification logits.
        """
        predictions = []
        logits = []
        for f in roi_features:
            logits.append(
                self.model.roi_heads.box_predictor(f)[0]#.argmax(dim=-1)
            )
        for l in logits:
            predictions.append(
                # drop the last (background) column before taking the argmax
                l[:, :-1].argmax(dim=-1)
            )
        return predictions, logits
if __name__ == '__main__':
    # Smoke test: extract ROI features for two reference images and compare
    # the re-predicted classes against previously saved ones.
    # Fix: the class is named ROIFeatExtractor — the original called the
    # misspelled ROIFeatExtrator, which raised NameError at runtime. Also
    # removed a leftover unconditional pdb.set_trace() that halted the script.
    # extractor = ROIFeatExtractor('expr/vg-rcnn-3x/config.yaml', 'expr/vg-rcnn-3x/output/ckpts/model_final.pth')
    extractor = ROIFeatExtractor('expr/vg-bua/config.yaml', 'pretrained/bua-d2-frcn-r101.pth', bua=True)
    extractor.eval()
    images = [torch.from_numpy(cv2.imread('2344092.jpg'))]
    bboxes = [torch.from_numpy(np.load('expr/vg-rcnn-3x/output/result_samples/2344092/boxes.npy'))]
    classes = [torch.from_numpy(np.load('expr/vg-rcnn-3x/output/result_samples/2344092/classes.npy'))]
    images.append(torch.from_numpy(cv2.imread('data/VisualGenome/VG_100K/2368275.jpg')))
    bboxes.append(torch.from_numpy(np.load('expr/vg-rcnn-3x/output/result_samples/2368275/boxes.npy')))
    classes.append(torch.from_numpy(np.load('expr/vg-rcnn-3x/output/result_samples/2368275/classes.npy')))
    with torch.no_grad():
        roi_features = extractor(images, bboxes)
        predictions, logits = extractor.predict(roi_features)
    # drop into the debugger only when a prediction mismatch is found
    for c1, p1 in zip(classes, predictions):
        if all(c1 == p1.detach().cpu()) != True:
            import pdb;pdb.set_trace()
| 4,030 | 37.390476 | 112 | py |
MPMQA | MPMQA-master/detector/evaluation/vg_evaluation.py | import os, io
import numpy as np
import copy
import torch
import logging
import pickle as cPickle
import itertools
import contextlib
from pycocotools.coco import COCO
from collections import OrderedDict
from fvcore.common.file_io import PathManager
import detectron2.utils.comm as comm
from detectron2.data import MetadataCatalog
from detectron2.evaluation.evaluator import DatasetEvaluator
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.evaluation.coco_evaluation import instances_to_coco_json
from .vg_eval import vg_eval
class VGEvaluator(DatasetEvaluator):
    """
    Evaluate object proposal, instance detection
    outputs using VG's metrics and APIs.
    """
    def __init__(self, dataset_name, cfg, distributed, output_dir=None):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have either the following corresponding metadata:
                    "json_file": the path to the COCO format annotation
                Or it must be in detectron2's standard dataset format
                so it can be converted to COCO format automatically.
            cfg (CfgNode): config instance
            distributed (True): if True, will collect results from all ranks for evaluation.
                Otherwise, will evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset. The dump contains two files:
                1. "instance_predictions.pth" a file in torch serialization
                   format that contains all the raw original predictions.
                2. "coco_instances_results.json" a json file in COCO's result
                   format.
        """
        self._tasks = self._tasks_from_config(cfg)
        self._distributed = distributed
        self._logger = logging.getLogger(__name__)
        self._cpu_device = torch.device("cpu")
        self._output_dir = output_dir
        self._metadata = MetadataCatalog.get(dataset_name)
        if not hasattr(self._metadata, "json_file"):
            # convert the registered dataset to COCO json once and cache it
            self._logger.warning(f"json_file was not found in MetaDataCatalog for '{dataset_name}'")
            cache_path = os.path.join(output_dir, f"{dataset_name}_vg_format.json")
            self._metadata.json_file = cache_path
            convert_to_coco_json(dataset_name, cache_path)
        json_file = PathManager.get_local_path(self._metadata.json_file)
        # silence pycocotools' prints while loading the annotations
        with contextlib.redirect_stdout(io.StringIO()):
            self._coco_api = COCO(json_file)
        # Load object classes; each vocab line may list comma-separated synonyms,
        # all mapped to the same index. Index 0 is __background__.
        self._classes = ['__background__']
        self._class_to_ind = {}
        self._class_to_ind[self._classes[0]] = 0
        with open(os.path.join('evaluation/objects_vocab.txt')) as f:
            count = 1
            for object in f.readlines():
                names = [n.lower().strip() for n in object.split(',')]
                self._classes.append(names[0])
                for n in names:
                    self._class_to_ind[n] = count
                count += 1
        # Load attributes
        self._attributes = ['__no_attribute__']
        self._attribute_to_ind = {}
        self._attribute_to_ind[self._attributes[0]] = 0
        with open(os.path.join('evaluation/attributes_vocab.txt')) as f:
            count = 1
            for att in f.readlines():
                names = [n.lower().strip() for n in att.split(',')]
                self._attributes.append(names[0])
                for n in names:
                    self._attribute_to_ind[n] = count
                count += 1
        self.roidb, self.image_index = self.gt_roidb(self._coco_api)
    def _tasks_from_config(self, cfg):
        """
        Returns:
            tuple[str]: tasks that can be evaluated under the given configuration.
        """
        tasks = ("bbox",)
        if cfg.MODEL.MASK_ON:
            tasks = tasks + ("segm",)
        if cfg.MODEL.KEYPOINT_ON:
            tasks = tasks + ("keypoints",)
        return tasks
    def gt_roidb(self, dataset):
        """Build the ground-truth roidb from a pycocotools COCO object.

        Returns (roidb, image_index): one dict per image with 'boxes'
        (absolute x0y0x1y1), 'gt_classes' (1-based) and 'gt_attributes'
        (up to 16 attribute ids per box).
        """
        roidb = []
        image_index = dataset.imgToAnns.keys()
        for img_index in dataset.imgToAnns:
            tmp_dict = {}
            num_objs = len(dataset.imgToAnns[img_index])
            bboxes = np.zeros((num_objs, 4), dtype=np.uint16)
            gt_attributes = np.zeros((num_objs, 16), dtype=np.int32)
            gt_classes = np.zeros((num_objs), dtype=np.int32)
            for ind, item in enumerate(dataset.imgToAnns[img_index]):
                bboxes[ind, :] = item['bbox']
                gt_classes[ind] = item['category_id'] + 1  # NOTE: shift to 1-based ids (0 is __background__)
                for j, attr in enumerate(item['attribute_ids']):
                    gt_attributes[ind, j] = attr
            # convert (x, y, w, h) to (x0, y0, x1, y1)
            bboxes[:, 2] = bboxes[:, 2] + bboxes[:, 0]
            bboxes[:, 3] = bboxes[:, 3] + bboxes[:, 1]
            tmp_dict['boxes'] = bboxes
            tmp_dict['gt_attributes'] = gt_attributes
            tmp_dict['gt_classes'] = gt_classes
            roidb.append(tmp_dict)
        return roidb, image_index
    def reset(self):
        # clears accumulated predictions before a new evaluation run
        self._predictions = []
    def process(self, inputs, outputs):
        """
        Args:
            inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
                It is a list of dict. Each dict corresponds to an image and
                contains keys like "height", "width", "file_name", "image_id".
            outputs: the outputs of a COCO model. It is a list of dicts with key
                "instances" that contains :class:`Instances`.
        """
        for input, output in zip(inputs, outputs):
            prediction = {"image_id": input["image_id"]}
            # TODO this is ugly
            if "instances" in output:
                instances = output["instances"].to(self._cpu_device)
                prediction["boxes"] = instances.pred_boxes.tensor.numpy()
                prediction["labels"] = instances.pred_classes.numpy()
                prediction["scores"] = instances.scores.numpy()
            self._predictions.append(prediction)
    def evaluate(self):
        """Gather predictions (across ranks if distributed), dump them to
        disk, then run the Visual Genome evaluation."""
        if self._distributed:
            comm.synchronize()
            self._predictions = comm.gather(self._predictions, dst=0)
            self._predictions = list(itertools.chain(*self._predictions))
            if not comm.is_main_process():
                return {}
        # self._predictions = torch.load(os.path.join(self._output_dir, "instances_predictions.pth"))
        if len(self._predictions) == 0:
            self._logger.warning("[VGEvaluator] Did not receive valid predictions.")
            return {}
        if self._output_dir:
            PathManager.mkdirs(self._output_dir)
            file_path = os.path.join(self._output_dir, "instances_predictions.pth")
            with PathManager.open(file_path, "wb") as f:
                torch.save(self._predictions, f)
        self._results = OrderedDict()
        self._eval_vg()
        # Copy so the caller can do whatever with results
        return copy.deepcopy(self._results)
    def _eval_vg(self):
        # write per-class detection files, then score them VOC-style
        self.write_voc_results_file(self._predictions, output_dir=self._output_dir)
        self.do_python_eval(self._output_dir)
    def write_voc_results_file(self, predictions, output_dir):
        """Write one VOC-format detection file per class:
        '<image_id> <score> <x0> <y0> <x1> <y1>' per line (1-based coords)."""
        # preds = []
        # for item in predictions:
        #     pred = {}
        #     pred['image_id'] = item['image_id']
        #     scores = item["scores"]
        #     labels = item["labels"]
        #     bbox = item["boxes"]
        #     for ind, instance in enumerate(item['instances']):
        #         scores[ind] = instance['score']
        #         labels[ind] = instance['category_id']
        #         bbox[ind, :] = instance['bbox'][:]
        #     pred['scores'] = scores
        #     pred['lables'] = labels
        #     pred['bbox'] = bbox
        #     preds.append(pred)
        for cls_ind, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            print('Writing "{}" vg result file'.format(cls))
            filename = self.get_vg_results_file_template(output_dir).format(cls)
            with open(filename, 'wt') as f:
                for pred_ind, item in enumerate(predictions):
                    scores = item["scores"]
                    # shift model labels to the 1-based self._classes indexing
                    labels = item["labels"]+1
                    bbox = item["boxes"]
                    if cls_ind not in labels:
                        continue
                    dets = bbox[labels==cls_ind]
                    scores = scores[labels==cls_ind]
                    for k in range(dets.shape[0]):
                        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                                format(str(item["image_id"]), scores[k],
                                       dets[k, 0] + 1, dets[k, 1] + 1,
                                       dets[k, 2] + 1, dets[k, 3] + 1))
    def get_vg_results_file_template(self, output_dir, pickle=True, eval_attributes = False):
        # NOTE: the 'pickle' and 'eval_attributes' parameters are currently unused
        filename = 'detections_vg'+'_{:s}.txt'
        path = os.path.join(output_dir, filename)
        return path
    def do_python_eval(self, output_dir, pickle=True, eval_attributes = False):
        """Score the per-class detection files with vg_eval and report
        (weighted) mean AP plus per-class f-score-maximizing thresholds."""
        # We re-use parts of the pascal voc python code for visual genome
        aps = []
        nposs = []
        thresh = []
        # The PASCAL VOC metric changed in 2010
        use_07_metric = False
        print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        # Load ground truth
        if eval_attributes:
            classes = self._attributes
        else:
            classes = self._classes
        for i, cls in enumerate(classes):
            if cls == '__background__' or cls == '__no_attribute__':
                continue
            filename = self.get_vg_results_file_template(output_dir).format(cls)
            rec, prec, ap, scores, npos = vg_eval(
                filename, self.roidb, self.image_index, i, ovthresh=0.5,
                use_07_metric=use_07_metric, eval_attributes=eval_attributes)
            # Determine per class detection thresholds that maximise f score
            if npos > 1 and not (type(prec) == int and type(rec) == int and prec+rec ==0):
                f = np.nan_to_num((prec * rec) / (prec + rec))
                thresh += [scores[np.argmax(f)]]
            else:
                thresh += [0]
            aps += [ap]
            nposs += [float(npos)]
            print('AP for {} = {:.4f} (npos={:,})'.format(cls, ap, npos))
            if pickle:
                with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                    cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap,
                                  'scores': scores, 'npos': npos}, f)
        # Set thresh to mean for classes with poor results
        thresh = np.array(thresh)
        avg_thresh = np.mean(thresh[thresh != 0])
        thresh[thresh == 0] = avg_thresh
        if eval_attributes:
            filename = 'attribute_thresholds_vg.txt'
        else:
            filename = 'object_thresholds_vg.txt'
        path = os.path.join(output_dir, filename)
        with open(path, 'wt') as f:
            for i, cls in enumerate(classes[1:]):
                f.write('{:s} {:.3f}\n'.format(cls, thresh[i]))
        # mAP weighted by the number of positives per class
        weights = np.array(nposs)
        weights /= weights.sum()
        print('Mean AP = {:.4f}'.format(np.mean(aps)))
        print('Weighted Mean AP = {:.4f}'.format(np.average(aps, weights=weights)))
        print('Mean Detection Threshold = {:.3f}'.format(avg_thresh))
        # print('~~~~~~~~')
        # print('Results:')
        # for ap, npos in zip(aps, nposs):
        #     print('{:.3f}\t{:.3f}'.format(ap, npos))
        # print('{:.3f}'.format(np.mean(aps)))
        # print('~~~~~~~~')
        # print('')
        # print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** PASCAL VOC Python eval code.')
        print('--------------------------------------------------------------')
MPMQA | MPMQA-master/detector/evaluation/__init__.py | from .vg_evaluation import VGEvaluator | 38 | 38 | 38 | py |
MPMQA | MPMQA-master/detector/evaluation/vg_eval.py | # --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Bharath Hariharan
# --------------------------------------------------------
import numpy as np
def vg_eval(detpath,
            gt_roidb,
            image_index,
            classindex,
            ovthresh=0.5,
            use_07_metric=False,
            eval_attributes=False):
    """rec, prec, ap, sorted_scores, npos = voc_eval(
                                detpath,
                                gt_roidb,
                                image_index,
                                classindex,
                                [ovthresh],
                                [use_07_metric])

    Top level function that does the Visual Genome evaluation.

    detpath: Path to detections
    gt_roidb: List of ground truth structs.
    image_index: List of image ids.
    classindex: Category index
    [ovthresh]: Overlap threshold (default = 0.5)
    [use_07_metric]: Whether to use VOC07's 11 point AP computation
        (default False)

    Fix: np.zeros(...).astype(np.bool) -> astype(bool); the np.bool alias
    was deprecated in NumPy 1.20 and removed in NumPy 1.24, so the original
    raises AttributeError on current NumPy. Behavior is unchanged.
    """
    # extract gt objects for this class
    class_recs = {}
    npos = 0
    for item, imagename in zip(gt_roidb, image_index):
        if eval_attributes:
            bbox = item['boxes'][np.where(np.any(item['gt_attributes'].toarray() == classindex, axis=1))[0], :]
        else:
            bbox = item['boxes'][np.where(item['gt_classes'] == classindex)[0], :]
        # no difficult flags in VG: every gt box counts as a positive
        difficult = np.zeros((bbox.shape[0],)).astype(bool)
        det = [False] * bbox.shape[0]
        npos = npos + sum(~difficult)
        class_recs[str(imagename)] = {'bbox': bbox,
                                      'difficult': difficult,
                                      'det': det}
    if npos == 0:
        # No ground truth examples
        return 0, 0, 0, 0, npos
    # read dets
    with open(detpath, 'r') as f:
        lines = f.readlines()
    if len(lines) == 0:
        # No detection examples
        return 0, 0, 0, 0, npos
    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
    # sort by confidence
    sorted_ind = np.argsort(-confidence)
    sorted_scores = -np.sort(-confidence)
    BB = BB[sorted_ind, :]
    image_ids = [image_ids[x] for x in sorted_ind]
    # go down dets and mark TPs and FPs
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        if image_ids[d] not in class_recs:
            print(image_ids[d], detpath)
            continue
        R = class_recs[image_ids[d]]
        bb = BB[d, :].astype(float)
        ovmax = -np.inf
        BBGT = R['bbox'].astype(float)
        if BBGT.size > 0:
            # compute overlaps
            # intersection
            ixmin = np.maximum(BBGT[:, 0], bb[0])
            iymin = np.maximum(BBGT[:, 1], bb[1])
            ixmax = np.minimum(BBGT[:, 2], bb[2])
            iymax = np.minimum(BBGT[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih
            # union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (BBGT[:, 2] - BBGT[:, 0] + 1.) *
                   (BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)
        if ovmax > ovthresh:
            if not R['difficult'][jmax]:
                if not R['det'][jmax]:
                    # first sufficiently-overlapping detection for this gt box
                    tp[d] = 1.
                    R['det'][jmax] = 1
                else:
                    # duplicate detection of an already-matched gt box
                    fp[d] = 1.
        else:
            fp[d] = 1.
    # compute precision recall
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid divide by zero in case the first detection matches a difficult
    # ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)
    return rec, prec, ap, sorted_scores, npos
def voc_ap(rec, prec, use_07_metric=False):
    """Compute VOC average precision from recall/precision arrays.

    With ``use_07_metric`` the VOC07 11-point interpolation is used,
    otherwise the exact area under the interpolated PR curve.
    """
    if use_07_metric:
        # 11-point interpolation: average max precision at recall >= t
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            mask = rec >= t
            p = np.max(prec[mask]) if np.sum(mask) > 0 else 0
            ap += p / 11.
        return ap
    # exact area: pad with sentinels, take the running-max precision
    # envelope, then sum rectangle areas where recall changes
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
MPMQA | MPMQA-master/detector/dataset/balloon.py | import os
import json
import cv2
import numpy as np
from detectron2.structures import BoxMode
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
def get_balloon_dicts(img_dir):
    """Load the VIA-annotated balloon dataset from `img_dir` and return
    it as a list of detectron2 dataset dicts (one record per image,
    polygon segmentations, single 'balloon' category)."""
    with open(os.path.join(img_dir, "via_region_data.json")) as fp:
        raw_annos = json.load(fp)

    records = []
    for idx, entry in enumerate(raw_annos.values()):
        path = os.path.join(img_dir, entry['filename'])
        # VIA does not store image size, so read it from the file itself.
        h, w = cv2.imread(path).shape[:2]
        record = {
            'file_name': path,
            'image_id': idx,
            'height': h,
            'width': w,
        }

        objs = []
        for anno in entry['regions'].values():
            assert not anno['region_attributes']
            shape = anno['shape_attributes']
            xs = shape["all_points_x"]
            ys = shape['all_points_y']
            # Flatten to [x0, y0, x1, y1, ...], shifting each vertex by
            # 0.5 to land on pixel centers.
            poly = []
            for x, y in zip(xs, ys):
                poly.extend((x + 0.5, y + 0.5))
            objs.append({
                'bbox': [np.min(xs), np.min(ys), np.max(xs), np.max(ys)],
                'bbox_mode': BoxMode.XYXY_ABS,
                'segmentation': [poly],
                'category_id': 0,
            })
        record['annotations'] = objs
        records.append(record)
    return records
# Register both balloon splits with detectron2's catalogs at import time.
for d in ['train', 'val']:
    # `d=d` binds the loop variable now; a plain closure would capture the
    # final value of d for both registered lambdas.
    DatasetCatalog.register('balloon_'+d, lambda d=d: get_balloon_dicts('data/balloon/'+d))
    MetadataCatalog.get('balloon_'+d).set(thing_classes=['balloon'])
balloon_metadata = MetadataCatalog.get('balloon_train')
if __name__ == '__main__':
    # Smoke test: draw the annotations of 3 random training samples and
    # save them as <i>.png in the working directory.
    dataset_dicts = get_balloon_dicts('data/balloon/train')
    for i, d in enumerate(random.sample(dataset_dicts, 3)):
        img = cv2.imread(d['file_name'])
        # Visualizer expects RGB; cv2 loads BGR, hence the channel flips.
        visualizer = Visualizer(img[:, :, ::-1], metadata=balloon_metadata, scale=0.5)
        out = visualizer.draw_dataset_dict(d)
        # (the unused `temp = d` local was removed)
        cv2.imwrite(f'{i}.png', out.get_image()[:, :, ::-1])
| 2,193 | 31.746269 | 91 | py |
MPMQA | MPMQA-master/detector/dataset/vg.py | import os
import json
from unicodedata import category
import cv2
import numpy as np
from collections import defaultdict
from detectron2.structures import BoxMode
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
def get_vg_dicts(root, split):
    """Load a Visual Genome split into detectron2's dataset-dict format.

    root: dataset root containing annotations/ plus the VG_100K and
    VG_100K_2 image folders.  split: 'train', 'val' or 'test'.
    Returns a list of record dicts (file_name, image_id, height, width,
    annotations with XYXY_ABS boxes).
    Raises FileNotFoundError when an annotated image exists in neither
    image folder.
    """
    json_file = os.path.join(root, 'annotations', f"visual_genome_{split}.json")
    image_dir1 = os.path.join(root, 'VG_100K')
    image_dir2 = os.path.join(root, 'VG_100K_2')
    # List each directory once so the per-image membership test is O(1).
    image_set1 = set(os.listdir(image_dir1))
    image_set2 = set(os.listdir(image_dir2))

    with open(json_file) as f:
        data_dict = json.load(f)
    imgid2meta = {item['id']: item for item in data_dict['images']}
    imgid2anno = defaultdict(list)
    for item in data_dict['annotations']:
        imgid2anno[item['image_id']].append(item)

    dataset_dicts = []
    for imgid, meta in imgid2meta.items():
        record = {}
        if meta['file_name'] in image_set1:
            record['file_name'] = os.path.join(image_dir1, meta['file_name'])
        elif meta['file_name'] in image_set2:
            record['file_name'] = os.path.join(image_dir2, meta['file_name'])
        else:
            # BUG FIX: this branch previously formatted record["file_name"],
            # which is never assigned on this path, so it raised KeyError
            # instead of printing the message and raising FileNotFoundError.
            print(f'Image {meta["file_name"]} not found')
            raise FileNotFoundError
        record['image_id'] = imgid
        record['height'] = meta['height']
        record['width'] = meta['width']
        annos = imgid2anno[imgid]
        for i in range(len(annos)):
            annos[i]['bbox_mode'] = BoxMode.XYXY_ABS
            # Incoming boxes are COCO-style [x, y, width, height]; convert
            # in place to [x1, y1, x2, y2].  (The old code unpacked the
            # width into a variable named `h` and vice versa — same math,
            # corrected names.)
            x, y, w, h = annos[i]['bbox']
            annos[i]['bbox'] = (x, y, x + w, y + h)
        record['annotations'] = annos
        dataset_dicts.append(record)
    return dataset_dicts
# Read the category names once from the test split's annotation file; the
# three splits share the same category list.
with open('data/VisualGenome/annotations/visual_genome_test.json', 'r') as f:
    categories = json.load(f)['categories']
categories = [c['name'] for c in categories]
for d in ['train', 'val', 'test']:
    # `d=d` freezes the loop variable for each registered lambda.
    DatasetCatalog.register('vg_'+d, lambda d=d: get_vg_dicts('data/VisualGenome/', split=d))
    MetadataCatalog.get('vg_'+d).set(thing_classes=categories)
    MetadataCatalog.get('vg_'+d).set(evaluator_type='coco')
del categories
vg_metadata = MetadataCatalog.get('vg_train')
if __name__ == '__main__':
    # Smoke test: draw the annotations of 3 random samples and save each
    # as <image_id>.jpg in the working directory.
    dataset_dicts = get_vg_dicts('data/VisualGenome', 'train')
    for i, d in enumerate(random.sample(dataset_dicts, 3)):
        img = cv2.imread(d['file_name'])
        # Visualizer expects RGB; cv2 loads BGR, hence the channel flips.
        visualizer = Visualizer(img[:, :, ::-1], metadata=vg_metadata, scale=0.5)
        out = visualizer.draw_dataset_dict(d)
        # (the unused `temp = d` local was removed)
        cv2.imwrite(f'{d["image_id"]}.jpg', out.get_image()[:, :, ::-1])
| 2,929 | 34.731707 | 93 | py |
MPMQA | MPMQA-master/detector/dataset/publaynet.py | import os
import json
from unicodedata import category
import cv2
import numpy as np
from collections import defaultdict
from detectron2.structures import BoxMode
# import some common libraries
import numpy as np
import os, json, cv2, random
# import some common detectron2 utilities
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
def get_publaynet_dicts(root, split):
    """Load a PubLayNet split ('train'/'val') as detectron2 dataset dicts.

    Boxes are recomputed from the first segmentation polygon and stored
    as XYXY_ABS; COCO's 1-based category ids are shifted to 0-based.
    """
    with open(os.path.join(root, f"{split}.json")) as f:
        data = json.load(f)
    image_dir = os.path.join(root, split)

    meta_by_id = {img['id']: img for img in data['images']}
    annos_by_id = defaultdict(list)
    for anno in data['annotations']:
        annos_by_id[anno['image_id']].append(anno)

    # categories: 1 text, 2 title, 3 list, 4 table, 5 figure (1-based)
    records = []
    for img_id, meta in meta_by_id.items():
        annos = annos_by_id[img_id]
        for anno in annos:
            # The polygon is a flat [x0, y0, x1, y1, ...] list; take its
            # axis-aligned bounds as the box.
            poly = anno['segmentation'][0]
            xs, ys = poly[::2], poly[1::2]
            anno['bbox'] = [np.min(xs), np.min(ys), np.max(xs), np.max(ys)]
            anno['bbox_mode'] = BoxMode.XYXY_ABS
            anno['category_id'] -= 1
        records.append({
            'file_name': os.path.join(image_dir, meta['file_name']),
            'image_id': img_id,
            'height': meta['height'],
            'width': meta['width'],
            'annotations': annos,
        })
    return records
# Register both PubLayNet splits with detectron2's catalogs at import time.
for d in ['train', 'val']:
    # `d=d` freezes the loop variable for each registered lambda.
    DatasetCatalog.register('publaynet_'+d, lambda d=d: get_publaynet_dicts('data/publaynet/', split=d))
    MetadataCatalog.get('publaynet_'+d).set(thing_classes=['text', 'title', 'list', 'table', 'figure'])
    MetadataCatalog.get('publaynet_'+d).set(evaluator_type='coco')
publaynet_metadata = MetadataCatalog.get('publaynet_train')
if __name__ == '__main__':
    # Smoke test: draw the annotations of 3 random training samples and
    # save them as <i>.png in the working directory.
    dataset_dicts = get_publaynet_dicts('data/publaynet', 'train')
    for i, d in enumerate(random.sample(dataset_dicts, 3)):
        img = cv2.imread(d['file_name'])
        # Visualizer expects RGB; cv2 loads BGR, hence the channel flips.
        visualizer = Visualizer(img[:, :, ::-1], metadata=publaynet_metadata, scale=0.5)
        out = visualizer.draw_dataset_dict(d)
        # (the unused `temp = d` local was removed)
        cv2.imwrite(f'{i}.png', out.get_image()[:, :, ::-1])
| 2,450 | 34.014286 | 104 | py |
MPMQA | MPMQA-master/detector/bua/visual_genome.py | # -*- coding: utf-8 -*-
import contextlib
import io
import logging
import os
from fvcore.common.file_io import PathManager
from fvcore.common.timer import Timer
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.structures import BoxMode
logger = logging.getLogger(__name__)
"""
load json for mode detectron2
"""
def load_coco_with_attributes_json(json_file,
                                   image_root,
                                   dataset_name=None,
                                   extra_annotation_keys=None):
    """
    Extend load_coco_json() with additional support for attributes.

    Parses a COCO-format json with pycocotools and returns a list of
    detectron2 dataset dicts.  In addition to the standard fields, each
    instance may carry "attribute_ids": a fixed-width list of attribute
    class ids padded with -1.
    """
    from pycocotools.coco import COCO
    timer = Timer()
    json_file = PathManager.get_local_path(json_file)
    # COCO() prints to stdout; silence it.
    with contextlib.redirect_stdout(io.StringIO()):
        coco_api = COCO(json_file)
    if timer.seconds() > 1:
        logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
    id_map = None
    if dataset_name is not None:
        # Record category names on the dataset's metadata and, when the
        # category ids are not contiguous from 1, build a remapping to
        # contiguous 0-based ids.
        meta = MetadataCatalog.get(dataset_name)
        cat_ids = sorted(coco_api.getCatIds())
        cats = coco_api.loadCats(cat_ids)
        thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
        meta.thing_classes = thing_classes
        if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
            if "coco" not in dataset_name:
                logger.warning(
                    """
                    Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
                    """
                )
        id_map = {v: i for i, v in enumerate(cat_ids)}
        meta.thing_dataset_id_to_contiguous_id = id_map
    img_ids = sorted(coco_api.imgs.keys())
    imgs = coco_api.loadImgs(img_ids)
    anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
    if "minival" not in json_file:
        # The COCO 2014 minival jsons are known to repeat annotation ids;
        # for every other file duplicated ids are an error.
        ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
        assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
            json_file
        )
    imgs_anns = list(zip(imgs, anns))
    logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))
    dataset_dicts = []
    ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or [])
    num_instances_without_valid_segmentation = 0
    for (img_dict, anno_dict_list) in imgs_anns:
        record = {}
        record["file_name"] = os.path.join(image_root, img_dict["file_name"])
        record["height"] = img_dict["height"]
        record["width"] = img_dict["width"]
        image_id = record["image_id"] = img_dict["id"]
        objs = []
        for anno in anno_dict_list:
            assert anno["image_id"] == image_id
            assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'
            obj = {key: anno[key] for key in ann_keys if key in anno}
            segm = anno.get("segmentation", None)
            if segm:
                if not isinstance(segm, dict):
                    # Polygon list: drop degenerate polygons (odd length or
                    # fewer than 3 points); skip the instance if none remain.
                    segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
                    if len(segm) == 0:
                        num_instances_without_valid_segmentation += 1
                        continue
                obj["segmentation"] = segm
            keypts = anno.get("keypoints", None)
            if keypts:
                # Shift x/y to pixel centers; every 3rd value is the
                # visibility flag and is left untouched.
                for idx, v in enumerate(keypts):
                    if idx % 3 != 2:
                        keypts[idx] = v + 0.5
                obj["keypoints"] = keypts
            attrs = anno.get("attribute_ids", None)
            if attrs:  # list[int]
                obj["attribute_ids"] = attrs
            attr = anno.get("attribute", None)
            if attr:
                # NOTE import from bua
                # obj["attributes"] = attr  # loaded as-is
                # obj["attribute_ids"] = attr  # loaded as-is
                # Pad/convert to a fixed-width list of 0-based attribute ids;
                # unused slots stay -1.
                max_attributes_per_ins = 16
                attributes = [-1 for _ in range(max_attributes_per_ins)]
                for idx, a in enumerate(attr):
                    attributes[idx] = a - 1  # bua train/val jsons use 1-based attribute ids (1-400)
                obj["attribute_ids"] = attributes
            obj["bbox_mode"] = BoxMode.XYWH_ABS
            if id_map:
                obj["category_id"] = id_map[obj["category_id"]]
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    if num_instances_without_valid_segmentation > 0:
        logger.warning(
            "Filtered out {} instances without valid segmentation. "
            "There might be issues in your dataset generation process.".format(
                num_instances_without_valid_segmentation
            )
        )
    return dataset_dicts
def register_coco_instances_with_attributes(name, metadata, json_file, image_root):
    """Register an attribute-augmented COCO dataset under `name`.

    The loader is registered lazily; json/image paths, the 'coco'
    evaluator type and any extra `metadata` entries are attached to the
    dataset's MetadataCatalog record.
    """
    def _load():
        return load_coco_with_attributes_json(json_file, image_root, name)

    DatasetCatalog.register(name, _load)
    MetadataCatalog.get(name).set(json_file=json_file,
                                  image_root=image_root,
                                  evaluator_type="coco",
                                  **metadata)
# ==== Predefined splits for visual genome images ===========
# Maps dataset-catalog key -> (image dir, annotation json); both paths are
# relative to the dataset root passed to register_all_vg().
_PREDEFINED_SPLITS_VG = {
    "visual_genome_train": ("VisualGenome/images",
                            "VisualGenome/annotations/visual_genome_train.json"),
    "visual_genome_val": ("VisualGenome/images",
                          "VisualGenome/annotations/visual_genome_val.json"),
    "visual_genome_test": ("VisualGenome/images",
                           "VisualGenome/annotations/visual_genome_test.json"),
}
def register_all_vg(root):
    """Register every predefined Visual Genome split found in
    _PREDEFINED_SPLITS_VG, resolving paths against `root`."""
    for name, (image_dir, anno_file) in _PREDEFINED_SPLITS_VG.items():
        register_coco_instances_with_attributes(
            name,
            {},  # no extra metadata
            os.path.join(root, anno_file),
            os.path.join(root, image_dir),
        )
# Register them all under "./datasets"
# DETECTRON2_DATASETS overrides the default dataset root ("data").
_root = os.getenv("DETECTRON2_DATASETS", "data")
register_all_vg(_root)
MPMQA | MPMQA-master/detector/bua/__init__.py | from .d2 import add_attribute_config
from .caffe import add_bottom_up_attention_config
def add_config(args, cfg):
    """Extend `cfg` with the options required by the selected backend.

    args.mode selects the flavor: "caffe" (bottom-up-attention weights)
    or "d2" (native detectron2 weights).
    Raises Exception for any other mode.
    """
    if args.mode == "caffe":
        add_bottom_up_attention_config(cfg, True)
    elif args.mode == "d2":
        add_attribute_config(cfg)
    else:
        # BUG FIX: the message previously formatted args.model, which does
        # not exist (the attribute checked above is args.mode), so this
        # path raised AttributeError instead of the intended Exception.
        raise Exception("detection model not supported: {}".format(args.mode))
from . import visual_genome | 373 | 33 | 79 | py |
MPMQA | MPMQA-master/detector/bua/d2/config.py | # -*- coding: utf-8 -*-
from detectron2.config import CfgNode as CN
"""
config for mode detectron2
"""
def add_attribute_config(cfg):
    """Extend a detectron2 config with the attribute-head, box-regression
    loss-weight and feature-extraction options used by the "d2" mode."""
    # --- attribute prediction -------------------------------------------
    cfg.MODEL.ATTRIBUTE_ON = False            # whether to predict attributes
    cfg.INPUT.MAX_ATTR_PER_INS = 16           # max attributes per foreground instance
    cfg.MODEL.ROI_ATTRIBUTE_HEAD = CN()
    # Object-class embedding combined with visual features to predict
    # attributes.
    cfg.MODEL.ROI_ATTRIBUTE_HEAD.OBJ_EMBED_DIM = 256
    cfg.MODEL.ROI_ATTRIBUTE_HEAD.FC_DIM = 512        # hidden fc width
    cfg.MODEL.ROI_ATTRIBUTE_HEAD.LOSS_WEIGHT = 0.2   # 0.2 is best per analysis
    cfg.MODEL.ROI_ATTRIBUTE_HEAD.NUM_CLASSES = 400   # number of attribute classes
    # --- box regression loss adjustment ---------------------------------
    cfg.MODEL.RPN.BBOX_LOSS_WEIGHT = 1.0             # RPN box regression
    cfg.MODEL.ROI_BOX_HEAD.BBOX_LOSS_WEIGHT = 1.0    # R-CNN box regression
    # --- feature extraction ---------------------------------------------
    cfg.MODEL.EXTRACT_FEATS = False
    cfg.MODEL.EXTRACT_MODE = 1
    cfg.MODEL.BUA = CN()
    cfg.MODEL.BUA.EXTRACT_FEATS = False
    cfg.MODEL.BUA.EXTRACTOR = CN()
    cfg.MODEL.BUA.ATTRIBUTE_ON = False
    # EXTRACTOR.MODE: 1 = extract roi features, 2 = extract bbox only,
    # 3 = extract roi features at ground-truth boxes.
    cfg.MODEL.BUA.EXTRACTOR.MODE = 1
    # post-processing bounds used by the extractor
    cfg.MODEL.BUA.EXTRACTOR.MIN_BOXES = 10
    cfg.MODEL.BUA.EXTRACTOR.MAX_BOXES = 100
    cfg.MODEL.BUA.EXTRACTOR.CONF_THRESH = 0.2
    cfg.MODEL.BUA.EXTRACTOR.OUTPUT_DIR = ".output/"
MPMQA | MPMQA-master/detector/bua/d2/__init__.py | from .dataloader.build_loader import (
build_detection_train_loader_with_attributes,
build_detection_test_loader_with_attributes,
)
from .modeling.roi_heads import AttributeRes5ROIHeads
from .. import visual_genome
from .config import add_attribute_config | 263 | 36.714286 | 53 | py |
MPMQA | MPMQA-master/detector/bua/d2/modeling/roi_heads.py |
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.layers import ShapeSpec
from detectron2.modeling.roi_heads import (
build_box_head,
build_mask_head,
select_foreground_proposals,
ROI_HEADS_REGISTRY,
ROI_BOX_HEAD_REGISTRY,
ROIHeads,
Res5ROIHeads,
StandardROIHeads,
)
from detectron2.modeling.roi_heads.box_head import FastRCNNConvFCHead
from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
from detectron2.modeling.poolers import ROIPooler
from bua.caffe.modeling.box_regression import BUABox2BoxTransform
"""
roi head for mode detectron2
"""
@ROI_BOX_HEAD_REGISTRY.register()
class AttributeFastRCNNConvFCHead(FastRCNNConvFCHead):
    """
    FastRCNNConvFCHead variant whose forward() returns both the final FC
    output and the input to the last FC layer (penultimate features).
    """
    def forward(self, x):
        for conv in self.conv_norm_relus:
            x = conv(x)
        penultimate = None
        if len(self.fcs):
            if x.dim() > 2:
                x = torch.flatten(x, start_dim=1)
            # Track the tensor fed into each fc so that after the loop
            # `penultimate` holds the last fc layer's input.
            for fc in self.fcs:
                penultimate = x
                x = F.relu(fc(penultimate))
        return x, penultimate
class AttributePredictor(nn.Module):
    """
    Attribute head: concatenates per-roi visual features with an embedding
    of the (predicted or ground-truth) object class, scores every
    attribute class, and computes the padded multi-label loss.
    """

    def __init__(self, cfg, input_dim):
        super().__init__()
        # fmt: off
        self.num_objs         = cfg.MODEL.ROI_HEADS.NUM_CLASSES
        self.obj_embed_dim    = cfg.MODEL.ROI_ATTRIBUTE_HEAD.OBJ_EMBED_DIM
        self.fc_dim           = cfg.MODEL.ROI_ATTRIBUTE_HEAD.FC_DIM
        self.num_attributes   = cfg.MODEL.ROI_ATTRIBUTE_HEAD.NUM_CLASSES
        self.max_attr_per_ins = cfg.INPUT.MAX_ATTR_PER_INS
        self.loss_weight      = cfg.MODEL.ROI_ATTRIBUTE_HEAD.LOSS_WEIGHT
        # fmt: on

        # Class embedding has one extra row for the background class.
        self.obj_embed = nn.Embedding(self.num_objs + 1, self.obj_embed_dim)
        self.fc = nn.Sequential(
            nn.Linear(input_dim + self.obj_embed_dim, self.fc_dim),
            nn.ReLU(),
        )
        self.attr_score = nn.Linear(self.fc_dim, self.num_attributes)
        nn.init.normal_(self.attr_score.weight, std=0.01)
        nn.init.constant_(self.attr_score.bias, 0)

    def forward(self, x, obj_labels):
        # Visual feature + class embedding -> attribute logits (N, A).
        joint = torch.cat((x, self.obj_embed(obj_labels)), dim=1)
        return self.attr_score(self.fc(joint))

    def loss(self, score, label):
        """score: (N, A) logits; label: (N, max_attr_per_ins) attribute
        ids padded with -1.  Returns {"loss_attr": scalar}."""
        n = score.shape[0]
        # Repeat each row's logits once per ground-truth attribute slot.
        expanded = score.unsqueeze(1).expand(
            n, self.max_attr_per_ins, self.num_attributes
        ).contiguous().view(-1, self.num_attributes)
        # Each instance's loss is averaged over its valid attributes.
        # Instances with zero attributes get weight 0: reciprocal of 0 is
        # inf, which the `> 1` clamp zeroes out.
        per_ins_counts = (label >= 0).sum(dim=1)
        inv_weights = per_ins_counts.repeat(self.max_attr_per_ins, 1).transpose(0, 1).flatten()
        weights = inv_weights.float().reciprocal()
        weights[weights > 1] = 0.
        n_valid = len(per_ins_counts.nonzero())
        flat_label = label.view(-1)
        raw = F.cross_entropy(expanded, flat_label, reduction="none", ignore_index=-1)
        per_ins = (raw * weights).view(n, -1).sum(dim=1)
        if n_valid > 0:
            attr_loss = per_ins.sum() * self.loss_weight / n_valid
        else:
            attr_loss = per_ins.sum() * 0.
        return {"loss_attr": attr_loss}
class AttributeROIHeads(ROIHeads):
    """
    Mixin extending ROIHeads with attribute prediction.  Subclasses are
    expected to create `self.attribute_predictor` (an AttributePredictor).
    """

    def forward_attribute_score(self, box_features, obj_labels):
        # Attribute logits for the given features / object classes.
        return self.attribute_predictor(box_features, obj_labels)

    def _gather_fg_attributes(self, proposals, box_features):
        # Keep only foreground proposals and line up their features with
        # their ground-truth classes and attribute labels.
        proposals, fg_masks = select_foreground_proposals(
            proposals, self.num_classes
        )
        fg_features = box_features[torch.cat(fg_masks, dim=0)]
        gt_classes = torch.cat([p.gt_classes for p in proposals])
        gt_attributes = torch.cat([p.gt_attributes for p in proposals], dim=0)
        return fg_features, gt_classes, gt_attributes

    def forward_attribute_loss(self, proposals, box_features):
        fg_features, gt_classes, gt_attributes = self._gather_fg_attributes(
            proposals, box_features
        )
        scores = self.attribute_predictor(fg_features, gt_classes)
        return self.attribute_predictor.loss(scores, gt_attributes)

    def forward_attr(self, proposals, box_features):
        fg_features, gt_classes, _ = self._gather_fg_attributes(
            proposals, box_features
        )
        return self.attribute_predictor(fg_features, gt_classes)
@ROI_HEADS_REGISTRY.register()
class AttributeRes5ROIHeads(AttributeROIHeads, Res5ROIHeads):
    """
    An extension of Res5ROIHeads to include attribute prediction.

    Also supports a feature-extraction mode (cfg.MODEL.BUA.EXTRACT_FEATS)
    in which forward() returns raw boxes/probabilities/features instead of
    Instances.
    """
    def __init__(self, cfg, input_shape):
        # super(Res5ROIHeads, self).__init__(cfg, input_shape) # d2 0.1.1
        super(Res5ROIHeads, self).__init__(cfg)  # d2 0.2.1
        # added to fit d2 0.2.1
        self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
        # This head pools from exactly one backbone feature map.
        assert len(self.in_features) == 1
        # fmt: off
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        pooler_scales = (1.0 / input_shape[self.in_features[0]].stride, )
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        self.mask_on = cfg.MODEL.MASK_ON
        self.attribute_on = cfg.MODEL.BUA.ATTRIBUTE_ON
        self.extract_on = cfg.MODEL.BUA.EXTRACT_FEATS
        self.extractor_mode = cfg.MODEL.BUA.EXTRACTOR.MODE
        # fmt: on
        assert not cfg.MODEL.KEYPOINT_ON
        self.pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        self.box2box_transform = BUABox2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
        self.res5, out_channels = self._build_res5_block(cfg)
        self.box_predictor = FastRCNNOutputLayers(
            cfg, ShapeSpec(channels=out_channels, height=1, width=1)
        )
        if self.mask_on:
            self.mask_head = build_mask_head(
                cfg,
                ShapeSpec(channels=out_channels, width=pooler_resolution, height=pooler_resolution),
            )
        if self.attribute_on:
            self.attribute_predictor = AttributePredictor(cfg, out_channels)
    def forward(self, images, features, proposals, targets=None):
        """Training: returns ([], losses).  Extraction: returns raw
        boxes / class probs / pooled features (mode-dependent, see below).
        Inference: returns (pred_instances, {})."""
        del images
        if self.training:
            assert targets
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        proposal_boxes = [x.proposal_boxes for x in proposals]
        box_features = self._shared_roi_transform(
            [features[f] for f in self.in_features], proposal_boxes
        )
        # Global-average-pool the res5 output to one vector per roi.
        feature_pooled = box_features.mean(dim=[2, 3])
        predictions = self.box_predictor(feature_pooled)
        if self.training:
            del features
            losses = self.box_predictor.losses(predictions, proposals)
            if self.mask_on:
                proposals, fg_selection_masks = select_foreground_proposals(
                    proposals, self.num_classes
                )
                mask_features = box_features[torch.cat(fg_selection_masks, dim=0)]
                del box_features
                losses.update(self.mask_head(mask_features, proposals))
            if self.attribute_on:
                losses.update(self.forward_attribute_loss(proposals, feature_pooled))
            return [], losses
        elif self.extract_on:
            pred_class_logits, pred_proposal_deltas = predictions
            # pred_class_logits = pred_class_logits[:, :-1] # background is last
            cls_lables = torch.argmax(pred_class_logits, dim=1)
            num_preds_per_image = [len(p) for p in proposals]
            if self.extractor_mode == 1 or self.extractor_mode == 3:
                # Modes 1/3: roi features (per image), optionally with
                # attribute scores.
                if self.attribute_on:
                    attr_scores = self.forward_attribute_score(feature_pooled, cls_lables)
                    return proposal_boxes, self.predict_probs(pred_class_logits, num_preds_per_image), feature_pooled.split(num_preds_per_image, dim=0), attr_scores.split(num_preds_per_image, dim=0)
                else:
                    return proposal_boxes, self.predict_probs(pred_class_logits, num_preds_per_image), feature_pooled.split(num_preds_per_image, dim=0)
            elif self.extractor_mode == 2:
                # Mode 2: regressed boxes + class probabilities only.
                return self.predict_boxes(proposals, pred_proposal_deltas, num_preds_per_image), self.predict_probs(pred_class_logits, num_preds_per_image)
            else:
                raise ValueError('BUA.EXTRATOR.MODE ERROR')
        else:
            pred_instances, _ = self.box_predictor.inference(predictions, proposals)
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}
    def get_conv5_features(self, features):
        # Run the res5 block on the (single) selected feature map.
        features = [features[f] for f in self.in_features]
        return self.res5(features[0])
    def get_roi_features(self, features, proposals):
        """Return (res5 roi maps, pooled per-roi vectors, None)."""
        assert len(self.in_features) == 1
        features = [features[f] for f in self.in_features]
        box_features = self._shared_roi_transform(
            features, [x.proposal_boxes for x in proposals]
        )
        pooled_features = box_features.mean(dim=[2, 3])
        return box_features, pooled_features, None
    def predict_boxes(self, proposals, pred_proposal_deltas, num_preds_per_image):
        """
        Returns:
            list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
                for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
                the number of predicted objects for image i and B is the box dimension (4 or 5)
        """
        # Always use 1 image per worker during inference since this is the
        # standard when reporting inference time in papers.
        box_type = type(proposals[0].proposal_boxes)
        # cat(..., dim=0) concatenates over all images in the batch
        proposals = box_type.cat([p.proposal_boxes for p in proposals])
        num_pred = len(proposals)
        B = proposals.tensor.shape[1]
        K = pred_proposal_deltas.shape[1] // B
        boxes = self.box2box_transform.apply_deltas(
            pred_proposal_deltas,
            proposals.tensor,
        )
        return boxes.view(num_pred, K * B).split(num_preds_per_image, dim=0)
    def predict_probs(self, pred_class_logits, num_preds_per_image):
        """
        Returns:
            list[Tensor]: A list of Tensors of predicted class probabilities for each image.
                Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
                for image i.
        """
        probs = F.softmax(pred_class_logits, dim=-1)
        probs = probs[:, :-1]  # background is last
        return probs.split(num_preds_per_image, dim=0)
@ROI_HEADS_REGISTRY.register()
class AttributeStandardROIHeads(AttributeROIHeads, StandardROIHeads):
    """
    An extension of StandardROIHeads to include attribute prediction.
    """
    def __init__(self, cfg, input_shape):
        super(StandardROIHeads, self).__init__(cfg, input_shape)
        self._init_box_head(cfg, input_shape)
        self._init_mask_head(cfg, input_shape)
        self._init_keypoint_head(cfg, input_shape)
    def _init_box_head(self, cfg, input_shape):
        """Build the pooler, box head, box predictor and (optionally) the
        attribute predictor."""
        # fmt: off
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        self.train_on_pred_boxes = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
        # NOTE(review): this class reads MODEL.ATTRIBUTE_ON while
        # AttributeRes5ROIHeads reads MODEL.BUA.ATTRIBUTE_ON — confirm the
        # two flags are kept in sync by the config.
        self.attribute_on = cfg.MODEL.ATTRIBUTE_ON
        # fmt: on
        # All selected feature maps must share a channel count.
        in_channels = [input_shape[f].channels for f in self.in_features]
        assert len(set(in_channels)) == 1, in_channels
        in_channels = in_channels[0]
        self.box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        self.box_head = build_box_head(
            cfg,
            ShapeSpec(
                channels=in_channels, height=pooler_resolution, width=pooler_resolution
            ),
        )
        self.box_predictor = FastRCNNOutputLayers(cfg, self.box_head.output_shape)
        if self.attribute_on:
            self.attribute_predictor = AttributePredictor(
                cfg, self.box_head.output_shape.channels
            )
    def _forward_box(self, features, proposals):
        """Training: returns the loss dict (box + optional attribute loss).
        Inference: returns (pred_instances, kept indices) for image 0."""
        features = [features[f] for f in self.in_features]
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        box_features, _ = self.box_head(box_features)
        predictions = self.box_predictor(box_features)
        if self.training:
            if self.train_on_pred_boxes:
                # BUG FIX: `Boxes` was referenced without being imported
                # anywhere in this module, raising NameError whenever
                # TRAIN_ON_PRED_BOXES was enabled.
                from detectron2.structures import Boxes
                with torch.no_grad():
                    pred_boxes = self.box_predictor.predict_boxes_for_gt_classes(
                        predictions, proposals
                    )
                    for proposals_per_image, pred_boxes_per_image in zip(
                        proposals, pred_boxes
                    ):
                        proposals_per_image.proposal_boxes = Boxes(pred_boxes_per_image)
            losses = self.box_predictor.losses(predictions, proposals)
            if self.attribute_on:
                losses.update(self.forward_attribute_loss(proposals, box_features))
                del box_features
            return losses
        else:
            pred_instances, r_indices = self.box_predictor.inference(
                predictions, proposals
            )
            return pred_instances[0], r_indices[0]
    def get_conv5_features(self, features):
        # Return the (single) selected backbone feature map unchanged.
        assert len(self.in_features) == 1
        features = [features[f] for f in self.in_features]
        return features[0]
    def get_roi_features(self, features, proposals):
        """Return (pooled roi maps, last-fc features, penultimate-fc
        features) — the extra fc6 output comes from
        AttributeFastRCNNConvFCHead."""
        features = [features[f] for f in self.in_features]
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        fc7, fc6 = self.box_head(box_features)
        return box_features, fc7, fc6
MPMQA | MPMQA-master/detector/bua/d2/dataloader/build_loader.py |
import logging
import operator
import torch.utils.data
from detectron2.utils.comm import get_world_size
from detectron2.data import samplers
from detectron2.data.build import get_detection_dataset_dicts, worker_init_reset_seed, trivial_batch_collator
from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, MapDataset
from .dataset_mapper import AttributeDatasetMapper
"""
data_loader for mode detectron2
"""
def build_detection_train_loader_with_attributes(cfg, mapper=None):
    """Build the training DataLoader, defaulting to AttributeDatasetMapper
    so that attribute annotations survive the mapping step.

    Mirrors detectron2's build_detection_train_loader: per-worker batch
    size is IMS_PER_BATCH divided by the world size, with optional
    aspect-ratio grouping.
    """
    num_workers = get_world_size()
    images_per_batch = cfg.SOLVER.IMS_PER_BATCH
    assert (
        images_per_batch % num_workers == 0
    ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    assert (
        images_per_batch >= num_workers
    ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
        images_per_batch, num_workers
    )
    images_per_worker = images_per_batch // num_workers
    # NOTE above is added
    dataset_dicts = get_detection_dataset_dicts(
        cfg.DATASETS.TRAIN,
        filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
        min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
        if cfg.MODEL.KEYPOINT_ON
        else 0,
        proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
    )
    dataset = DatasetFromList(dataset_dicts, copy=False)
    if mapper is None:
        # Default mapper keeps "attribute_ids" on the instances.
        mapper = AttributeDatasetMapper(cfg, True)
    dataset = MapDataset(dataset, mapper)
    sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
    logger = logging.getLogger(__name__)
    logger.info("Using training sampler {}".format(sampler_name))
    if sampler_name == "TrainingSampler":
        sampler = samplers.TrainingSampler(len(dataset))
    elif sampler_name == "RepeatFactorTrainingSampler":
        sampler = samplers.RepeatFactorTrainingSampler(
            dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
        )
    else:
        raise ValueError("Unknown training sampler: {}".format(sampler_name))
    if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
        # Load single images (batch_sampler=None + itemgetter(0)) and let
        # AspectRatioGroupedDataset assemble batches of like-shaped images.
        data_loader = torch.utils.data.DataLoader(
            dataset,
            sampler=sampler,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            batch_sampler=None,
            collate_fn=operator.itemgetter(0),
            worker_init_fn=worker_init_reset_seed,
        )
        data_loader = AspectRatioGroupedDataset(data_loader, images_per_worker)
    else:
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, images_per_worker, drop_last=True
        )
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=cfg.DATALOADER.NUM_WORKERS,
            batch_sampler=batch_sampler,
            collate_fn=trivial_batch_collator,
        )
    return data_loader
def build_detection_test_loader_with_attributes(cfg, dataset_name, mapper=None):
    """Build a 1-image-per-batch test loader whose default mapper keeps
    attribute annotations (AttributeDatasetMapper)."""
    if cfg.MODEL.LOAD_PROPOSALS:
        split_idx = list(cfg.DATASETS.TEST).index(dataset_name)
        proposal_files = [cfg.DATASETS.PROPOSAL_FILES_TEST[split_idx]]
    else:
        proposal_files = None
    dicts = get_detection_dataset_dicts(
        [dataset_name],
        filter_empty=False,
        proposal_files=proposal_files,
    )
    if mapper is None:
        mapper = AttributeDatasetMapper(cfg, False)
    dataset = MapDataset(DatasetFromList(dicts), mapper)
    # Deterministic, sharded across workers; batch size fixed to 1 image.
    sampler = samplers.InferenceSampler(len(dataset))
    batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 1, drop_last=False)
    return torch.utils.data.DataLoader(
        dataset,
        num_workers=cfg.DATALOADER.NUM_WORKERS,
        batch_sampler=batch_sampler,
        collate_fn=trivial_batch_collator,
    )
MPMQA | MPMQA-master/detector/bua/d2/dataloader/dataset_mapper.py |
import copy
import logging
import numpy as np
import torch
from fvcore.common.file_io import PathManager
from PIL import Image
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from detectron2.data import DatasetMapper
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
Keypoints,
PolygonMasks,
polygons_to_bitmask,
)
"""
data mapper for mode detecrton2
"""
def annotations_to_instances_with_attributes(annos,
                                             image_size,
                                             mask_format="polygon",
                                             load_attributes=False,
                                             max_attr_per_ins=16):
    """
    Extend the function annotations_to_instances() to support attributes.

    Builds an Instances object with gt_boxes / gt_classes (always),
    gt_masks / gt_keypoints (when present in the annotations), and —
    when load_attributes is True — gt_attributes: an
    (N, max_attr_per_ins) int64 tensor padded with -1.
    """
    # Convert every box to absolute XYXY and clip to the image bounds.
    boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
    target = Instances(image_size)
    boxes = target.gt_boxes = Boxes(boxes)
    boxes.clip(image_size)
    classes = [obj["category_id"] for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes
    if len(annos) and "segmentation" in annos[0]:
        segms = [obj["segmentation"] for obj in annos]
        if mask_format == "polygon":
            masks = PolygonMasks(segms)
        else:
            assert mask_format == "bitmask", mask_format
            masks = []
            for segm in segms:
                if isinstance(segm, list):
                    # polygon
                    masks.append(polygons_to_bitmask(segm, *image_size))
                elif isinstance(segm, dict):
                    # COCO RLE
                    # NOTE(review): mask_util is not imported anywhere in
                    # this module, so this branch raises NameError; it
                    # presumably needs `import pycocotools.mask as
                    # mask_util` — confirm against the upstream file.
                    masks.append(mask_util.decode(segm))
                elif isinstance(segm, np.ndarray):
                    assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
                        segm.ndim
                    )
                    # mask array
                    masks.append(segm)
                else:
                    raise ValueError(
                        "Cannot convert segmentation of type '{}' to BitMasks!"
                        "Supported types are: polygons as list[list[float] or ndarray],"
                        " COCO-style RLE as a dict, or a full-image segmentation mask "
                        "as a 2D ndarray.".format(type(segm))
                    )
            masks = BitMasks(
                torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
            )
        target.gt_masks = masks
    if len(annos) and "keypoints" in annos[0]:
        kpts = [obj.get("keypoints", []) for obj in annos]
        target.gt_keypoints = Keypoints(kpts)
    if len(annos) and load_attributes:
        # Fixed-width attribute table; unused slots stay -1 ("no attribute").
        attributes = -torch.ones((len(annos), max_attr_per_ins), dtype=torch.int64)
        for idx, anno in enumerate(annos):
            if "attribute_ids" in anno:
                for jdx, attr_id in enumerate(anno["attribute_ids"]):
                    attributes[idx, jdx] = attr_id
        target.gt_attributes = attributes
    return target
class AttributeDatasetMapper(DatasetMapper):
    """
    Extend detectron2's :class:`DatasetMapper` to also carry per-instance
    attribute annotations ("attribute_ids") through the transform pipeline
    and into ``Instances.gt_attributes``.
    """
    def __init__(self, cfg, is_train=True):
        """
        Args:
            cfg: detectron2 config; in addition to the standard mapper options,
                reads MODEL.BUA.ATTRIBUTE_ON and INPUT.MAX_ATTR_PER_INS.
            is_train (bool): whether this mapper feeds training or inference.
        """
        super().__init__(cfg, is_train)
        # fmt: off
        self.attribute_on     = cfg.MODEL.BUA.ATTRIBUTE_ON
        self.max_attr_per_ins = cfg.INPUT.MAX_ATTR_PER_INS
        # fmt: on
        # NOTE: rebuilt here to fit the detectron2 v0.2 API.
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
        else:
            self.crop_gen = None
        self.tfm_gens = utils.build_transform_gen(cfg, is_train)
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        self.mask_on = cfg.MODEL.MASK_ON
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): metadata of one image in detectron2 dataset format.
        Returns:
            dict: with "image" (CHW uint8 tensor) added; in training also
            "instances" (and "sem_seg" when a semantic-seg file is given).
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified below
        image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
        utils.check_image_size(dataset_dict, image)
        if "annotations" not in dataset_dict:
            image, transforms = T.apply_transform_gens(
                ([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
            )
        else:
            # Crop around one randomly chosen instance so that at least one
            # object is guaranteed to survive the crop.
            if self.crop_gen:
                crop_tfm = utils.gen_crop_transform_with_instance(
                    self.crop_gen.get_crop_size(image.shape[:2]),
                    image.shape[:2],
                    np.random.choice(dataset_dict["annotations"]),
                )
                image = crop_tfm.apply_image(image)
            image, transforms = T.apply_transform_gens(self.tfm_gens, image)
            if self.crop_gen:
                transforms = crop_tfm + transforms
        image_shape = image.shape[:2]
        dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        if self.load_proposals:
            # NOTE(review): `self.min_box_side_len` / `self.proposal_topk` are
            # expected to come from the parent DatasetMapper — confirm they
            # exist in the detectron2 version in use.
            utils.transform_proposals(
                dataset_dict, image_shape, transforms, self.min_box_side_len, self.proposal_topk
            )
        if not self.is_train:
            dataset_dict.pop("annotations", None)
            dataset_dict.pop("sem_seg_file_name", None)
            return dataset_dict
        if "annotations" in dataset_dict:
            for anno in dataset_dict["annotations"]:
                if not self.mask_on:
                    anno.pop("segmentation", None)
                if not self.keypoint_on:
                    anno.pop("keypoints", None)
                if not self.attribute_on:
                    # BUGFIX: pass a default so records without "attribute_ids"
                    # (e.g. plain COCO annotations) do not raise KeyError.
                    anno.pop("attribute_ids", None)
            annos = [
                utils.transform_instance_annotations(
                    obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
                )
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]
            instances = annotations_to_instances_with_attributes(
                annos, image_shape, mask_format=self.mask_format,
                load_attributes=self.attribute_on, max_attr_per_ins=self.max_attr_per_ins
            )
            # After a crop, tight boxes recomputed from the cropped masks are
            # more accurate than the transformed original boxes.
            if self.crop_gen and instances.has("gt_masks"):
                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
            dataset_dict["instances"] = utils.filter_empty_instances(instances)
        if "sem_seg_file_name" in dataset_dict:
            with PathManager.open(dataset_dict.pop("sem_seg_file_name"), "rb") as f:
                sem_seg_gt = Image.open(f)
                sem_seg_gt = np.asarray(sem_seg_gt, dtype="uint8")
            sem_seg_gt = transforms.apply_segmentation(sem_seg_gt)
            sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
            dataset_dict["sem_seg"] = sem_seg_gt
        return dataset_dict
| 7,189 | 38.505495 | 100 | py |
MPMQA | MPMQA-master/detector/bua/d2/dataloader/__init__.py | from .build_loader import (
build_detection_train_loader_with_attributes,
build_detection_test_loader_with_attributes,
)
from ... import visual_genome | 158 | 30.8 | 49 | py |
MPMQA | MPMQA-master/detector/bua/caffe/config.py | # -*- coding: utf-8 -*-
from detectron2.config import CfgNode as CN
def add_bottom_up_attention_config(cfg, caffe=False):
    """
    Add bottom-up-attention specific options (the MODEL.BUA sub-tree) to a
    detectron2 config, in place.

    Args:
        cfg: a detectron2 CfgNode to extend.
        caffe (bool): True when weights converted from the original Caffe
            release are used (stored as MODEL.BUA.CAFFE).
    """
    _C = cfg
    _C.MODEL.BUA = CN()
    _C.MODEL.BUA.CAFFE = caffe
    _C.MODEL.BUA.RESNET_VERSION = 1
    _C.MODEL.BUA.ATTRIBUTE_ON = False
    _C.MODEL.BUA.EXTRACT_FEATS = False
    _C.MODEL.BUA.RPN = CN()
    # out_channels of conv for bottom-up-attentions RPN.
    _C.MODEL.BUA.RPN.CONV_OUT_CHANNELS = 512
    _C.MODEL.BUA.EXTRACTOR = CN()
    # EXTRACTOR.MODE {1: extract roi features, 2: extract bbox only ,3: extract roi features by gt_bbox}
    _C.MODEL.BUA.EXTRACTOR.MODE = 1
    # config of postprocessing in extractor
    _C.MODEL.BUA.EXTRACTOR.MIN_BOXES = 10
    _C.MODEL.BUA.EXTRACTOR.MAX_BOXES = 100
    _C.MODEL.BUA.EXTRACTOR.CONF_THRESH = 0.2
    _C.MODEL.BUA.EXTRACTOR.OUTPUT_DIR = ".output/"
    _C.MODEL.BUA.ATTRIBUTE = CN()
    # 400 attributes + 1 extra slot (presumably "no attribute") — confirm
    # against the attribute head.
    _C.MODEL.BUA.ATTRIBUTE.NUM_CLASSES = 401
| 969 | 25.944444 | 104 | py |
MPMQA | MPMQA-master/detector/bua/caffe/__init__.py | from .config import add_bottom_up_attention_config
from .modeling.backbone import build_bua_resnet_backbone
from .modeling.rcnn import GeneralizedBUARCNN
from .modeling.roi_heads import BUACaffeRes5ROIHeads
from .modeling.rpn import StandardBUARPNHead, BUARPN | 259 | 51 | 56 | py |
MPMQA | MPMQA-master/detector/bua/caffe/postprocessing.py |
import numpy as np
import torch
from detectron2.structures import Instances
from modeling.layers.nms import nms # BC-compat
def extractor_postprocess(boxes, scores, features_pooled, input_per_image, extractor):
    """
    Select a bounded set of confident detections for ROI-feature extraction.

    For every foreground class (column 0 is skipped as background) a per-class
    NMS with IoU threshold 0.3 is run; each box then records the maximum
    surviving per-class score. Boxes scoring at least CONF_THRESH are kept,
    and the count is clamped into [MIN_BOXES, MAX_BOXES] by score rank.

    Args:
        boxes (Tensor): (R, 4) boxes in resized-image coordinates.
        scores (Tensor): (R, C) class scores; column 0 treated as background.
        features_pooled (Tensor): (R, D) pooled ROI features aligned with `boxes`.
        input_per_image (dict): must contain "im_scale", the resize factor
            that was applied to the input image.
        extractor: config node providing MIN_BOXES, MAX_BOXES, CONF_THRESH.

    Returns:
        tuple(Tensor, Tensor): (image_feat, image_bboxes) for the kept boxes,
        with boxes mapped back to original-image coordinates.
    """
    MIN_BOXES = extractor.MIN_BOXES
    MAX_BOXES = extractor.MAX_BOXES
    CONF_THRESH = extractor.CONF_THRESH

    cur_device = scores.device

    # Undo the input resize so boxes are in original-image coordinates.
    dets = boxes / input_per_image["im_scale"]

    # Per-box maximum class score that survives per-class NMS.
    max_conf = torch.zeros((scores.shape[0])).to(cur_device)
    for cls_ind in range(1, scores.shape[1]):
        cls_scores = scores[:, cls_ind]
        keep = nms(dets, cls_scores, 0.3)
        max_conf[keep] = torch.where(cls_scores[keep] > max_conf[keep],
                                        cls_scores[keep],
                                        max_conf[keep])

    # Threshold, then clamp the number of boxes into [MIN_BOXES, MAX_BOXES].
    keep_boxes = torch.nonzero(max_conf >= CONF_THRESH).flatten()
    if len(keep_boxes) < MIN_BOXES:
        keep_boxes = torch.argsort(max_conf, descending=True)[:MIN_BOXES]
    elif len(keep_boxes) > MAX_BOXES:
        keep_boxes = torch.argsort(max_conf, descending=True)[:MAX_BOXES]
    # keep_boxes = torch.argsort(max_conf, descending=True)[:100]
    # feat_list.append(feats[i][keep_boxes])
    image_feat = features_pooled[keep_boxes]
    image_bboxes = dets[keep_boxes]

    return image_feat, image_bboxes
return image_feat, image_bboxes | 2,055 | 36.381818 | 87 | py |
MPMQA | MPMQA-master/detector/bua/caffe/modeling/box_regression.py |
import math
import torch
from detectron2.structures import Boxes
from typing import List, Tuple, Union
# Value for clamping large dw and dh predictions. The heuristic is that we clamp
# such that dw and dh are no larger than what would transform a 16px box into a
# 1000px box (based on a small anchor, 16px, and a typical image size, 1000px).
_DEFAULT_SCALE_CLAMP = math.log(1000.0 / 16)
__all__ = ["BUABoxes", "BUABox2BoxTransform"]
class BUABoxes(Boxes):
    """
    A :class:`Boxes` subclass matching the box conventions of the original
    bottom-up-attention (Caffe) code: widths/heights are measured inclusively
    as ``x2 - x1 + 1`` and clipping bounds are ``size - 1``.

    Attributes:
        tensor: float matrix of Nx4.
    """

    BoxSizeType = Union[List[int], Tuple[int, int]]

    def __init__(self, tensor: torch.Tensor):
        super().__init__(tensor)

    def clip(self, box_size: BoxSizeType) -> None:
        """
        Clamp (in place) x coordinates into [0, width - 1] and y coordinates
        into [0, height - 1] (the Caffe inclusive-edge convention).

        Args:
            box_size (height, width): the clipping box's size.
        """
        assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!"
        height, width = box_size
        x_max, y_max = width - 1, height - 1
        for col, bound in ((0, x_max), (1, y_max), (2, x_max), (3, y_max)):
            self.tensor[:, col].clamp_(min=0, max=bound)

    def nonempty(self, threshold: int = 0) -> torch.Tensor:
        """
        Flag boxes whose inclusive width and height both exceed `threshold`.

        Returns:
            Tensor: boolean mask, True for non-empty boxes.
        """
        box = self.tensor
        widths = box[:, 2] - box[:, 0] + 1
        heights = box[:, 3] - box[:, 1] + 1
        return (widths > threshold) & (heights > threshold)

    def filter_boxes(self):
        """Boolean mask of boxes with strictly positive extent (x2 > x1 and y2 > y1)."""
        box = self.tensor
        return (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])

    def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "Boxes":
        """
        Index into the boxes, always returning a BUABoxes.

        A single int yields a 1x4 box; slices and boolean/index tensors follow
        PyTorch indexing semantics and may share storage with this object.
        """
        if isinstance(item, int):
            return BUABoxes(self.tensor[item].view(1, -1))
        selected = self.tensor[item]
        assert selected.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item)
        return BUABoxes(selected)
class BUABox2BoxTransform(object):
    """
    The R-CNN box-to-box transform with the Caffe "+1" width/height
    convention. A delta (dx, dy, dw, dh) shifts a box center by
    (dx * width, dy * height) and rescales its sides by exp(dw), exp(dh).
    """

    def __init__(self, weights, scale_clamp=_DEFAULT_SCALE_CLAMP):
        """
        Args:
            weights (4-element tuple): scaling factors applied to the
                (dx, dy, dw, dh) deltas; treated as hyperparameters.
            scale_clamp (float): upper bound on predicted dw/dh before exp(),
                so scaling factors stay finite.
        """
        self.weights = weights
        self.scale_clamp = scale_clamp

    def get_deltas(self, src_boxes, target_boxes):
        """
        Compute deltas (dx, dy, dw, dh) such that
        ``apply_deltas(deltas, src_boxes)`` reproduces `target_boxes`
        (up to clamping of very large deltas).

        Args:
            src_boxes (Tensor): source boxes, e.g. object proposals.
            target_boxes (Tensor): target boxes, e.g. ground truth.
        """
        assert isinstance(src_boxes, torch.Tensor), type(src_boxes)
        assert isinstance(target_boxes, torch.Tensor), type(target_boxes)

        def _geometry(b):
            # Inclusive-pixel widths/heights (the Caffe "+1") and centers.
            w = b[:, 2] - b[:, 0] + 1
            h = b[:, 3] - b[:, 1] + 1
            return w, h, b[:, 0] + 0.5 * w, b[:, 1] + 0.5 * h

        src_w, src_h, src_cx, src_cy = _geometry(src_boxes)
        tgt_w, tgt_h, tgt_cx, tgt_cy = _geometry(target_boxes)

        wx, wy, ww, wh = self.weights
        deltas = torch.stack(
            (
                wx * (tgt_cx - src_cx) / src_w,
                wy * (tgt_cy - src_cy) / src_h,
                ww * torch.log(tgt_w / src_w),
                wh * torch.log(tgt_h / src_h),
            ),
            dim=1,
        )
        assert (src_w > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!"
        return deltas

    def apply_deltas(self, deltas, boxes):
        """
        Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.

        Args:
            deltas (Tensor): (N, k*4) deltas, k >= 1 classes per box.
            boxes (Tensor): (N, 4) boxes to transform.
        """
        assert torch.isfinite(deltas).all().item(), "Box regression deltas become infinite or NaN!"
        boxes = boxes.to(deltas.dtype)

        widths = boxes[:, 2] - boxes[:, 0] + 1
        heights = boxes[:, 3] - boxes[:, 1] + 1
        ctr_x = boxes[:, 0] + 0.5 * widths
        ctr_y = boxes[:, 1] + 0.5 * heights

        wx, wy, ww, wh = self.weights
        dx = deltas[:, 0::4] / wx
        dy = deltas[:, 1::4] / wy
        # Clamp before exp() so huge predictions cannot overflow.
        dw = torch.clamp(deltas[:, 2::4] / ww, max=self.scale_clamp)
        dh = torch.clamp(deltas[:, 3::4] / wh, max=self.scale_clamp)

        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
        half_w = 0.5 * torch.exp(dw) * widths[:, None]
        half_h = 0.5 * torch.exp(dh) * heights[:, None]

        pred_boxes = torch.zeros_like(deltas)
        pred_boxes[:, 0::4] = pred_ctr_x - half_w  # x1
        pred_boxes[:, 1::4] = pred_ctr_y - half_h  # y1
        pred_boxes[:, 2::4] = pred_ctr_x + half_w  # x2
        pred_boxes[:, 3::4] = pred_ctr_y + half_h  # y2
        return pred_boxes
MPMQA | MPMQA-master/detector/bua/caffe/modeling/fast_rcnn.py |
import logging
import numpy as np
import torch
from fvcore.nn import smooth_l1_loss
from torch import nn
from torch.nn import functional as F
from detectron2.layers import cat
from detectron2.structures import Instances
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads import select_foreground_proposals
from detectron2.modeling.roi_heads.fast_rcnn import fast_rcnn_inference, fast_rcnn_inference_single_image, FastRCNNOutputs
from .layers.nms import batched_nms
from .box_regression import BUABoxes
logger = logging.getLogger(__name__)
"""
Shape shorthand in this module:
N: number of images in the minibatch
R: number of ROIs, combined over all images, in the minibatch
Ri: number of ROIs in image i
K: number of foreground classes. E.g.,there are 80 foreground classes in COCO.
Naming convention:
deltas: refers to the 4-d (dx, dy, dw, dh) deltas that parameterize the box2box
transform (see :class:`box_regression.Box2BoxTransform`).
pred_class_logits: predicted class scores in [-inf, +inf]; use
softmax(pred_class_logits) to estimate P(class).
gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
foreground object classes and K represents the background class.
pred_proposal_deltas: predicted box2box transform deltas for transforming proposals
to detection box predictions.
gt_proposal_deltas: ground-truth box2box transform deltas
"""
class FastRCNNOutputs:
    """
    An internal implementation that stores information about outputs of a Fast R-CNN head,
    and provides methods that are used to decode the outputs of a Fast R-CNN head.

    NOTE(review): this local class shadows the ``FastRCNNOutputs`` imported
    from detectron2 at the top of this module. Its methods marked "Deprecated"
    reference names that are never imported here (``cross_entropy``,
    ``_log_classification_stats``, ``nonzero_tuple``, ``giou_loss``,
    ``Boxes``) and would raise NameError if called — confirm they are unused.
    """

    def __init__(
        self,
        box2box_transform,
        pred_class_logits,
        pred_proposal_deltas,
        proposals,
        smooth_l1_beta=0.0,
        box_reg_loss_type="smooth_l1",
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
                box2box transform instance for proposal-to-detection transformations.
            pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
                logits for all R predicted object instances.
                Each row corresponds to a predicted object instance.
            pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
                class-specific or class-agnostic regression. It stores the predicted deltas that
                transform proposals into final box detections.
                B is the box dimension (4 or 5).
                When B is 4, each row is [dx, dy, dw, dh (, ....)].
                When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
            proposals (list[Instances]): A list of N Instances, where Instances i stores the
                proposals for image i, in the field "proposal_boxes".
                When training, each Instances must have ground-truth labels
                stored in the field "gt_classes" and "gt_boxes".
                The total number of all instances must be equal to R.
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
            box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou"
        """
        self.box2box_transform = box2box_transform
        self.num_preds_per_image = [len(p) for p in proposals]
        self.pred_class_logits = pred_class_logits
        self.pred_proposal_deltas = pred_proposal_deltas
        self.smooth_l1_beta = smooth_l1_beta
        self.box_reg_loss_type = box_reg_loss_type

        self.image_shapes = [x.image_size for x in proposals]

        if len(proposals):
            box_type = type(proposals[0].proposal_boxes)
            # cat(..., dim=0) concatenates over all images in the batch
            self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
            assert (
                not self.proposals.tensor.requires_grad
            ), "Proposals should not require gradients!"

            # "gt_classes" exists if and only if training. But other gt fields may
            # not necessarily exist in training for images that have no groundtruth.
            if proposals[0].has("gt_classes"):
                self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)

                # If "gt_boxes" does not exist, the proposals must be all negative and
                # should not be included in regression loss computation.
                # Here we just use proposal_boxes as an arbitrary placeholder because its
                # value won't be used in self.box_reg_loss().
                gt_boxes = [
                    p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes for p in proposals
                ]
                self.gt_boxes = box_type.cat(gt_boxes)
        else:
            # NOTE(review): `Boxes` is not imported in this module (only
            # BUABoxes is); reaching this branch (an empty proposal list)
            # would raise NameError — confirm whether it can ever be reached.
            self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device))
        self._no_instances = len(self.proposals) == 0  # no instances found

    def softmax_cross_entropy_loss(self):
        """
        Deprecated
        """
        # NOTE(review): `_log_classification_stats` and `cross_entropy` are not
        # defined or imported in this module; this deprecated path cannot run.
        _log_classification_stats(self.pred_class_logits, self.gt_classes)
        return cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean")

    def box_reg_loss(self):
        """
        Deprecated
        """
        if self._no_instances:
            # Multiply by zero to keep the graph connected without adding loss.
            return 0.0 * self.pred_proposal_deltas.sum()

        box_dim = self.proposals.tensor.size(1)  # 4 or 5
        cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim
        device = self.pred_proposal_deltas.device

        bg_class_ind = self.pred_class_logits.shape[1] - 1
        # Box delta loss is only computed between the prediction for the gt class k
        # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
        # for non-gt classes and background.
        # Empty fg_inds should produce a valid loss of zero because reduction=sum.
        # NOTE(review): `nonzero_tuple` is not imported in this module; this
        # deprecated path would raise NameError if called.
        fg_inds = nonzero_tuple((self.gt_classes >= 0) & (self.gt_classes < bg_class_ind))[0]
        if cls_agnostic_bbox_reg:
            # pred_proposal_deltas only corresponds to foreground class for agnostic
            gt_class_cols = torch.arange(box_dim, device=device)
        else:
            # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
            # where b is the dimension of box representation (4 or 5)
            # Note that compared to Detectron1,
            # we do not perform bounding box regression for background classes.
            gt_class_cols = box_dim * self.gt_classes[fg_inds, None] + torch.arange(
                box_dim, device=device
            )

        if self.box_reg_loss_type == "smooth_l1":
            gt_proposal_deltas = self.box2box_transform.get_deltas(
                self.proposals.tensor, self.gt_boxes.tensor
            )
            loss_box_reg = smooth_l1_loss(
                self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
                gt_proposal_deltas[fg_inds],
                self.smooth_l1_beta,
                reduction="sum",
            )
        elif self.box_reg_loss_type == "giou":
            # NOTE(review): `giou_loss` is not imported in this module either.
            fg_pred_boxes = self.box2box_transform.apply_deltas(
                self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
                self.proposals.tensor[fg_inds],
            )
            loss_box_reg = giou_loss(
                fg_pred_boxes,
                self.gt_boxes.tensor[fg_inds],
                reduction="sum",
            )
        else:
            raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")

        # Normalize over ALL proposals, not only foreground ones.
        loss_box_reg = loss_box_reg / self.gt_classes.numel()
        return loss_box_reg

    def losses(self):
        """
        Deprecated
        """
        return {"loss_cls": self.softmax_cross_entropy_loss(), "loss_box_reg": self.box_reg_loss()}

    def predict_boxes(self):
        """
        Deprecated
        """
        pred = self.box2box_transform.apply_deltas(self.pred_proposal_deltas, self.proposals.tensor)
        return pred.split(self.num_preds_per_image, dim=0)

    def predict_probs(self):
        """
        Deprecated
        """
        probs = F.softmax(self.pred_class_logits, dim=-1)
        return probs.split(self.num_preds_per_image, dim=0)
class BUACaffeFastRCNNOutputs(object):
    """
    A class that stores information about outputs of a Fast R-CNN head.

    Caffe-compatible variant: the head operates on the resized image, and
    `image_scales` is used to map predictions back to original-image
    coordinates during inference.
    """

    def __init__(
        self, box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, smooth_l1_beta, image_scales, attr_on=False
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
                box2box transform instance for proposal-to-detection transformations.
            pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
                logits for all R predicted object instances.
                Each row corresponds to a predicted object instance.
            pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
                class-specific or class-agnostic regression. It stores the predicted deltas that
                transform proposals into final box detections.
                B is the box dimension (4 or 5).
                When B is 4, each row is [dx, dy, dw, dh (, ....)].
                When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
            proposals (list[Instances]): A list of N Instances, where Instances i stores the
                proposals for image i, in the field "proposal_boxes".
                When training, each Instances must have ground-truth labels
                stored in the field "gt_classes" and "gt_boxes".
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
            image_scales: per-image resize factor applied to the inputs; used to
                map boxes back to original-image coordinates.
            attr_on (bool): whether the attribute head is enabled (stored only).
        """
        self.box2box_transform = box2box_transform
        self.num_preds_per_image = [len(p) for p in proposals]
        self.pred_class_logits = pred_class_logits
        self.pred_proposal_deltas = pred_proposal_deltas
        self.smooth_l1_beta = smooth_l1_beta
        self.image_scales = image_scales
        self.attr_on = attr_on

        box_type = type(proposals[0].proposal_boxes)
        # cat(..., dim=0) concatenates over all images in the batch
        self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
        assert not self.proposals.tensor.requires_grad, "Proposals should not require gradients!"
        self.image_shapes = [x.image_size for x in proposals]

        # The following fields should exist only when training.
        if proposals[0].has("gt_boxes"):
            self.gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
            assert proposals[0].has("gt_classes")
            self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)

    def fast_rcnn_inference(self, boxes, scores, image_shapes, image_scales, score_thresh, nms_thresh, topk_per_image):
        """
        Call `fast_rcnn_inference_single_image` for all images.

        Args:
            boxes (list[Tensor]): A list of Tensors of predicted class-specific or class-agnostic
                boxes for each image. Element i has shape (Ri, K * 4) if doing
                class-specific regression, or (Ri, 4) if doing class-agnostic
                regression, where Ri is the number of predicted objects for image i.
                This is compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`.
            scores (list[Tensor]): A list of Tensors of predicted class scores for each image.
                Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
                for image i. Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`.
            image_shapes (list[tuple]): A list of (width, height) tuples for each image in the batch.
            image_scales (list[float]): per-image resize factors.
            score_thresh (float): Only return detections with a confidence score exceeding this
                threshold.
            nms_thresh (float): The threshold to use for box non-maximum suppression. Value in [0, 1].
            topk_per_image (int): The number of top scoring detections to return. Set < 0 to return
                all detections.

        Returns:
            instances: (list[Instances]): A list of N instances, one for each image in the batch,
                that stores the topk most confidence detections.
            kept_indices: (list[Tensor]): A list of 1D tensor of length of N, each element indicates
                the corresponding boxes/scores index in [0, Ri) from the input, for image i.
        """
        result_per_image = [
            self.fast_rcnn_inference_single_image(
                boxes_per_image, scores_per_image, image_shape, image_scale, score_thresh, nms_thresh, topk_per_image
            )
            for scores_per_image, boxes_per_image, image_shape, image_scale in zip(scores, boxes, image_shapes, image_scales)
        ]
        return tuple(list(x) for x in zip(*result_per_image))

    def fast_rcnn_inference_single_image(
        self, boxes, scores, image_shape, image_scale, score_thresh, nms_thresh, topk_per_image
    ):
        """
        Single-image inference. Return bounding-box detection results by thresholding
        on scores and applying non-maximum suppression (NMS).

        Args:
            Same as `fast_rcnn_inference`, but with boxes, scores, and image shapes
            per image.

        Returns:
            Same as `fast_rcnn_inference`, but for only one image.
        """
        # Drop the background class: column 0 of scores and the first 4 box
        # coordinates (background's per-class regression output).
        scores = scores[:, 1:]
        boxes = boxes[:, 4:]
        num_bbox_reg_classes = boxes.shape[1] // 4
        # Convert to Boxes to use the `clip` function ...
        # Boxes are already in original-image coordinates (predict_boxes divided
        # by the scale), so clip against image_shape / image_scale.
        boxes = BUABoxes(boxes.reshape(-1, 4))
        boxes.clip((image_shape[0]/image_scale, image_shape[1]/image_scale))
        boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4)  # R x C x 4

        # Filter results based on detection scores
        filter_mask = scores > score_thresh  # R x K
        # R' x 2. First column contains indices of the R predictions;
        # Second column contains indices of classes.
        filter_inds = filter_mask.nonzero()
        if num_bbox_reg_classes == 1:
            boxes = boxes[filter_inds[:, 0], 0]
        else:
            boxes = boxes[filter_mask]
        scores = scores[filter_mask]

        # Apply per-class NMS
        keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
        if topk_per_image >= 0:
            keep = keep[:topk_per_image]
        boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]

        # NOTE(review): `image_shape` here is the resized shape while the boxes
        # are in original-image coordinates — confirm downstream consumers
        # expect this mix.
        result = Instances(image_shape)
        result.pred_boxes = BUABoxes(boxes)
        result.scores = scores
        result.pred_classes = filter_inds[:, 1]
        return result, filter_inds[:, 0]

    def predict_boxes(self):
        """
        Returns:
            list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
                for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
                the number of predicted objects for image i and B is the box dimension (4 or 5)
        """
        # Always use 1 image per worker during inference since this is the
        # standard when reporting inference time in papers.
        # NOTE(review): this rescales `self.proposals` IN PLACE to
        # original-image coordinates; calling predict_boxes twice would
        # rescale twice — confirm single-call usage.
        self.proposals.scale(1.0/self.image_scales[0], 1.0/self.image_scales[0])
        num_pred = len(self.proposals)
        B = self.proposals.tensor.shape[1]
        K = self.pred_proposal_deltas.shape[1] // B
        boxes = self.box2box_transform.apply_deltas(
            self.pred_proposal_deltas,
            self.proposals.tensor,
        )
        return boxes.view(num_pred, K * B).split(self.num_preds_per_image, dim=0)

    def predict_probs(self):
        """
        Returns:
            list[Tensor]: A list of Tensors of predicted class probabilities for each image.
                Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
                for image i.
        """
        probs = F.softmax(self.pred_class_logits, dim=-1)
        return probs.split(self.num_preds_per_image, dim=0)

    def inference(self, score_thresh, nms_thresh, topk_per_image):
        """
        Args:
            score_thresh (float): same as fast_rcnn_inference.
            nms_thresh (float): same as fast_rcnn_inference.
            topk_per_image (int): same as fast_rcnn_inference.

        Returns:
            list[Instances]: same as fast_rcnn_inference.
            list[Tensor]: same as fast_rcnn_inference.
        """
        boxes = self.predict_boxes()
        scores = self.predict_probs()
        image_shapes = self.image_shapes
        image_scales = self.image_scales

        return self.fast_rcnn_inference(
            boxes, scores, image_shapes, image_scales, score_thresh, nms_thresh, topk_per_image
        )
class BUACaffeFastRCNNOutputLayers(nn.Module):
    """
    Prediction heads for the Caffe-style Fast R-CNN branch:
      (1) box classification scores,
      (2) proposal-to-detection box regression deltas,
      (3) optionally, attribute scores conditioned on the predicted class.
    """

    def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4, attr_on=False, num_attr_classes=401):
        """
        Args:
            input_size (int): channels, or (channels, height, width)
            num_classes (int): number of classes output by the score head
            cls_agnostic_bbox_reg (bool): whether box regression is class agnostic
            box_dim (int): dimension of a box: 4 for XYXY, 5 for rotated XYWHA
            attr_on (bool): whether to build the attribute sub-head
            num_attr_classes (int): number of attribute classes
        """
        super(BUACaffeFastRCNNOutputLayers, self).__init__()

        if not isinstance(input_size, int):
            input_size = np.prod(input_size)
        self.attr_on = attr_on

        self.cls_score = nn.Linear(input_size, num_classes)
        reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
        self.bbox_pred = nn.Linear(input_size, reg_classes * box_dim)

        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        nn.init.constant_(self.cls_score.bias, 0)
        nn.init.constant_(self.bbox_pred.bias, 0)

        if self.attr_on:
            # Attribute head: embed the argmax class, concatenate with the ROI
            # feature, then a small two-layer MLP with softmax output.
            self.cls_embed = nn.Embedding(num_classes, 256)
            self.attr_linear1 = nn.Linear(input_size + 256, 512)
            self.attr_linear2 = nn.Linear(512, num_attr_classes)

            nn.init.normal_(self.cls_embed.weight, std=0.01)
            nn.init.normal_(self.attr_linear1.weight, std=0.01)
            nn.init.normal_(self.attr_linear2.weight, std=0.01)
            nn.init.constant_(self.attr_linear1.bias, 0)
            nn.init.constant_(self.attr_linear2.bias, 0)

    def forward(self, x, proposal_boxes=None):
        """
        Args:
            x (Tensor): (R, input_size) pooled ROI features; higher-rank
                inputs are flattened per row first.
            proposal_boxes: unused, kept for interface compatibility.

        Returns:
            (scores, proposal_deltas), plus attr_score when attr_on is set.
        """
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        scores = self.cls_score(x)
        proposal_deltas = self.bbox_pred(x)

        if not self.attr_on:
            return scores, proposal_deltas

        # Condition the attribute prediction on the most likely class.
        predicted_cls = torch.argmax(scores, dim=1)
        embedded = self.cls_embed(predicted_cls)
        attr_input = torch.cat([x, embedded], dim=1)
        hidden = self.attr_linear1(attr_input)
        attr_score = F.softmax(self.attr_linear2(F.relu(hidden)), dim=-1)
        return scores, proposal_deltas, attr_score
class BUADetection2FastRCNNOutputs(FastRCNNOutputs):
"""
A class that stores information about outputs of a Fast R-CNN head.
"""
    def __init__(
        self, box2box_transform, pred_class_logits, pred_proposal_deltas, proposals, smooth_l1_beta, attr_on=False, pred_attribute_logits=None, num_attr_classes=400, gt_attributes=None
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
                box2box transform instance for proposal-to-detection transformations.
            pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
                logits for all R predicted object instances.
                Each row corresponds to a predicted object instance.
            pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
                class-specific or class-agnostic regression. It stores the predicted deltas that
                transform proposals into final box detections.
                B is the box dimension (4 or 5).
                When B is 4, each row is [dx, dy, dw, dh (, ....)].
                When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
            pred_attribute_logits (Tensor:) A tensor of shape (R, C) storing the predicted attribute
                logits for all R predicted object instances.
            proposals (list[Instances]): A list of N Instances, where Instances i stores the
                proposals for image i, in the field "proposal_boxes".
                When training, each Instances must have ground-truth labels
                stored in the field "gt_classes" and "gt_boxes".
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
            attr_on (bool): whether attribute logits/targets are carried.
            num_attr_classes (int): number of attribute classes.
            gt_attributes: ground-truth attribute labels; only stored when
                `attr_on` is True.
        """
        self.attr_on = attr_on
        self.box2box_transform = box2box_transform
        self.num_preds_per_image = [len(p) for p in proposals]
        self.pred_class_logits = pred_class_logits
        self.pred_proposal_deltas = pred_proposal_deltas
        if self.attr_on:
            self.pred_attribute_logits = pred_attribute_logits
            self.gt_attributes = gt_attributes
        self.smooth_l1_beta = smooth_l1_beta

        box_type = type(proposals[0].proposal_boxes)
        # cat(..., dim=0) concatenates over all images in the batch
        self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
        assert not self.proposals.tensor.requires_grad, "Proposals should not require gradients!"
        self.image_shapes = [x.image_size for x in proposals]
        self.num_attr_classes = num_attr_classes

        # The following fields should exist only when training.
        if proposals[0].has("gt_boxes"):
            self.gt_boxes = box_type.cat([p.gt_boxes for p in proposals])
            assert proposals[0].has("gt_classes")
            self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)
def _log_accuracy(self):
"""
Log the accuracy metrics to EventStorage.
"""
num_instances = self.gt_classes.numel()
pred_classes = self.pred_class_logits.argmax(dim=1)
bg_class_ind = self.pred_class_logits.shape[1] - 1
fg_inds = (self.gt_classes >= 0) & (self.gt_classes < bg_class_ind)
num_fg = fg_inds.nonzero().numel()
fg_gt_classes = self.gt_classes[fg_inds]
fg_pred_classes = pred_classes[fg_inds]
num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()
num_accurate = (pred_classes == self.gt_classes).nonzero().numel()
fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()
storage = get_event_storage()
storage.put_scalar("fast_rcnn/cls_accuracy", num_accurate / num_instances)
if num_fg > 0:
storage.put_scalar("fast_rcnn/fg_cls_accuracy", fg_num_accurate / num_fg)
storage.put_scalar("fast_rcnn/false_negative", num_false_negative / num_fg)
def softmax_cross_entropy_loss(self):
"""
Compute the softmax cross entropy loss for box classification.
Returns:
scalar Tensor
"""
self._log_accuracy()
return F.cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean")
    def smooth_l1_loss(self):
        """
        Compute the smooth L1 loss for box regression.

        Only foreground proposals contribute; for class-specific regression the
        loss is taken over the B delta columns of each proposal's gt class.

        Returns:
            scalar Tensor: box regression loss, summed over foreground proposals
            and normalized by the total number of proposals R.
        """
        # Regression targets: deltas that map each proposal onto its matched gt box.
        gt_proposal_deltas = self.box2box_transform.get_deltas(
            self.proposals.tensor, self.gt_boxes.tensor
        )
        box_dim = gt_proposal_deltas.size(1) # 4 or 5
        # If the prediction has exactly B columns, it is class-agnostic.
        cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim
        device = self.pred_proposal_deltas.device
        bg_class_ind = self.pred_class_logits.shape[1] - 1
        # Box delta loss is only computed between the prediction for the gt class k
        # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
        # for non-gt classes and background.
        # Empty fg_inds produces a valid loss of zero as long as the size_average
        # arg to smooth_l1_loss is False (otherwise it uses torch.mean internally
        # and would produce a nan loss).
        fg_inds = torch.nonzero((self.gt_classes >= 0) & (self.gt_classes < bg_class_ind)).squeeze(
            1
        )
        if cls_agnostic_bbox_reg:
            # pred_proposal_deltas only corresponds to foreground class for agnostic
            gt_class_cols = torch.arange(box_dim, device=device)
        else:
            fg_gt_classes = self.gt_classes[fg_inds]
            # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
            # where b is the dimension of box representation (4 or 5)
            # Note that compared to Detectron1,
            # we do not perform bounding box regression for background classes.
            gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(box_dim, device=device)
        loss_box_reg = smooth_l1_loss(
            self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
            gt_proposal_deltas[fg_inds],
            self.smooth_l1_beta,
            reduction="sum",
        )
        # The loss is normalized using the total number of regions (R), not the number
        # of foreground regions even though the box regression loss is only defined on
        # foreground regions. Why? Because doing so gives equal training influence to
        # each foreground example. To see how, consider two different minibatches:
        #  (1) Contains a single foreground region
        #  (2) Contains 100 foreground regions
        # If we normalize by the number of foreground regions, the single example in
        # minibatch (1) will be given 100 times as much influence as each foreground
        # example in minibatch (2). Normalizing by the total number of regions, R,
        # means that the single example in minibatch (1) and each of the 100 examples
        # in minibatch (2) are given equal influence.
        loss_box_reg = loss_box_reg / self.gt_classes.numel()
        return loss_box_reg
    def attribute_loss(self):
        """
        Compute the attribute classification loss.

        Each box's attribute logits are replicated 16 times so that one
        cross-entropy term is computed against each of the (up to 16)
        ground-truth attribute slots; slots with label -1 are ignored.
        The per-box loss is averaged over its number of valid gt attributes,
        then averaged over boxes that have at least one valid attribute.

        Returns:
            scalar Tensor
        """
        # NOTE(review): gt_attributes appears to be (n_boxes, 16) with -1
        # padding for unused slots — 16 presumably is the max number of gt
        # attributes per box; confirm against the dataset mapper.
        fg_gt_attributes = self.gt_attributes
        n_boxes = self.pred_attribute_logits.shape[0]
        # Replicate logits: (n_boxes, C) -> (n_boxes * 16, C), matching the
        # flattened gt layout. This mutates the stored attribute in place.
        self.pred_attribute_logits = self.pred_attribute_logits.unsqueeze(1)
        self.pred_attribute_logits = self.pred_attribute_logits.expand(n_boxes, 16, self.num_attr_classes).contiguous().view(-1, self.num_attr_classes)
        # Per-slot weight = 1 / (number of valid gt attributes of that box).
        inv_per_box_weights = (
            (fg_gt_attributes >= 0).sum(dim=1).repeat(16, 1).transpose(0, 1).flatten()
        )
        per_box_weights = inv_per_box_weights.float().reciprocal()
        # Boxes with zero valid attributes get reciprocal = inf; zero them out.
        per_box_weights[per_box_weights > 1] = 0.0
        fg_gt_attributes = fg_gt_attributes.view(-1)
        attributes_loss = 0.5 * F.cross_entropy(
            self.pred_attribute_logits, fg_gt_attributes, reduction="none", ignore_index=-1
        )
        # Weighted sum over the 16 slots -> one loss value per box.
        attributes_loss = (attributes_loss * per_box_weights).view(n_boxes, -1).sum(dim=1)
        n_valid_boxes = len(attributes_loss.nonzero())
        if n_valid_boxes > 0:
            attributes_loss = (attributes_loss / n_valid_boxes).sum()
        else:
            # Keep the graph connected with a zero-valued loss.
            attributes_loss = (attributes_loss * 0.0).sum()
        return attributes_loss
def losses(self):
"""
Compute the default losses for box head in Fast(er) R-CNN,
with softmax cross entropy loss and smooth L1 loss.
Returns:
A dict of losses (scalar tensors) containing keys "loss_cls" and "loss_box_reg".
"""
return {
"loss_cls": self.softmax_cross_entropy_loss(),
"loss_box_reg": self.smooth_l1_loss(),
"loss_attr": self.attribute_loss() if self.attr_on else 0.,
}
    def predict_boxes(self):
        """
        Apply the predicted deltas to the proposals and split per image.

        Returns:
            list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
                for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
                the number of predicted objects for image i and B is the box dimension (4 or 5)
        """
        num_pred = len(self.proposals)
        B = self.proposals.tensor.shape[1]
        # K = number of regression classes (1 if class-agnostic).
        K = self.pred_proposal_deltas.shape[1] // B
        # Decode all K boxes per proposal in one batched call by tiling each
        # proposal K times.
        boxes = self.box2box_transform.apply_deltas(
            self.pred_proposal_deltas.view(num_pred * K, B),
            self.proposals.tensor.unsqueeze(1).expand(num_pred, K, B).reshape(-1, B),
        )
        return boxes.view(num_pred, K * B).split(self.num_preds_per_image, dim=0)
def predict_probs(self):
"""
Returns:
list[Tensor]: A list of Tensors of predicted class probabilities for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i.
"""
probs = F.softmax(self.pred_class_logits, dim=-1)
return probs.split(self.num_preds_per_image, dim=0)
def inference(self, score_thresh, nms_thresh, topk_per_image):
"""
Args:
score_thresh (float): same as fast_rcnn_inference.
nms_thresh (float): same as fast_rcnn_inference.
topk_per_image (int): same as fast_rcnn_inference.
Returns:
list[Instances]: same as fast_rcnn_inference.
list[Tensor]: same as fast_rcnn_inference.
"""
boxes = self.predict_boxes()
scores = self.predict_probs()
image_shapes = self.image_shapes
return fast_rcnn_inference(
boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image
)
class BUADetectron2FastRCNNOutputLayers(nn.Module):
    """
    Prediction layers for Fast R-CNN outputs:
    (1) proposal-to-detection box regression deltas
    (2) classification scores
    plus an optional attribute head that combines box features with an
    embedding of the (predicted or gt) class.
    """
    def __init__(self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4, attr_on=False, num_attr_classes=400):
        """
        Args:
            input_size (int): channels, or (channels, height, width)
            num_classes (int): number of foreground classes
            cls_agnostic_bbox_reg (bool): whether to use class agnostic for bbox regression
            box_dim (int): the dimension of bounding boxes.
                Example box dimensions: 4 for regular XYXY boxes and 5 for rotated XYWHA boxes
            attr_on (bool): whether to build the attribute prediction head
            num_attr_classes (int): number of attribute classes
        """
        super(BUADetectron2FastRCNNOutputLayers, self).__init__()
        self.attr_on = attr_on
        self.num_classes = num_classes
        self.num_attr_classes = num_attr_classes
        if not isinstance(input_size, int):
            input_size = np.prod(input_size)
        # The prediction layer for num_classes foreground classes and one background class
        # (hence + 1)
        self.cls_score = nn.Linear(input_size, num_classes + 1)
        num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes
        self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim)
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for l in [self.cls_score, self.bbox_pred]:
            nn.init.constant_(l.bias, 0)
        if self.attr_on:
            # Class embedding is concatenated with the box feature before the
            # two-layer attribute classifier.
            self.cls_embed = nn.Embedding(num_classes+1, 256)
            self.attr_linear1 = nn.Linear(input_size + 256, 512)
            self.attr_linear2 = nn.Linear(512, num_attr_classes)
            # nn.init.normal_(self.cls_embed.weight, std=0.01)
            nn.init.normal_(self.attr_linear1.weight, std=0.01)
            nn.init.normal_(self.attr_linear2.weight, std=0.01)
            nn.init.constant_(self.attr_linear1.bias, 0)
            nn.init.constant_(self.attr_linear2.bias, 0)
    def forward(self, x, proposal_boxes=None):
        """
        Args:
            x (Tensor): per-region features, flattened to (R, input_size) if needed.
            proposal_boxes (list[Instances] or None): required when training with
                the attribute head; used to select foreground proposals.

        Returns:
            (scores, proposal_deltas) when attr_on is False; otherwise
            (scores, proposal_deltas, attr_score, gt_attributes-or-None).
        """
        if x.dim() > 2:
            x = torch.flatten(x, start_dim=1)
        scores = self.cls_score(x)
        proposal_deltas = self.bbox_pred(x)
        if self.attr_on:
            if self.training:
                assert proposal_boxes is not None, "Proposals are None while attr=True"
                # Only foreground proposals carry attribute annotations.
                proposals, fg_selection_atrributes = select_foreground_proposals(proposal_boxes, self.num_classes)
                attribute_features = x[torch.cat(fg_selection_atrributes, dim=0)]
                cls_labels = torch.cat([prop.gt_classes for prop in proposals])
            else:
                # get labels and indices of proposals with foreground
                cls_labels = torch.argmax(scores, dim=1)
                attribute_features = x
            # get embeddings of indices using gt cls labels
            cls_embed_out = self.cls_embed(cls_labels)
            # concat with fc7 feats
            concat_attr = cat([attribute_features, cls_embed_out], dim=1)
            # pass through attr head layers
            fc_attr = self.attr_linear1(concat_attr)
            attr_score = self.attr_linear2(F.relu(fc_attr))
            # The conditional applies only to the last tuple element: gt
            # attributes are returned during training, None at inference.
            return scores, proposal_deltas, attr_score, cat([p.gt_attributes for p in proposals], dim=0) if self.training else None
        return scores, proposal_deltas
MPMQA | MPMQA-master/detector/bua/caffe/modeling/rpn_outputs.py |
import itertools
import logging
import numpy as np
import torch
import torch.nn.functional as F
from fvcore.nn import smooth_l1_loss
from detectron2.layers import cat
from detectron2.structures import Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.modeling.sampling import subsample_labels
from .box_regression import BUABoxes
from .layers.nms import batched_nms
def find_top_bua_rpn_proposals(
    proposals,
    pred_objectness_logits,
    images,
    nms_thresh,
    pre_nms_topk,
    post_nms_topk,
    min_box_side_len,
    training,
):
    """
    For each feature map, select the `pre_nms_topk` highest scoring proposals,
    apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
    highest scoring proposals among all the feature maps if `training` is True,
    otherwise, returns the highest `post_nms_topk` scoring proposals for each
    feature map.
    Args:
        proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 4).
            All proposal predictions on the feature maps.
        pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
        images (ImageList): Input images as an :class:`ImageList`.
        nms_thresh (float): IoU threshold to use for NMS
        pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
            When RPN is run on multiple feature maps (as in FPN) this number is per
            feature map.
        post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
            When RPN is run on multiple feature maps (as in FPN) this number is total,
            over all feature maps.
        min_box_side_len (float): minimum proposal box side length in pixels (absolute units
            wrt input images).
        training (bool): True if proposals are to be used in training, otherwise False.
            This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
            comment.
    Returns:
        proposals (list[Instances]): list of N Instances. The i-th Instances
            stores post_nms_topk object proposals for image i.
    """
    image_sizes = images.image_sizes # in (h, w) order
    image_scales = images.image_scales
    device = proposals[0].device
    # 1. Concat all levels together
    all_scores = []
    all_proposals = []
    level_ids = []
    for level_id, proposals_i, logits_i in zip(
        itertools.count(), proposals, pred_objectness_logits
    ):
        Hi_Wi_A = logits_i.shape[1]
        all_proposals.append(proposals_i)
        all_scores.append(logits_i)
        # Remember which level each proposal came from, so NMS below can be
        # applied per level (batched_nms uses level id as the category).
        level_ids.append(torch.full((Hi_Wi_A,), level_id, dtype=torch.int64, device=device))
    all_scores = cat(all_scores, dim=1)
    all_proposals = cat(all_proposals, dim=1)
    level_ids = cat(level_ids, dim=0)
    # 2. For each image, run a choose pre_nms_topk proposal ,per-level NMS, and choose post_nms_topk results.
    results = []
    for n, image_size in enumerate(image_sizes):
        boxes = BUABoxes(all_proposals[n])
        scores_per_img = all_scores[n]
        boxes.clip(image_size)
        # Drop degenerate boxes (BUABoxes-specific validity filter).
        keep = boxes.filter_boxes()
        boxes = boxes[keep]
        scores_per_img = scores_per_img[keep]
        lvl = level_ids[keep]
        # filter empty boxes; the threshold is scaled to resized-image units
        keep = boxes.nonempty(threshold=min_box_side_len*image_scales[n])
        if keep.sum().item() != len(boxes):
            boxes, scores_per_img, lvl = boxes[keep], scores_per_img[keep], lvl[keep]
        # choose pre_nms_topk proposal
        Hi_Wi_A = scores_per_img.shape[0]
        num_proposals_i = min(pre_nms_topk, Hi_Wi_A)
        scores_per_img, idx = scores_per_img.sort(descending=True, dim=0)
        topk_scores_i = scores_per_img[:num_proposals_i]
        topk_idx = idx[:num_proposals_i]
        topk_boxes_i = boxes[topk_idx, :]
        lvl_i = lvl[topk_idx]
        keep = batched_nms(topk_boxes_i.tensor, topk_scores_i, lvl_i, nms_thresh)
        # In Detectron1, there was different behavior during training vs. testing.
        # During training, topk is over the proposals from *all* images in the training batch.
        # During testing, it is over the proposals for each image separately.
        # As a result, the training behavior becomes batch-dependent,
        # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size.
        # This bug is addressed in Detectron2 to make the behavior independent of batch size.
        keep = keep[:post_nms_topk]
        res = Instances(image_size)
        res.proposal_boxes = topk_boxes_i[keep]
        res.objectness_logits = topk_scores_i[keep]
        results.append(res)
    return results
class BUARPNOutputs(object):
    """
    Bundles BUA RPN predictions and ground truth for one minibatch, and
    provides losses / proposal decoding. Unlike the stock detectron2 RPN,
    objectness is predicted as 2 logits per anchor (bg/fg) trained with
    softmax cross entropy.
    """
    def __init__(
        self,
        box2box_transform,
        anchor_matcher,
        batch_size_per_image,
        positive_fraction,
        images,
        pred_objectness_logits,
        pred_anchor_deltas,
        anchors,
        boundary_threshold=0,
        gt_boxes=None,
        smooth_l1_beta=0.0,
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for
                anchor-proposal transformations.
            anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to
                ground-truth boxes; used to determine training labels.
            batch_size_per_image (int): number of proposals to sample when training
            positive_fraction (float): target fraction of sampled proposals that should be positive
            images (ImageList): :class:`ImageList` instance representing N input images
            pred_objectness_logits (list[Tensor]): A list of L elements.
                Element i is a tensor of shape (N, 2*A, Hi, Wi) representing
                the predicted background/foreground logits for anchors.
            pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape
                (N, A*4, Hi, Wi) representing the predicted "deltas" used to transform anchors
                to proposals.
            anchors (list[list[Boxes]]): A list of N elements. Each element is a list of L
                Boxes. The Boxes at (n, l) stores the entire anchor array for feature map l in image
                n (i.e. the cell anchors repeated over all locations in feature map (n, l)).
            boundary_threshold (int): if >= 0, then anchors that extend beyond the image
                boundary by more than boundary_thresh are not used in training. Set to a very large
                number or < 0 to disable this behavior. Only needed in training.
            gt_boxes (list[Boxes], optional): A list of N elements. Element i a Boxes storing
                the ground-truth ("gt") boxes for image i.
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
        """
        self.box2box_transform = box2box_transform
        self.anchor_matcher = anchor_matcher
        self.batch_size_per_image = batch_size_per_image
        self.positive_fraction = positive_fraction
        self.pred_objectness_logits = pred_objectness_logits
        self.pred_anchor_deltas = pred_anchor_deltas
        self.anchors = anchors
        self.gt_boxes = gt_boxes
        self.num_feature_maps = len(pred_objectness_logits)
        self.num_images = len(images)
        self.image_sizes = images.image_sizes
        self.boundary_threshold = boundary_threshold
        self.smooth_l1_beta = smooth_l1_beta
    def _get_ground_truth(self):
        """
        Match anchors to gt boxes to produce per-anchor training targets.

        Returns:
            gt_objectness_logits: list of N tensors. Tensor i is a vector whose length is the
                total number of anchors in image i (i.e., len(anchors[i])). Label values are
                in {-1, 0, 1}, with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
            gt_anchor_deltas: list of N tensors. Tensor i has shape (len(anchors[i]), 4).
        """
        gt_objectness_logits = []
        gt_anchor_deltas = []
        # Concatenate anchors from all feature maps into a single Boxes per image
        anchors = [BUABoxes.cat(anchors_i) for anchors_i in self.anchors]
        for image_size_i, anchors_i, gt_boxes_i in zip(self.image_sizes, anchors, self.gt_boxes):
            """
            image_size_i: (h, w) for the i-th image
            anchors_i: anchors for i-th image
            gt_boxes_i: ground-truth boxes for i-th image
            """
            match_quality_matrix = pairwise_iou(gt_boxes_i, anchors_i)
            matched_idxs, gt_objectness_logits_i = self.anchor_matcher(match_quality_matrix)
            if self.boundary_threshold >= 0:
                # Discard anchors that go out of the boundaries of the image
                # NOTE: This is legacy functionality that is turned off by default in Detectron2
                anchors_inside_image = anchors_i.inside_box(image_size_i, self.boundary_threshold)
                gt_objectness_logits_i[~anchors_inside_image] = -1
            if len(gt_boxes_i) == 0:
                # These values won't be used anyway since the anchor is labeled as background
                gt_anchor_deltas_i = torch.zeros_like(anchors_i.tensor)
            else:
                # TODO wasted computation for ignored boxes
                matched_gt_boxes = gt_boxes_i[matched_idxs]
                gt_anchor_deltas_i = self.box2box_transform.get_deltas(
                    anchors_i.tensor, matched_gt_boxes.tensor
                )
            gt_objectness_logits.append(gt_objectness_logits_i)
            gt_anchor_deltas.append(gt_anchor_deltas_i)
        return gt_objectness_logits, gt_anchor_deltas
    def losses(self):
        """
        Return the losses from a set of RPN predictions and their associated ground-truth.
        Returns:
            dict[loss name -> loss value]: A dict mapping from loss name to loss value.
                Loss names are: `loss_rpn_cls` for objectness classification and
                `loss_rpn_loc` for proposal localization.
        """
        def resample(label):
            """
            Randomly sample a subset of positive and negative examples by overwriting
            the label vector to the ignore value (-1) for all elements that are not
            included in the sample.
            """
            pos_idx, neg_idx = subsample_labels(
                label, self.batch_size_per_image, self.positive_fraction, 0
            )
            # Fill with the ignore label (-1), then set positive and negative labels
            label.fill_(-1)
            label.scatter_(0, pos_idx, 1)
            label.scatter_(0, neg_idx, 0)
            return label
        gt_objectness_logits, gt_anchor_deltas = self._get_ground_truth()
        """
        gt_objectness_logits: list of N tensors. Tensor i is a vector whose length is the
            total number of anchors in image i (i.e., len(anchors[i]))
        gt_anchor_deltas: list of N tensors. Tensor i has shape (len(anchors[i]), B),
            where B is the box dimension
        """
        # Collect all objectness labels and delta targets over feature maps and images
        # The final ordering is L, N, H, W, A from slowest to fastest axis.
        # Division by 2 because each anchor has 2 objectness channels (bg/fg).
        num_anchors_per_map = [int(np.prod(x.shape[1:])/2) for x in self.pred_objectness_logits]
        num_anchors_per_image = sum(num_anchors_per_map)
        # Stack to: (N, num_anchors_per_image)
        gt_objectness_logits = torch.stack(
            [resample(label) for label in gt_objectness_logits], dim=0
        )
        # Log the number of positive/negative anchors per-image that's used in training
        num_pos_anchors = (gt_objectness_logits == 1).sum().item()
        num_neg_anchors = (gt_objectness_logits == 0).sum().item()
        storage = get_event_storage()
        storage.put_scalar("rpn/num_pos_anchors", num_pos_anchors / self.num_images)
        storage.put_scalar("rpn/num_neg_anchors", num_neg_anchors / self.num_images)
        assert gt_objectness_logits.shape[1] == num_anchors_per_image
        # Split to tuple of L tensors, each with shape (N, num_anchors_per_map)
        gt_objectness_logits = torch.split(gt_objectness_logits, num_anchors_per_map, dim=1)
        # Concat from all feature maps
        gt_objectness_logits = cat([x.flatten() for x in gt_objectness_logits], dim=0)
        # Stack to: (N, num_anchors_per_image, B)
        gt_anchor_deltas = torch.stack(gt_anchor_deltas, dim=0)
        assert gt_anchor_deltas.shape[1] == num_anchors_per_image
        B = gt_anchor_deltas.shape[2] # box dimension (4 or 5)
        # Split to tuple of L tensors, each with shape (N, num_anchors_per_image)
        gt_anchor_deltas = torch.split(gt_anchor_deltas, num_anchors_per_map, dim=1)
        # Concat from all feature maps
        gt_anchor_deltas = cat([x.reshape(-1, B) for x in gt_anchor_deltas], dim=0)
        # Collect all objectness logits and delta predictions over feature maps
        # and images to arrive at the same shape as the labels and targets
        # The final ordering is L, N, H, W, 2A from slowest to fastest axis.
        pred_objectness_logits = cat(
            [
                # Reshape: (N, 2A, Hi, Wi) -> (N, Hi, Wi, 2A) -> (N*Hi*Wi*A, 2)
                x.permute(0, 2, 3, 1).reshape(-1, 2)
                for x in self.pred_objectness_logits
            ],
            dim=0,
        )
        pred_anchor_deltas = cat(
            [
                # Reshape: (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B)
                #          -> (N*Hi*Wi*A, B)
                x.view(x.shape[0], -1, B, x.shape[-2], x.shape[-1])
                .permute(0, 3, 4, 1, 2)
                .reshape(-1, B)
                for x in self.pred_anchor_deltas
            ],
            dim=0,
        )
        objectness_loss, localization_loss = bua_rpn_losses(
            gt_objectness_logits,
            gt_anchor_deltas,
            pred_objectness_logits,
            pred_anchor_deltas,
            self.smooth_l1_beta,
        )
        normalizer = 1.0 / (self.batch_size_per_image * self.num_images)
        loss_cls = objectness_loss * normalizer # cls: classification loss
        loss_loc = localization_loss * normalizer # loc: localization loss
        losses = {"loss_rpn_cls": loss_cls, "loss_rpn_loc": loss_loc}
        return losses
    def predict_proposals(self):
        """
        Transform anchors into proposals by applying the predicted anchor deltas.
        Returns:
            proposals (list[Tensor]): A list of L tensors. Tensor i has shape
                (N, Hi*Wi*A, B), where B is box dimension (4 or 5).
        """
        proposals = []
        # Transpose anchors from images-by-feature-maps (N, L) to feature-maps-by-images (L, N)
        anchors = list(zip(*self.anchors))
        # anchors = list(zip(*[self.anchors]))
        # For each feature map
        for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas):
            B = anchors_i[0].tensor.size(1)
            N, _, Hi, Wi = pred_anchor_deltas_i.shape
            # Reshape: (N, A*B, Hi, Wi) -> (N, A, B, Hi, Wi) -> (N, Hi, Wi, A, B) -> (N*Hi*Wi*A, B)
            pred_anchor_deltas_i = (
                pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B)
            )
            # Concatenate all anchors to shape (N*Hi*Wi*A, B)
            # type(anchors_i[0]) is Boxes (B = 4) or RotatedBoxes (B = 5)
            anchors_i = type(anchors_i[0]).cat(anchors_i)
            proposals_i = self.box2box_transform.apply_deltas(
                pred_anchor_deltas_i, anchors_i.tensor
            )
            # Append feature map proposals with shape (N, Hi*Wi*A, B)
            proposals.append(proposals_i.view(N, -1, B))
        return proposals
    def predict_objectness_logits(self):
        """
        Return objectness scores in the same format as the proposals returned by
        :meth:`predict_proposals`. The 2-channel bg/fg logits are converted to a
        single foreground probability via a softmax over the channel pair.
        Returns:
            pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape
                (N, Hi*Wi*A).
        """
        pred_objectness_logits = [
            # Reshape: (N, 2A, Hi, Wi) -> (N, 2, A, Hi, Wi) -> (N, Hi, Wi, 1, A) -> (N, Hi*Wi*A)
            F.softmax(score.view(score.shape[0], 2, int(float(score.shape[1]) / float(2)), score.shape[2], score.shape[3]), dim=1)[:, 1:, :, :, :]\
            .permute(0, 3, 4, 1, 2).reshape(self.num_images, -1)
            for score in self.pred_objectness_logits
        ]
        return pred_objectness_logits
def bua_rpn_losses(
    gt_objectness_logits,
    gt_anchor_deltas,
    pred_objectness_logits,
    pred_anchor_deltas,
    smooth_l1_beta,
):
    """
    Compute the (unnormalized) BUA RPN losses.

    Args:
        gt_objectness_logits (Tensor): shape (N,), per-anchor labels in
            {-1, 0, 1}: -1 = ignore; 0 = not object; 1 = object.
        gt_anchor_deltas (Tensor): shape (N, box_dim), per-anchor ground-truth
            box2box regression targets.
        pred_objectness_logits (Tensor): shape (N, 2), background/foreground
            logits for each anchor.
        pred_anchor_deltas (Tensor): shape (N, box_dim), predicted box2box
            regression deltas.
        smooth_l1_beta (float): L1/L2 transition point of the smooth L1 loss;
            0 gives pure L1, +inf a constant 0.

    Returns:
        (objectness_loss, localization_loss): both summed over anchors,
        not normalized.
    """
    # Localization: only anchors labeled as object contribute.
    is_positive = gt_objectness_logits == 1
    localization_loss = smooth_l1_loss(
        pred_anchor_deltas[is_positive],
        gt_anchor_deltas[is_positive],
        smooth_l1_beta,
        reduction="sum",
    )
    # Objectness: anchors labeled -1 are ignored; 0/1 are trained with
    # 2-way softmax cross entropy.
    is_valid = gt_objectness_logits >= 0
    objectness_loss = F.cross_entropy(
        pred_objectness_logits[is_valid],
        gt_objectness_logits[is_valid].to(torch.long),
        reduction="sum",
    )
    return objectness_loss, localization_loss
MPMQA | MPMQA-master/detector/bua/caffe/modeling/rcnn.py |
import logging, os
import torch
from torch import nn
import torch.nn.functional as F
from detectron2.structures import ImageList
from detectron2.utils.logger import log_first_n
from detectron2.modeling.backbone import build_backbone
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.proposal_generator import build_proposal_generator
from detectron2.modeling.roi_heads import build_roi_heads
from detectron2.modeling.meta_arch import META_ARCH_REGISTRY
# from models.bua_caffe.postprocessing import extractor_postprocess
#from utils import save_features
__all__ = ["GeneralizedBUARCNN"]
@META_ARCH_REGISTRY.register()
class GeneralizedBUARCNN(nn.Module):
    """
    Generalized R-CNN. Any models that contains the following three components:
    1. Per-image feature extraction (aka backbone)
    2. Region proposal generation
    3. Per-region feature extraction and prediction
    """
    def __init__(self, cfg):
        super().__init__()
        self.device = torch.device(cfg.MODEL.DEVICE)
        # When True, skip detectron2-style postprocessing (caffe-compatible outputs).
        self.bua_caffe = cfg.MODEL.BUA.CAFFE
        self.resnet_version = cfg.MODEL.BUA.RESNET_VERSION
        self.backbone = build_backbone(cfg)
        self.in_features = cfg.MODEL.RPN.IN_FEATURES
        self.proposal_generator = build_proposal_generator(cfg, self.backbone.output_shape())
        self.roi_heads = build_roi_heads(cfg, self.backbone.output_shape())
        assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
        # When True, inference returns raw ROI features instead of detections.
        self.extract_on = cfg.MODEL.BUA.EXTRACT_FEATS
        self.extractor = cfg.MODEL.BUA.EXTRACTOR
        self.to(self.device)
    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper` .
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                * image: Tensor, image in (C, H, W) format.
                * instances (optional): groundtruth :class:`Instances`
                * proposals (optional): :class:`Instances`, precomputed proposals.
                Other information that's included in the original dicts, such as:
                * "height", "width" (int): the output resolution of the model, used in inference.
                  See :meth:`postprocess` for details.
        Returns:
            dict[str -> Tensor]: in training, the detector and proposal losses;
            at inference this method delegates to :meth:`inference` (see its docs).
        """
        if not self.training:
            return self.inference(batched_inputs)
        images = self.preprocess_image(batched_inputs)
        if "instances" in batched_inputs[0]:
            gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
        elif "targets" in batched_inputs[0]:
            log_first_n(
                logging.WARN, "'targets' in the model inputs is now renamed to 'instances'!", n=10
            )
            gt_instances = [x["targets"].to(self.device) for x in batched_inputs]
        else:
            gt_instances = None
        features = self.backbone(images.tensor)
        if self.resnet_version == 2:
            # ResNet v2: apply the res5 stage's norm + ReLU to the backbone
            # output before the heads (pre-activation ordering).
            for f in features:
                out = self.roi_heads.res5[0].norm(features[f])
                features[f] = F.relu_(out)
        if self.proposal_generator:
            proposals, proposal_losses = self.proposal_generator(images, features, gt_instances)
        else:
            assert "proposals" in batched_inputs[0]
            proposals = [x["proposals"].to(self.device) for x in batched_inputs]
            proposal_losses = {}
        _, detector_losses = self.roi_heads(images, features, proposals, gt_instances)
        losses = {}
        losses.update(detector_losses)
        losses.update(proposal_losses)
        return losses
    def inference(self, batched_inputs, detected_instances=None, do_postprocess=True):
        """
        Run inference on the given inputs.
        Args:
            batched_inputs (list[dict]): same as in :meth:`forward`
            detected_instances (None or list[Instances]): if not None, it
                contains an `Instances` object per image. The `Instances`
                object contains "pred_boxes" and "pred_classes" which are
                known boxes in the image.
                The inference will then skip the detection of bounding boxes,
                and only predict other per-ROI outputs.
            do_postprocess (bool): whether to apply post-processing on the outputs.
        Returns:
            same as in :meth:`forward`. When ``self.extract_on`` is True, the
            raw roi_heads output (features) is returned instead.
        """
        assert not self.training
        images = self.preprocess_image(batched_inputs)
        features = self.backbone(images.tensor)
        if self.resnet_version == 2:
            # Same pre-activation fix-up as in :meth:`forward`.
            for f in features:
                out = self.roi_heads.res5[0].norm(features[f])
                features[f] = F.relu_(out)
        if detected_instances is None:
            if self.proposal_generator:
                proposals, _ = self.proposal_generator(images, features, None)
            else:
                assert "proposals" in batched_inputs[0]
                proposals = [x["proposals"].to(self.device) for x in batched_inputs]
            if self.extract_on:
                # Feature-extraction mode: bypass postprocessing entirely.
                return self.roi_heads(images, features, proposals, None)
            else:
                results, _ = self.roi_heads(images, features, proposals, None)
        else:
            detected_instances = [x.to(self.device) for x in detected_instances]
            results = self.roi_heads.forward_with_given_boxes(features, detected_instances)
        if do_postprocess:
            processed_results = []
            for results_per_image, input_per_image, image_size in zip(
                results, batched_inputs, images.image_sizes
            ):
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                if not self.bua_caffe:
                    results_per_image = detector_postprocess(results_per_image, height, width)
                processed_results.append({"instances": results_per_image})
            return processed_results
        else:
            return results
    def preprocess_image(self, batched_inputs):
        """
        Pad and batch the input images into an :class:`ImageList`.

        NOTE: no pixel mean/std normalization is applied here; images are used
        as provided (caffe-style preprocessing is assumed to happen upstream).
        The per-image "im_scale" values are attached as ``image_scales``.
        """
        images = [x["image"].to(self.device) for x in batched_inputs]
        image_scales = [x["im_scale"] for x in batched_inputs]
        images = ImageList.from_tensors(images, self.backbone.size_divisibility)
        images.image_scales = image_scales
        return images
| 6,893 | 39.552941 | 98 | py |
MPMQA | MPMQA-master/detector/bua/caffe/modeling/rpn.py |
from typing import Dict, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from detectron2.modeling import RPN_HEAD_REGISTRY
from detectron2.layers import ShapeSpec
from detectron2.modeling.proposal_generator import build_rpn_head
from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY
from detectron2.modeling.anchor_generator import build_anchor_generator
from .box_regression import BUABox2BoxTransform
from detectron2.modeling.matcher import Matcher
from .rpn_outputs import BUARPNOutputs, find_top_bua_rpn_proposals
import copy
@RPN_HEAD_REGISTRY.register()
class StandardBUARPNHead(nn.Module):
    """
    BUA RPN head: a shared 3x3 conv produces a hidden representation from
    which two sibling 1x1 convs predict, per anchor, (a) two objectness
    logits (background/foreground) and (b) box2box regression deltas.
    """
    def __init__(self, cfg, input_shape: List[ShapeSpec]):
        super().__init__()
        # The head is shared across all feature levels.
        out_channels = cfg.MODEL.BUA.RPN.CONV_OUT_CHANNELS
        channel_counts = [s.channels for s in input_shape]
        assert len(set(channel_counts)) == 1, "Each level must have the same channel!"
        in_channels = channel_counts[0]
        # An anchor generator is built only to query per-location anchor
        # counts and the box dimension; constructing it must be side-effect free.
        anchor_generator = build_anchor_generator(cfg, input_shape)
        num_cell_anchors = anchor_generator.num_cell_anchors
        box_dim = anchor_generator.box_dim
        assert (
            len(set(num_cell_anchors)) == 1
        ), "Each level must have the same number of cell anchors"
        num_cell_anchors = num_cell_anchors[0]
        # Shared 3x3 conv for the hidden representation.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
        # 1x1 conv: two logits (bg/fg) per anchor.
        self.objectness_logits = nn.Conv2d(out_channels, num_cell_anchors * 2, kernel_size=1, stride=1)
        # 1x1 conv: box_dim regression deltas per anchor.
        self.anchor_deltas = nn.Conv2d(
            out_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1
        )
        for layer in [self.conv, self.objectness_logits, self.anchor_deltas]:
            nn.init.normal_(layer.weight, std=0.01)
            nn.init.constant_(layer.bias, 0)
    def forward(self, features):
        """
        Args:
            features (list[Tensor]): one feature map per level.

        Returns:
            (list[Tensor], list[Tensor]): per-level objectness logits of
            shape (N, 2A, Hi, Wi) and anchor deltas of shape
            (N, A*box_dim, Hi, Wi).
        """
        logits = []
        deltas = []
        for feature in features:
            hidden = F.relu(self.conv(feature))
            logits.append(self.objectness_logits(hidden))
            deltas.append(self.anchor_deltas(hidden))
        return logits, deltas
@PROPOSAL_GENERATOR_REGISTRY.register()
class BUARPN(nn.Module):
    """
    Region Proposal Network, introduced by the Faster R-CNN paper.

    Bottom-up-attention variant: delegates loss computation and proposal
    selection to :class:`BUARPNOutputs` / :func:`find_top_bua_rpn_proposals`.
    """

    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        super().__init__()
        # fmt: off
        self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
        self.in_features = cfg.MODEL.RPN.IN_FEATURES
        self.nms_thresh = cfg.MODEL.RPN.NMS_THRESH
        self.batch_size_per_image = cfg.MODEL.RPN.BATCH_SIZE_PER_IMAGE
        self.positive_fraction = cfg.MODEL.RPN.POSITIVE_FRACTION
        self.smooth_l1_beta = cfg.MODEL.RPN.SMOOTH_L1_BETA
        self.loss_weight = cfg.MODEL.RPN.LOSS_WEIGHT
        # fmt: on
        # Map from self.training state to train/test settings
        self.pre_nms_topk = {
            True: cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN,
            False: cfg.MODEL.RPN.PRE_NMS_TOPK_TEST,
        }
        self.post_nms_topk = {
            True: cfg.MODEL.RPN.POST_NMS_TOPK_TRAIN,
            False: cfg.MODEL.RPN.POST_NMS_TOPK_TEST,
        }
        self.boundary_threshold = cfg.MODEL.RPN.BOUNDARY_THRESH
        self.anchor_generator = build_anchor_generator(
            cfg, [input_shape[f] for f in self.in_features]
        )
        self.box2box_transform = BUABox2BoxTransform(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
        self.anchor_matcher = Matcher(
            cfg.MODEL.RPN.IOU_THRESHOLDS, cfg.MODEL.RPN.IOU_LABELS, allow_low_quality_matches=True
        )
        self.rpn_head = build_rpn_head(cfg, [input_shape[f] for f in self.in_features])

    def forward(self, images, features, gt_instances=None):
        """
        Args:
            images (ImageList): input images of length `N`
            features (dict[str: Tensor]): input data as a mapping from feature
                map name to tensor. Axis 0 represents the number of images `N` in
                the input data; axes 1-3 are channels, height, and width, which may
                vary between feature maps (e.g., if a feature pyramid is used).
            gt_instances (list[Instances], optional): a length `N` list of `Instances`s.
                Each `Instances` stores ground-truth instances for the corresponding image.
        Returns:
            proposals: list[Instances] or None
            loss: dict[Tensor]
        """
        gt_boxes = [x.gt_boxes for x in gt_instances] if gt_instances is not None else None
        del gt_instances
        features = [features[f] for f in self.in_features]
        pred_objectness_logits, pred_anchor_deltas = self.rpn_head(features)
        anchors_in_image = self.anchor_generator(features)
        # Replicate the (shape-dependent) anchors once per image in the batch;
        # len(features[0]) is the batch size N of the first feature map.
        anchors = [copy.deepcopy(anchors_in_image) for _ in range(len(features[0]))]
        # TODO: The anchors only depend on the feature map shape; there's probably
        # an opportunity for some optimizations (e.g., caching anchors).
        outputs = BUARPNOutputs(
            self.box2box_transform,
            self.anchor_matcher,
            self.batch_size_per_image,
            self.positive_fraction,
            images,
            pred_objectness_logits,
            pred_anchor_deltas,
            anchors,
            self.boundary_threshold,
            gt_boxes,
            self.smooth_l1_beta,
        )
        if self.training:
            losses = {k: v * self.loss_weight for k, v in outputs.losses().items()}
        else:
            losses = {}
        with torch.no_grad():
            # Find the top proposals by applying NMS and removing boxes that
            # are too small. The proposals are treated as fixed for approximate
            # joint training with roi heads. This approach ignores the derivative
            # w.r.t. the proposal boxes' coordinates that are also network
            # responses, so is approximate.
            proposals = find_top_bua_rpn_proposals(
                outputs.predict_proposals(),
                outputs.predict_objectness_logits(),
                images,
                self.nms_thresh,
                self.pre_nms_topk[self.training],
                self.post_nms_topk[self.training],
                self.min_box_side_len,
                self.training,
            )
            # For RPN-only models, the proposals are the final output and we return them in
            # high-to-low confidence order.
            # For end-to-end models, the RPN proposals are an intermediate state
            # and this sorting is actually not needed. But the cost is negligible.
            # inds = [p.objectness_logits.sort(descending=True)[1] for p in proposals]
            # proposals = [p[ind] for p, ind in zip(proposals, inds)]
        # (fixed: the original return line carried trailing extraction residue)
        return proposals, losses
import torch
import torch.functional as F
from detectron2.layers import ShapeSpec, batched_nms, cat, cross_entropy, nonzero_tuple
from fvcore.nn import giou_loss, smooth_l1_loss
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.structures import Boxes
class FastRCNNOutputs:
    """
    An internal implementation that stores information about outputs of a Fast R-CNN head,
    and provides methods that are used to decode the outputs of a Fast R-CNN head.
    """

    def __init__(
        self,
        box2box_transform,
        pred_class_logits,
        pred_proposal_deltas,
        proposals,
        smooth_l1_beta=0.0,
        box_reg_loss_type="smooth_l1",
    ):
        """
        Args:
            box2box_transform (Box2BoxTransform/Box2BoxTransformRotated):
                box2box transform instance for proposal-to-detection transformations.
            pred_class_logits (Tensor): A tensor of shape (R, K + 1) storing the predicted class
                logits for all R predicted object instances.
                Each row corresponds to a predicted object instance.
            pred_proposal_deltas (Tensor): A tensor of shape (R, K * B) or (R, B) for
                class-specific or class-agnostic regression. It stores the predicted deltas that
                transform proposals into final box detections.
                B is the box dimension (4 or 5).
                When B is 4, each row is [dx, dy, dw, dh (, ....)].
                When B is 5, each row is [dx, dy, dw, dh, da (, ....)].
            proposals (list[Instances]): A list of N Instances, where Instances i stores the
                proposals for image i, in the field "proposal_boxes".
                When training, each Instances must have ground-truth labels
                stored in the field "gt_classes" and "gt_boxes".
                The total number of all instances must be equal to R.
            smooth_l1_beta (float): The transition point between L1 and L2 loss in
                the smooth L1 loss function. When set to 0, the loss becomes L1. When
                set to +inf, the loss becomes constant 0.
            box_reg_loss_type (str): Box regression loss type. One of: "smooth_l1", "giou"
        """
        self.box2box_transform = box2box_transform
        self.num_preds_per_image = [len(p) for p in proposals]
        self.pred_class_logits = pred_class_logits
        self.pred_proposal_deltas = pred_proposal_deltas
        self.smooth_l1_beta = smooth_l1_beta
        self.box_reg_loss_type = box_reg_loss_type
        self.image_shapes = [x.image_size for x in proposals]
        if len(proposals):
            box_type = type(proposals[0].proposal_boxes)
            # cat(..., dim=0) concatenates over all images in the batch
            self.proposals = box_type.cat([p.proposal_boxes for p in proposals])
            assert (
                not self.proposals.tensor.requires_grad
            ), "Proposals should not require gradients!"
            # "gt_classes" exists if and only if training. But other gt fields may
            # not necessarily exist in training for images that have no groundtruth.
            if proposals[0].has("gt_classes"):
                self.gt_classes = cat([p.gt_classes for p in proposals], dim=0)
                # If "gt_boxes" does not exist, the proposals must be all negative and
                # should not be included in regression loss computation.
                # Here we just use proposal_boxes as an arbitrary placeholder because its
                # value won't be used in self.box_reg_loss().
                gt_boxes = [
                    p.gt_boxes if p.has("gt_boxes") else p.proposal_boxes for p in proposals
                ]
                self.gt_boxes = box_type.cat(gt_boxes)
        else:
            self.proposals = Boxes(torch.zeros(0, 4, device=self.pred_proposal_deltas.device))
        self._no_instances = len(self.proposals) == 0  # no instances found

    def softmax_cross_entropy_loss(self):
        """
        Deprecated. Returns the mean cross-entropy classification loss.
        """
        # FIX: _log_classification_stats was referenced but never imported at
        # module level; import it lazily from its detectron2 home so this
        # method no longer raises NameError. (TODO confirm the import path
        # against the pinned detectron2 version.)
        from detectron2.modeling.roi_heads.fast_rcnn import _log_classification_stats
        _log_classification_stats(self.pred_class_logits, self.gt_classes)
        return cross_entropy(self.pred_class_logits, self.gt_classes, reduction="mean")

    def box_reg_loss(self):
        """
        Deprecated. Returns the box regression loss (smooth L1 or GIoU),
        normalized by the total number of regions (foreground + background).
        """
        if self._no_instances:
            # Multiply by 0 (not return a constant) to keep the graph connected.
            return 0.0 * self.pred_proposal_deltas.sum()
        box_dim = self.proposals.tensor.size(1)  # 4 or 5
        cls_agnostic_bbox_reg = self.pred_proposal_deltas.size(1) == box_dim
        device = self.pred_proposal_deltas.device
        bg_class_ind = self.pred_class_logits.shape[1] - 1
        # Box delta loss is only computed between the prediction for the gt class k
        # (if 0 <= k < bg_class_ind) and the target; there is no loss defined on predictions
        # for non-gt classes and background.
        # Empty fg_inds should produce a valid loss of zero because reduction=sum.
        fg_inds = nonzero_tuple((self.gt_classes >= 0) & (self.gt_classes < bg_class_ind))[0]
        if cls_agnostic_bbox_reg:
            # pred_proposal_deltas only corresponds to foreground class for agnostic
            gt_class_cols = torch.arange(box_dim, device=device)
        else:
            # pred_proposal_deltas for class k are located in columns [b * k : b * k + b],
            # where b is the dimension of box representation (4 or 5)
            # Note that compared to Detectron1,
            # we do not perform bounding box regression for background classes.
            gt_class_cols = box_dim * self.gt_classes[fg_inds, None] + torch.arange(
                box_dim, device=device
            )
        if self.box_reg_loss_type == "smooth_l1":
            gt_proposal_deltas = self.box2box_transform.get_deltas(
                self.proposals.tensor, self.gt_boxes.tensor
            )
            loss_box_reg = smooth_l1_loss(
                self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
                gt_proposal_deltas[fg_inds],
                self.smooth_l1_beta,
                reduction="sum",
            )
        elif self.box_reg_loss_type == "giou":
            fg_pred_boxes = self.box2box_transform.apply_deltas(
                self.pred_proposal_deltas[fg_inds[:, None], gt_class_cols],
                self.proposals.tensor[fg_inds],
            )
            loss_box_reg = giou_loss(
                fg_pred_boxes,
                self.gt_boxes.tensor[fg_inds],
                reduction="sum",
            )
        else:
            raise ValueError(f"Invalid bbox reg loss type '{self.box_reg_loss_type}'")
        # Normalize by the total number of regions so images with few
        # foreground boxes don't dominate.
        loss_box_reg = loss_box_reg / self.gt_classes.numel()
        return loss_box_reg

    def losses(self):
        """
        Deprecated. Returns both losses as a dict.
        """
        return {"loss_cls": self.softmax_cross_entropy_loss(), "loss_box_reg": self.box_reg_loss()}

    def predict_boxes(self):
        """
        Deprecated. Returns per-image predicted boxes (deltas applied to proposals).
        """
        pred = self.box2box_transform.apply_deltas(self.pred_proposal_deltas, self.proposals.tensor)
        return pred.split(self.num_preds_per_image, dim=0)

    def predict_probs(self):
        """
        Deprecated. Returns per-image class probabilities.
        """
        # FIX: the module-level ``import torch.functional as F`` does not
        # provide ``softmax`` (that lives in torch.nn.functional), so the
        # previous ``F.softmax`` call raised AttributeError. Call the correct
        # function explicitly.
        probs = torch.nn.functional.softmax(self.pred_class_logits, dim=-1)
        return probs.split(self.num_preds_per_image, dim=0)
# -*- coding: utf-8 -*-
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from detectron2.utils.events import get_event_storage
from detectron2.modeling import ROI_HEADS_REGISTRY, ROIHeads
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.modeling.sampling import subsample_labels
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.backbone.resnet import BottleneckBlock
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals
from detectron2.layers import get_norm, BatchNorm2d
from .fast_rcnn import BUACaffeFastRCNNOutputs, BUACaffeFastRCNNOutputLayers, BUADetection2FastRCNNOutputs, BUADetectron2FastRCNNOutputLayers
from .box_regression import BUABox2BoxTransform
from .backbone import BottleneckBlockv2
def make_stage(block_class, num_blocks, first_stride, **kwargs):
    """
    Create a resnet stage by creating many blocks.

    Args:
        block_class (class): a subclass of ResNetBlockBase
        num_blocks (int):
        first_stride (int): the stride of the first block. The other blocks
            will have stride=1. A `stride` argument will be passed to the
            block constructor. When ``kwargs["dilation"] > 1`` the first
            stride is forced to 1, so dilation replaces downsampling.
        kwargs: other arguments passed to the block constructor.

    Returns:
        list[nn.Module]: a list of block module.
    """
    # Dilated stages keep full spatial resolution: override the entry stride.
    # (Loop-invariant, so decide once up front.)
    if kwargs["dilation"] > 1:
        first_stride = 1
    stage = []
    for block_index in range(num_blocks):
        stride = first_stride if block_index == 0 else 1
        stage.append(block_class(stride=stride, **kwargs))
        # Subsequent blocks consume the previous block's output channels.
        # (kwargs is this call's own dict, so mutating it is local.)
        kwargs["in_channels"] = kwargs["out_channels"]
    return stage
@ROI_HEADS_REGISTRY.register()
class BUACaffeRes5ROIHeads(ROIHeads):
    """
    The ROIHeads in a typical "C4" R-CNN model, where
    the box and mask head share the cropping and
    the per-region feature computation by a Res5 block.

    Caffe-weights variant of the bottom-up-attention ROI heads. Besides the
    standard detection path it supports:
    - attribute prediction (cfg.MODEL.BUA.ATTRIBUTE_ON), and
    - a pure feature-extraction mode (cfg.MODEL.BUA.EXTRACT_FEATS) that
      returns pooled features / probabilities instead of Instances.
    """
    def __init__(self, cfg, input_shape):
        # super().__init__(cfg, input_shape)
        super().__init__(cfg)
        self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
        self.feature_strides = {k: v.stride for k, v in input_shape.items()}
        self.cls_agnostic_bbox_reg = cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
        self.smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
        # C4 head: pooling is done from exactly one feature level.
        assert len(self.in_features) == 1
        # fmt: off
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        pooler_scales = (1.0 / self.feature_strides[self.in_features[0]], )
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        self.resnet_version = cfg.MODEL.BUA.RESNET_VERSION
        self.attr_on = cfg.MODEL.BUA.ATTRIBUTE_ON
        self.extract_on = cfg.MODEL.BUA.EXTRACT_FEATS
        self.num_attr_classes = cfg.MODEL.BUA.ATTRIBUTE.NUM_CLASSES
        self.extractor_mode = cfg.MODEL.BUA.EXTRACTOR.MODE
        self.test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
        self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
        self.test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE
        self.pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        self.box2box_transform = BUABox2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
        self.res5, out_channels = self._build_res5_block(cfg)
        # ResNet v2 (pre-activation) leaves the trunk un-normalized; a final
        # BN + ReLU is applied in _shared_roi_transform.
        if self.resnet_version == 2:
            self.res5_bn = BatchNorm2d(out_channels, eps=2e-5)
        self.box_predictor = BUACaffeFastRCNNOutputLayers(
            out_channels, self.num_classes, self.cls_agnostic_bbox_reg, attr_on=self.attr_on, num_attr_classes=self.num_attr_classes
        )
    def _build_res5_block(self, cfg):
        """Build the res5 stage used as the shared per-region head.

        Returns:
            (nn.Sequential, int): the stage and its output channel count.
        """
        # fmt: off
        stage_channel_factor = 2 ** 3  # res5 is 8x res2
        num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        bottleneck_channels = num_groups * width_per_group * stage_channel_factor
        out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
        stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
        norm = cfg.MODEL.RESNETS.NORM
        dilation = cfg.MODEL.RESNETS.RES5_DILATION
        assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
            "Deformable conv is not yet supported in res5 head."
        # fmt: on
        blocks = make_stage(
            BottleneckBlock if self.resnet_version == 1 else BottleneckBlockv2,
            3,
            first_stride=2,
            in_channels=out_channels // 2,
            bottleneck_channels=bottleneck_channels,
            out_channels=out_channels,
            num_groups=num_groups,
            norm=norm,
            stride_in_1x1=stride_in_1x1,
            dilation=dilation,
        )
        return nn.Sequential(*blocks), out_channels
    def _shared_roi_transform(self, features, boxes):
        """ROI-pool `boxes` from `features` and run them through res5.

        For resnet v2 the first block is unrolled manually (conv1..conv3 +
        shortcut) and a final BN + ReLU is applied after the stage — the
        pre-activation ordering differs from v1's plain `self.res5(x)` path.
        """
        x = self.pooler(features, boxes)
        if self.resnet_version == 2:
            out = self.res5[0].conv1(x)
            out = self.res5[0].conv2(out)
            out = self.res5[0].conv3(out)
            if self.res5[0].shortcut is not None:
                shortcut = self.res5[0].shortcut(x)
            else:
                shortcut = x
            out += shortcut
            out = self.res5[1:](out)
            return F.relu_(self.res5_bn(out))
        return self.res5(x)
    def forward(self, images, features, proposals, targets=None):
        """
        See :class:`ROIHeads.forward`.

        In extraction mode (self.extract_on) the return value depends on
        self.extractor_mode:
        - 1 or 3: per-image proposal boxes, class probabilities, pooled
          features (and attribute scores when attr_on);
        - 2: predicted boxes and class probabilities only.
        Otherwise returns (pred_instances, losses) as usual.
        """
        # image_scales feed the Caffe-style output decoding in
        # BUACaffeFastRCNNOutputs; the ImageList itself is not needed further.
        image_scales = images.image_scales
        del images
        if self.training:
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        proposal_boxes = [x.proposal_boxes for x in proposals]
        box_features = self._shared_roi_transform(
            [features[f] for f in self.in_features], proposal_boxes
        )
        feature_pooled = box_features.mean(dim=[2, 3])  # pooled to 1x1
        if self.attr_on:
            pred_class_logits, pred_proposal_deltas, attr_scores = self.box_predictor(feature_pooled, proposals)
        else:
            pred_class_logits, pred_proposal_deltas = self.box_predictor(feature_pooled, proposals)
        # feature_pooled must be kept alive in extraction mode — it is part of
        # the return value below.
        if not self.extract_on:
            del feature_pooled
        outputs = BUACaffeFastRCNNOutputs(
            self.box2box_transform,
            pred_class_logits,
            pred_proposal_deltas,
            proposals,
            self.smooth_l1_beta,
            image_scales
        )
        if self.training:
            del features
            losses = outputs.losses()
            return [], losses
        else:
            if self.extract_on:
                num_preds_per_image = [len(p) for p in proposals]
                if self.extractor_mode == 1 or self.extractor_mode == 3:
                    if self.attr_on:
                        return proposal_boxes, outputs.predict_probs(), feature_pooled.split(num_preds_per_image, dim=0), attr_scores.split(num_preds_per_image, dim=0)
                    else:
                        return proposal_boxes, outputs.predict_probs(), feature_pooled.split(num_preds_per_image, dim=0)
                elif self.extractor_mode == 2:
                    return outputs.predict_boxes(), outputs.predict_probs()
                else:
                    raise ValueError('BUA.EXTRATOR.MODE ERROR')
            pred_instances, _ = outputs.inference(
                self.test_score_thresh, self.test_nms_thresh, self.test_detections_per_img
            )
            return pred_instances, {}
@ROI_HEADS_REGISTRY.register()
class BUADetectron2Res5ROIHeads(ROIHeads):
    """
    The ROIHeads in a typical "C4" R-CNN model, where
    the box and mask head share the cropping and
    the per-region feature computation by a Res5 block.

    Detectron2-weights variant of the bottom-up-attention ROI heads. It
    optionally predicts per-box attributes (cfg.MODEL.BUA.ATTRIBUTE_ON) and
    supports a pure feature-extraction mode (cfg.MODEL.BUA.EXTRACT_FEATS).
    """

    def __init__(self, cfg, input_shape):
        # super().__init__(cfg, input_shape)
        super().__init__(cfg)
        self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
        self.feature_strides = {k: v.stride for k, v in input_shape.items()}
        self.cls_agnostic_bbox_reg = cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG
        self.smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
        self.positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION
        # C4 head: pooling is done from exactly one feature level.
        assert len(self.in_features) == 1
        # fmt: off
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        pooler_scales = (1.0 / self.feature_strides[self.in_features[0]], )
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        self.resnet_version = cfg.MODEL.BUA.RESNET_VERSION
        self.attr_on = cfg.MODEL.BUA.ATTRIBUTE_ON
        self.extract_on = cfg.MODEL.BUA.EXTRACT_FEATS
        self.num_attr_classes = cfg.MODEL.BUA.ATTRIBUTE.NUM_CLASSES
        self.extractor_mode = cfg.MODEL.BUA.EXTRACTOR.MODE
        self.test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
        self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
        self.test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE
        self.pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        self.box2box_transform = BUABox2BoxTransform(weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS)
        self.res5, out_channels = self._build_res5_block(cfg)
        # ResNet v2 (pre-activation) needs a final BN + ReLU after the trunk.
        if self.resnet_version == 2:
            self.res5_bn = BatchNorm2d(out_channels, eps=2e-5)
        self.box_predictor = BUADetectron2FastRCNNOutputLayers(
            out_channels, self.num_classes, self.cls_agnostic_bbox_reg,
            attr_on=self.attr_on, num_attr_classes=self.num_attr_classes
        )

    def _sample_proposals(self, matched_idxs, matched_labels, gt_classes, gt_attributes):
        """
        Based on the matching between N proposals and M groundtruth,
        sample the proposals and set their classification labels.
        Args:
            matched_idxs (Tensor): a vector of length N, each is the best-matched
                gt index in [0, M) for each proposal.
            matched_labels (Tensor): a vector of length N, the matcher's label
                (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.
            gt_classes (Tensor): a vector of length M.
            gt_attributes (Tensor): per-instance attribute labels for the M
                groundtruth boxes (an (M, 16) tensor — TODO confirm width
                against the dataset mapper).
        Returns:
            Tensor: a vector of indices of sampled proposals. Each is in [0, N).
            Tensor: a vector of the same length, the classification label for
                each sampled proposal. Each sample is labeled as either a category in
                [0, num_classes) or the background (num_classes).
            Tensor: the attribute labels of the sampled proposals.
        """
        has_gt = gt_classes.numel() > 0
        # Get the corresponding GT for each proposal
        if has_gt:
            gt_classes = gt_classes[matched_idxs]
            gt_attributes = gt_attributes[matched_idxs, :]
            # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
            gt_classes[matched_labels == 0] = self.num_classes
            # Label ignore proposals (-1 label)
            gt_classes[matched_labels == -1] = -1
        else:
            gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
            # BUGFIX: this tensor was previously assigned to the misspelled
            # name "gt_clagt_attributes", leaving `gt_attributes` as the empty
            # (0, 16) input and crashing `gt_attributes[sampled_idxs]` below
            # for images without ground truth.
            # NOTE(review): the attribute width 16 and the .cuda() device are
            # hard-coded — confirm against the training setup.
            gt_attributes = -torch.ones((len(matched_idxs), 16), dtype=torch.int64).cuda()
        sampled_fg_idxs, sampled_bg_idxs = subsample_labels(
            gt_classes, self.batch_size_per_image, self.positive_sample_fraction, self.num_classes
        )
        sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)
        return sampled_idxs, gt_classes[sampled_idxs], gt_attributes[sampled_idxs]

    def _build_res5_block(self, cfg):
        """Build the res5 stage used as the shared per-region head.

        Returns:
            (nn.Sequential, int): the stage and its output channel count.
        """
        # fmt: off
        stage_channel_factor = 2 ** 3  # res5 is 8x res2
        num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
        width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
        bottleneck_channels = num_groups * width_per_group * stage_channel_factor
        out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor
        stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
        norm = cfg.MODEL.RESNETS.NORM
        dilation = cfg.MODEL.RESNETS.RES5_DILATION
        assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \
            "Deformable conv is not yet supported in res5 head."
        # fmt: on
        blocks = make_stage(
            BottleneckBlock if self.resnet_version == 1 else BottleneckBlockv2,
            3,
            first_stride=2,
            in_channels=out_channels // 2,
            bottleneck_channels=bottleneck_channels,
            out_channels=out_channels,
            num_groups=num_groups,
            norm=norm,
            stride_in_1x1=stride_in_1x1,
            dilation=dilation,
        )
        return nn.Sequential(*blocks), out_channels

    def _shared_roi_transform(self, features, boxes):
        """ROI-pool `boxes` from `features` and run them through res5.

        For resnet v2 the first block is unrolled manually (conv1..conv3 +
        shortcut) and a final BN + ReLU is applied after the stage, matching
        the pre-activation ordering; v1 simply applies `self.res5`.
        """
        x = self.pooler(features, boxes)
        if self.resnet_version == 2:
            out = self.res5[0].conv1(x)
            out = self.res5[0].conv2(out)
            out = self.res5[0].conv3(out)
            if self.res5[0].shortcut is not None:
                shortcut = self.res5[0].shortcut(x)
            else:
                shortcut = x
            out += shortcut
            out = self.res5[1:](out)
            return F.relu_(self.res5_bn(out))
        return self.res5(x)

    @torch.no_grad()
    def label_and_sample_proposals(self, proposals, targets):
        """
        Prepare some proposals to be used to train the ROI heads.
        It performs box matching between `proposals` and `targets`, and assigns
        training labels to the proposals.
        It returns ``self.batch_size_per_image`` random samples from proposals and groundtruth
        boxes, with a fraction of positives that is no larger than
        ``self.positive_sample_fraction``.
        Args:
            See :meth:`ROIHeads.forward`
        Returns:
            list[Instances]:
                length `N` list of `Instances`s containing the proposals
                sampled for training. Each `Instances` has the following fields:
                - proposal_boxes: the proposal boxes
                - gt_boxes: the ground-truth box that the proposal is assigned to
                  (this is only meaningful if the proposal has a label > 0; if label = 0
                  then the ground-truth box is random)
                Other fields such as "gt_classes", "gt_masks", that's included in `targets`.
        """
        gt_boxes = [x.gt_boxes for x in targets]
        # Augment proposals with ground-truth boxes.
        # In the case of learned proposals (e.g., RPN), when training starts
        # the proposals will be low quality due to random initialization.
        # It's possible that none of these initial
        # proposals have high enough overlap with the gt objects to be used
        # as positive examples for the second stage components (box head,
        # cls head, mask head). Adding the gt boxes to the set of proposals
        # ensures that the second stage components will have some positive
        # examples from the start of training. For RPN, this augmentation improves
        # convergence and empirically improves box AP on COCO by about 0.5
        # points (under one tested configuration).
        if self.proposal_append_gt:
            proposals = add_ground_truth_to_proposals(gt_boxes, proposals)

        proposals_with_gt = []
        num_fg_samples = []
        num_bg_samples = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0
            match_quality_matrix = pairwise_iou(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            sampled_idxs, gt_classes, gt_attributes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes, targets_per_image.gt_attributes
            )
            # Set target attributes of the sampled proposals:
            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes
            proposals_per_image.gt_attributes = gt_attributes
            # We index all the attributes of targets that start with "gt_"
            # and have not been added to proposals yet (="gt_classes").
            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                # NOTE: here the indexing waste some compute, because heads
                # like masks, keypoints, etc, will filter the proposals again,
                # (by foreground/background, or number of keypoints in the image, etc)
                # so we essentially index the data twice.
                for (trg_name, trg_value) in targets_per_image.get_fields().items():
                    if trg_name.startswith("gt_") and not proposals_per_image.has(trg_name):
                        proposals_per_image.set(trg_name, trg_value[sampled_targets])
            else:
                # No ground truth: attach all-zero placeholder boxes (only
                # meaningful for proposals labeled as foreground, of which
                # there are none here).
                gt_boxes = Boxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 4))
                )
                proposals_per_image.gt_boxes = gt_boxes
            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
            proposals_with_gt.append(proposals_per_image)

        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
        return proposals_with_gt

    def forward(self, images, features, proposals, targets=None):
        """
        See :class:`ROIHeads.forward`.

        In extraction mode (self.extract_on) the return value depends on
        self.extractor_mode:
        - 1 or 3: per-image proposal boxes, class probabilities, pooled
          features (and attribute probabilities when attr_on);
        - 2: predicted boxes and class probabilities only.
        Otherwise returns (pred_instances, losses) as usual.
        """
        del images
        if self.training:
            proposals = self.label_and_sample_proposals(proposals, targets)
        del targets
        proposal_boxes = [x.proposal_boxes for x in proposals]
        box_features = self._shared_roi_transform(
            [features[f] for f in self.in_features], proposal_boxes
        )
        feature_pooled = box_features.mean(dim=[2, 3])  # pooled to 1x1
        if self.attr_on:
            pred_class_logits, pred_proposal_deltas, pred_attribute_logits, gt_attributes = self.box_predictor(feature_pooled, proposals)
        else:
            pred_class_logits, pred_proposal_deltas = self.box_predictor(feature_pooled, proposals)
        # feature_pooled must stay alive in extraction mode — it is returned.
        if not self.extract_on:
            del feature_pooled
        if self.attr_on:
            outputs = BUADetection2FastRCNNOutputs(
                self.box2box_transform,
                pred_class_logits,
                pred_proposal_deltas,
                proposals,
                self.smooth_l1_beta,
                self.attr_on,
                pred_attribute_logits=pred_attribute_logits,
                num_attr_classes=self.num_attr_classes,
                gt_attributes=gt_attributes,
            )
        else:
            outputs = BUADetection2FastRCNNOutputs(
                self.box2box_transform,
                pred_class_logits,
                pred_proposal_deltas,
                proposals,
                self.smooth_l1_beta,
                self.attr_on,
            )
        if self.training:
            del features
            losses = outputs.losses()
            return [], losses
        else:
            if self.extract_on:
                num_preds_per_image = [len(p) for p in proposals]
                if self.extractor_mode == 1 or self.extractor_mode == 3:
                    if self.attr_on:
                        return proposal_boxes, outputs.predict_probs(), feature_pooled.split(num_preds_per_image, dim=0), F.softmax(pred_attribute_logits, dim=-1).split(num_preds_per_image, dim=0)
                    else:
                        return proposal_boxes, outputs.predict_probs(), feature_pooled.split(num_preds_per_image, dim=0)
                elif self.extractor_mode == 2:
                    return outputs.predict_boxes(), outputs.predict_probs()
                else:
                    raise ValueError('BUA.EXTRATOR.MODE ERROR')
            pred_instances, _ = outputs.inference(
                self.test_score_thresh, self.test_nms_thresh, self.test_detections_per_img
            )
            return pred_instances, {}
import fvcore.nn.weight_init as weight_init
from torch import nn
import torch.nn.functional as F
from detectron2.layers import Conv2d, FrozenBatchNorm2d, get_norm, BatchNorm2d
from detectron2.modeling import BACKBONE_REGISTRY, ResNet, make_stage
from detectron2.modeling.backbone.resnet import BottleneckBlock, DeformBottleneckBlock, ResNetBlockBase
from .layers.wrappers import Conv2dv2
__all__ = ["BUABasicStem", "BUABasicStemv2", "build_bua_resnet_backbone"]
class BUABasicStem(nn.Module):
    """ResNet-v1 stem: 7x7 stride-2 conv (with norm + ReLU) followed by a
    3x3 stride-2 max pool, for an overall stride of 4."""

    def __init__(self, in_channels=3, out_channels=64, norm="BN"):
        """
        Args:
            norm (str or callable): a callable that takes the number of
                channels and return a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN", "BN", "GN"}).
        """
        super().__init__()
        self.conv1 = Conv2d(
            in_channels,
            out_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False,
            norm=get_norm(norm, out_channels),
        )
        weight_init.c2_msra_fill(self.conv1)

    def forward(self, x):
        out = F.relu_(self.conv1(x))
        # Caffe-style pooling: no padding, ceil_mode on.
        return F.max_pool2d(out, kernel_size=3, stride=2, padding=0, ceil_mode=True)

    @property
    def out_channels(self):
        return self.conv1.out_channels

    @property
    def stride(self):
        # stride-2 conv followed by stride-2 max pool
        return 4
class BUABasicStemv2(nn.Module):
    """Pre-activation (ResNet v2) stem: BatchNorm on the raw input, then a
    7x7 stride-2 conv with BatchNorm, ReLU, and a 3x3 stride-2 max pool."""

    def __init__(self, in_channels=3, out_channels=64, norm="BN"):
        """
        Args:
            norm (str or callable): a callable that takes the number of
                channels and return a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN", "BN", "GN"}). Note this v2 stem always
                uses BatchNorm2d(eps=2e-5) regardless of `norm`.
        """
        super().__init__()
        # v2 normalizes the input before the first convolution.
        self.norm = BatchNorm2d(in_channels, eps=2e-5)
        self.conv1 = Conv2d(
            in_channels,
            out_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False,
            norm=BatchNorm2d(out_channels, eps=2e-5),
        )
        weight_init.c2_msra_fill(self.conv1)

    def forward(self, x):
        out = self.conv1(self.norm(x))
        out = F.relu_(out)
        # Caffe-style pooling: no padding, ceil_mode on.
        return F.max_pool2d(out, kernel_size=3, stride=2, padding=0, ceil_mode=True)

    @property
    def out_channels(self):
        return self.conv1.out_channels

    @property
    def stride(self):
        # stride-2 conv followed by stride-2 max pool
        return 4
@BACKBONE_REGISTRY.register()
def build_bua_resnet_backbone(cfg, input_shape):
    """
    Create a ResNet instance from config.

    Selects the v1 or v2 (pre-activation) stem and bottleneck blocks based on
    cfg.MODEL.BUA.RESNET_VERSION, and optionally freezes the stem and early
    stages according to cfg.MODEL.BACKBONE.FREEZE_AT.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETS.NORM
    # NOTE: the v2 stem ignores the `norm` setting (it hard-codes BatchNorm2d).
    if cfg.MODEL.BUA.RESNET_VERSION == 2:
        stem = BUABasicStemv2(
            in_channels=input_shape.channels,
            out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        )
    else:
        stem = BUABasicStem(
            in_channels=input_shape.channels,
            out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
            norm=norm,
        )
    freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT

    # Freezing the stem also converts its BN layers to FrozenBatchNorm2d.
    if freeze_at >= 1:
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)

    # fmt: off
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    depth = cfg.MODEL.RESNETS.DEPTH
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = num_groups * width_per_group
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
    deform_on_per_stage = cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE
    deform_modulated = cfg.MODEL.RESNETS.DEFORM_MODULATED
    deform_num_groups = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
    # fmt: on
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)

    # Only ResNet-50/101/152 depths are supported here (raises KeyError otherwise).
    num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth]

    stages = []

    # Avoid creating variables without gradients
    # It consumes extra memory and may cause allreduce to fail
    out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features]
    max_stage_idx = max(out_stage_idx)
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        # Dilation replaces downsampling in res5 when res5_dilation == 2.
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "first_stride": first_stride,
            "in_channels": in_channels,
            "bottleneck_channels": bottleneck_channels,
            "out_channels": out_channels,
            "num_groups": num_groups,
            "norm": norm,
            "stride_in_1x1": stride_in_1x1,
            "dilation": dilation,
        }
        if deform_on_per_stage[idx]:
            stage_kargs["block_class"] = DeformBottleneckBlock
            stage_kargs["deform_modulated"] = deform_modulated
            stage_kargs["deform_num_groups"] = deform_num_groups
        else:
            stage_kargs["block_class"] = BottleneckBlock if cfg.MODEL.BUA.RESNET_VERSION == 1 else BottleneckBlockv2
        blocks = make_stage(**stage_kargs)
        # Channel bookkeeping for the next stage: each stage doubles widths.
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2

        if freeze_at >= stage_idx:
            for block in blocks:
                block.freeze()
        stages.append(blocks)
    return ResNet(stem, stages, out_features=out_features)
class BottleneckBlockv2(ResNetBlockBase):
    """
    Pre-activation ("v2") bottleneck residual block.

    ``Conv2dv2`` applies its ``norm``/``activation`` *before* its convolution,
    so this block normalizes + ReLUs the *input* of each conv instead of its
    output (pre-activation ResNet ordering).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN",
        stride_in_1x1=False,
        dilation=1,
    ):
        """
        Args:
            norm (str or callable): a callable that takes the number of
                channels and return a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN", "BN", "GN"}).
            stride_in_1x1 (bool): when stride==2, whether to put stride in the
                first 1x1 convolution or the bottleneck 3x3 convolution.
        """
        super().__init__(in_channels, out_channels, stride)
        # 1x1 projection shortcut is only needed when channel counts differ.
        if in_channels != out_channels:
            self.shortcut = Conv2dv2(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=None,
            )
        else:
            self.shortcut = None
        # The original MSRA ResNet models have stride in the first 1x1 conv
        # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
        # stride in the 3x3 conv
        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
        # conv1 has no norm/activation of its own: its input is pre-activated
        # by self.norm + relu in forward().
        self.conv1 = Conv2dv2(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=None,
        )
        # conv2/conv3 normalize+activate their *inputs* (both have
        # bottleneck_channels input channels, hence the BN size).
        self.conv2 = Conv2dv2(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride_3x3,
            padding=1 * dilation,
            bias=False,
            groups=num_groups,
            dilation=dilation,
            norm=BatchNorm2d(bottleneck_channels, eps=2e-5),
            activation=F.relu_,
        )
        self.conv3 = Conv2dv2(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=BatchNorm2d(bottleneck_channels, eps=2e-5),
            activation=F.relu_,
        )
        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)
        # Pre-activation norm applied to the raw block input in forward().
        self.norm = BatchNorm2d(in_channels, eps=2e-5)

    def forward(self, x):
        # Pre-activate the input; the projection shortcut (if any) consumes
        # the pre-activated tensor, while the identity shortcut uses raw x.
        x_2 = self.norm(x)
        x_2 = F.relu_(x_2)
        out = self.conv1(x_2)
        out = self.conv2(out)
        out = self.conv3(out)
        if self.shortcut is not None:
            shortcut = self.shortcut(x_2)
        else:
            shortcut = x
        out += shortcut
        return out
MPMQA | MPMQA-master/detector/bua/caffe/modeling/__init__.py | from .backbone import build_bua_resnet_backbone
from .rcnn import GeneralizedBUARCNN
from .roi_heads import BUACaffeRes5ROIHeads
from .rpn import StandardBUARPNHead, BUARPN
| 173 | 33.8 | 47 | py |
MPMQA | MPMQA-master/detector/bua/caffe/modeling/layers/nms.py |
# from ._utils import _C
from bua.caffe.modeling import _C
from apex import amp
import torch
# Only valid with fp32 inputs - give AMP the hint
nms = amp.float_function(_C.nms)
# nms.__doc__ = """
# This function performs Non-maximum suppresion"""
# NOTE: In order to be consistent with bottom-up-attention, we nms core function from maskrcnn-benchmark
def batched_nms(boxes, scores, idxs, iou_threshold):
    """
    Same as torchvision.ops.boxes.batched_nms, but safer.
    """
    assert boxes.shape[-1] == 4
    boxes, scores = boxes.cpu(), scores.cpu()
    # TODO may need better strategy.
    # Investigate after having a fully-cuda NMS op.
    if len(boxes) < 40000:
        # Small input: the single-call offset trick is fine.
        return box_ops_batched_nms(boxes, scores, idxs, iou_threshold)
    # Large input: run NMS one category at a time and merge the survivors.
    kept = scores.new_zeros(scores.size(), dtype=torch.bool)
    for cls in torch.unique(idxs).cpu().tolist():
        members = (idxs == cls).nonzero().view(-1)
        selected = nms(boxes[members], scores[members], iou_threshold)
        kept[members[selected]] = True
    order = kept.nonzero().view(-1)
    return order[scores[order].argsort(descending=True)]
def box_ops_batched_nms(boxes, scores, idxs, iou_threshold):
    """
    Performs non-maximum suppression in a batched fashion.
    Each index value correspond to a category, and NMS
    will not be applied between elements of different categories.
    Parameters
    ----------
    boxes : Tensor[N, 4]
        boxes where NMS will be performed. They
        are expected to be in (x1, y1, x2, y2) format
    scores : Tensor[N]
        scores for each one of the boxes
    idxs : Tensor[N]
        indices of the categories for each one of the boxes.
    iou_threshold : float
        discards all overlapping boxes
        with IoU > iou_threshold
    Returns
    -------
    keep : Tensor
        int64 tensor with the indices of
        the elements that have been kept by NMS, sorted
        in decreasing order of scores
    """
    if boxes.numel() == 0:
        return torch.empty((0,), dtype=torch.int64, device=boxes.device)
    # strategy: in order to perform NMS independently per class.
    # we add an offset to all the boxes. The offset is dependent
    # only on the class idx, and is large enough so that boxes
    # from different classes do not overlap
    max_coordinate = boxes.max()
    offsets = idxs.to(boxes) * (max_coordinate + 1)
    boxes_for_nms = boxes + offsets[:, None]
    keep = nms(boxes_for_nms, scores, iou_threshold)
    return keep
MPMQA | MPMQA-master/detector/bua/caffe/modeling/layers/wrappers.py | import math
import torch
from torch.nn.modules.utils import _ntuple
class Conv2dv2(torch.nn.Conv2d):
    """
    A wrapper around :class:`torch.nn.Conv2d` that can run a normalization
    layer and an activation *before* the convolution (the pre-activation
    ordering used by v2 residual blocks).
    """

    def __init__(self, *args, **kwargs):
        """
        Extra keyword arguments supported in addition to those in `torch.nn.Conv2d`:
        Args:
            norm (nn.Module, optional): a normalization layer applied to the
                convolution's input
            activation (callable(Tensor) -> Tensor): a callable activation
                function, applied after the norm and before the convolution
        """
        norm = kwargs.pop("norm", None)
        activation = kwargs.pop("activation", None)
        super().__init__(*args, **kwargs)
        self.norm = norm
        self.activation = activation

    def forward(self, x):
        if x.numel() == 0 and self.training:
            # https://github.com/pytorch/pytorch/issues/12013
            assert not isinstance(
                self.norm, torch.nn.SyncBatchNorm
            ), "SyncBatchNorm does not support empty inputs!"
        # NOTE: norm/activation run BEFORE the convolution (pre-activation).
        if self.norm is not None:
            x = self.norm(x)
        if self.activation is not None:
            x = self.activation(x)
        x = super().forward(x)
        return x
MPMQA | MPMQA-master/detector/bua/caffe/modeling/layers/csrc/__init__.py |
from .nms import SwapAlign2Nat, swap_align2nat
# Re-export every public (non-underscore) name imported above.
__all__ = [k for k in globals().keys() if not k.startswith("_")]
MPMQA | MPMQA-master/detector/bua/caffe/dataloader/dataset_mapper.py |
import copy
import logging
import numpy as np
import torch
import cv2
from detectron2.data import detection_utils as utils
from detectron2.data import transforms as T
from .transform_gen import ResizeShortestEdge
from .detection_utils import annotations_to_instances
"""
This file contains the default mapping that's applied to "dataset dicts".
"""
__all__ = ["DatasetMapper"]
def build_transform_gen(cfg, is_train):
    """
    Create a list of :class:`TransformGen` from config.
    Now it includes resizing and flipping.
    Returns:
        list[TransformGen]
    """
    # Pick the split-specific size limits.
    if is_train:
        min_size, max_size = cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN
    else:
        min_size, max_size = cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST
    tfm_gens = [ResizeShortestEdge(min_size, max_size, cfg.MODEL.PIXEL_MEAN)]
    if is_train:
        logging.getLogger(__name__).info("TransformGens used in training: " + str(tfm_gens))
    return tfm_gens
class DatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and map it into a format used by the model.
    This is the default callable to be used to map your dataset dict into training data.
    You may need to follow it to implement your own one for customized logic.
    The callable currently does the following:
    1. Read the image from "file_name"
    2. Applies cropping/geometric transforms to the image and annotations
    3. Prepare data and annotations to Tensor and :class:`Instances`
    """
    def __init__(self, cfg, is_train=True):
        # Optional random-crop augmentation, enabled only for training.
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
            logging.getLogger(__name__).info("CropGen used in training: " + str(self.crop_gen))
        else:
            self.crop_gen = None
        self.tfm_gens = build_transform_gen(cfg, is_train)
        # fmt: off
        self.img_format = cfg.INPUT.FORMAT
        self.mask_on = cfg.MODEL.MASK_ON
        self.mask_format = cfg.INPUT.MASK_FORMAT
        self.keypoint_on = cfg.MODEL.KEYPOINT_ON
        self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
        # fmt: on
        if self.keypoint_on and is_train:
            # Flip only makes sense in training
            self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
        else:
            self.keypoint_hflip_indices = None
        if self.load_proposals:
            self.min_box_side_len = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
            self.proposal_topk = (
                cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
                if is_train
                else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
            )
        self.is_train = is_train
    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        # USER: Write your own image loading if it's not from a file
        # image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
        # NOTE: uses cv2.imread directly; self.img_format is not applied here.
        image = cv2.imread(dataset_dict["file_name"])
        h, w = image.shape[:2]
        # utils.check_image_size(dataset_dict, image)
        if "annotations" not in dataset_dict:
            image, transforms = T.apply_transform_gens(
                ([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
            )
        else:
            # Crop around an instance if there are instances in the image.
            # USER: Remove if you don't use cropping
            if self.crop_gen:
                crop_tfm = utils.gen_crop_transform_with_instance(
                    self.crop_gen.get_crop_size(image.shape[:2]),
                    image.shape[:2],
                    np.random.choice(dataset_dict["annotations"]),
                )
                image = crop_tfm.apply_image(image)
            image, transforms = T.apply_transform_gens(self.tfm_gens, image)
            # Prepend the crop so annotation transforms see crop-then-resize.
            if self.crop_gen:
                transforms = crop_tfm + transforms
        image_shape = image.shape[:2]  # h, w
        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
        # Resize factor of the transformed image relative to the original height.
        dataset_dict["im_scale"] = float(image_shape[0])/ float(h)
        # Can use uint8 if it turns out to be slow some day
        # USER: Remove if you don't use pre-computed proposals.
        if self.load_proposals:
            utils.transform_proposals(
                dataset_dict, image_shape, transforms, self.min_box_side_len, self.proposal_topk
            )
        if not self.is_train:
            # Inference: ground truth is not needed downstream.
            dataset_dict.pop("annotations", None)
            dataset_dict.pop("sem_seg_file_name", None)
            return dataset_dict
        if "annotations" in dataset_dict:
            # USER: Modify this if you want to keep them for some reason.
            for anno in dataset_dict["annotations"]:
                if not self.mask_on:
                    anno.pop("segmentation", None)
                if not self.keypoint_on:
                    anno.pop("keypoints", None)
            # USER: Implement additional transformations if you have other types of data
            annos = [
                utils.transform_instance_annotations(
                    obj, transforms, image_shape
                )
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]
            instances = annotations_to_instances(
                annos, image_shape, mask_format=self.mask_format
            )
            # Create a tight bounding box from masks, useful when image is cropped
            if self.crop_gen and instances.has("gt_masks"):
                instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
            dataset_dict["instances"] = utils.filter_empty_instances(instances)
        return dataset_dict
| 6,394 | 37.757576 | 97 | py |
MPMQA | MPMQA-master/detector/bua/caffe/dataloader/detection_utils.py | # -*- coding: utf-8 -*-
"""
Common data processing utilities that are used in a
typical object detection data pipeline.
"""
import torch
from detectron2.structures import (
Boxes,
BoxMode,
Instances,
)
def transform_instance_annotations(
    annotation, transforms, image_size, *, keypoint_hflip_indices=None
):
    """
    Apply transforms to the bounding box of a single instance annotation.

    Only the "bbox" field is transformed (via `transforms.apply_box`);
    unlike detectron2's implementation, segmentation polygons and keypoints
    are NOT handled here, and "attributes", if present, are left untouched.

    Args:
        annotation (dict): dict of instance annotations for a single instance.
            It will be modified in-place.
        transforms (TransformList):
        image_size (tuple): the height, width of the transformed image
        keypoint_hflip_indices (ndarray[int]): unused; kept for interface
            compatibility with detectron2's version.

    Returns:
        dict:
            the same input dict with its "bbox" transformed according to
            `transforms` and "bbox_mode" set to XYXY_ABS.
    """
    bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
    # apply_box expects an array of boxes; wrap and unwrap the single box.
    annotation["bbox"] = transforms.apply_box([bbox])[0]
    annotation["bbox_mode"] = BoxMode.XYXY_ABS
    return annotation
def annotations_to_instances(annos, image_size, mask_format="polygon"):
    """
    Create an :class:`Instances` object used by the models,
    from instance annotations in the dataset dict.

    Args:
        annos (list[dict]): a list of instance annotations in one image, each
            element for one instance.
        image_size (tuple): height, width
        mask_format (str): unused here; kept for interface compatibility.

    Returns:
        Instances:
            with fields "gt_boxes", "gt_classes" and "gt_attributes".
            Instances without an "attributes" entry get a placeholder of
            16 values of -1.
    """
    boxes = [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
    target = Instances(image_size)
    boxes = target.gt_boxes = Boxes(boxes)
    boxes.clip(image_size)
    classes = [obj["category_id"] for obj in annos]
    classes = torch.tensor(classes, dtype=torch.int64)
    target.gt_classes = classes
    # Fixed-size placeholder for instances that carry no attribute labels.
    attributes = [obj.get("attributes", [-1] * 16) for obj in annos]
    attributes = torch.tensor(attributes, dtype=torch.int64)
    target.gt_attributes = attributes
    return target
MPMQA | MPMQA-master/detector/bua/caffe/dataloader/transform_gen.py | import cv2
import PIL.Image as Image
import numpy as np
from fvcore.transforms.transform import Transform
from detectron2.data.transforms import TransformGen
class ResizeTransform(Transform):
    """
    Resize the image by a fixed scale factor, after subtracting the pixel mean.
    """

    def __init__(self, h, w, im_scale, pixel_mean):
        """
        Args:
            h, w (int): original image size
            im_scale: im_scale of new_h/h or new_w/w
            pixel_mean: per-channel mean subtracted before resizing
        """
        # TODO decide on PIL vs opencv
        super().__init__()
        self._set_attributes(locals())

    def apply_image(self, img, interp=None):
        """
        Args:
            img: HxWxC array whose size matches the (h, w) of this transform.
            interp: optional cv2 interpolation flag; defaults to bilinear.
                NOTE: apply_segmentation passes PIL's Image.NEAREST (== 0),
                which happens to equal cv2.INTER_NEAREST.
        """
        assert img.shape[:2] == (self.h, self.w)
        img_norm = img.astype(np.float32, copy=True) - np.asarray(self.pixel_mean)
        im = cv2.resize(
            img_norm,
            None,
            None,
            fx=self.im_scale,
            fy=self.im_scale,
            # BUGFIX: the old signature had no `interp` parameter, so
            # apply_segmentation's call raised a TypeError.
            interpolation=cv2.INTER_LINEAR if interp is None else interp,
        )
        ret = np.asarray(im)
        return ret

    def apply_coords(self, coords):
        # Scale x/y coordinates by the same factor as the image.
        coords[:, 0] = coords[:, 0] * (self.im_scale)
        coords[:, 1] = coords[:, 1] * (self.im_scale)
        return coords

    def apply_segmentation(self, segmentation):
        # NOTE(review): this also subtracts pixel_mean from the segmentation
        # map because it reuses apply_image -- confirm that is intended.
        segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
        return segmentation
class ResizeShortestEdge(TransformGen):
    """
    Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
    If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
    """

    def __init__(
        self, min_size, max_size, pixel_mean):
        """
        Args:
            min_size (int): minimum allowed smallest edge length.
            max_size (int): maximum allowed longest edge length.
        """
        super().__init__()
        self.min_size = min_size
        self.max_size = max_size
        self.pixel_mean = pixel_mean
        self._init(locals())

    def get_transform(self, img):
        h, w = img.shape[:2]
        shorter = np.min(img.shape[0:2])
        longer = np.max(img.shape[0:2])
        # min_size may be given as a tuple; only its first entry is used.
        target = self.min_size[0] if type(self.min_size) is tuple else self.min_size
        im_scale = float(target) / float(shorter)
        # Shrink further if the scaled long edge would exceed max_size.
        if np.round(im_scale * longer) > self.max_size:
            im_scale = float(self.max_size) / float(longer)
        return ResizeTransform(h, w, im_scale, self.pixel_mean)
| 2,471 | 29.518519 | 118 | py |
MPMQA | MPMQA-master/detector/bua/caffe/dataloader/__init__.py | from .dataset_mapper import DatasetMapper
# Re-export public names defined above, excluding builtins and underscored names.
__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
MPMQA | MPMQA-master/dataset/mqa_page_contrast.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import torch
from torch.nn.utils.rnn import pad_sequence
from collections import defaultdict
from torch.utils.data import DataLoader
from transformers import T5TokenizerFast
import torch.distributed as dist
from .utils import pad_2d_mask
from .mqa_dataset import MQADataset
import sys
sys.path.insert(0, '../')
from parser import get_base_parser
class MQAContrastDataset(MQADataset):
    """
    MQADataset variant for page retrieval (contrastive) training.

    Questions are grouped per manual; iteration yields one question paired
    with the encoding of its ground-truth page.
    """

    def __init__(self, args, root, tokenizer, split='train'):
        super().__init__(args, root, tokenizer, split, task='retrieval')
        # Inverse of qaid2dataid: all question ids asked about each page.
        self.dataid2qaids = defaultdict(list)
        for qaid, dataid in self.qaid2dataid.items():
            self.dataid2qaids[dataid].append(qaid)
        # Group pages and questions by the manual they belong to
        # (manual name is the second path component of the image file).
        self.manual2dataids = defaultdict(list)
        self.manual2qaids = defaultdict(list)
        for dataid, datum in enumerate(self.data):
            name = datum['image_filename'].split('/')[1]
            self.manual2dataids[name].append(dataid)
            self.manual2qaids[name].extend(self.dataid2qaids[dataid])
        self.manuals = list(self.manual2dataids.keys())
        print(f'Total {len(self.manuals)} manuals')
        self.now_manual = self.manuals[0]

    def get_page(self, dataid):
        """Encode one page: image tensor plus token/bbox/segment sequences."""
        data_dict = self.data[dataid]
        image_path = os.path.join(self.root, data_dict['image_filename'])
        img = cv2.imread(image_path)
        img = torch.from_numpy(img)
        tokens, bboxes, segment_ids = [], [], []
        for region in data_dict['bounding_boxes']:
            region_bbox = region['shape']
            semantic_class = region['structure']
            # Region-type marker token, followed by the region's OCR tokens;
            # each token repeats the region's bbox/segment id.
            tokens.extend(self.tokenizer.encode(self.SEMANTIC_CLS2TOKEN[semantic_class], add_special_tokens=False))
            bboxes.append(self.convert_bbox(region_bbox))
            segment_ids.append(self.SEMANTIC_CLS2ID[semantic_class])
            if 'ocr_info' in region:
                for ocr_region in region['ocr_info']:
                    ocr_word = ocr_region['word']
                    ocr_tokens = self.tokenizer.encode(ocr_word, add_special_tokens=False)
                    n_tokens = len(ocr_tokens)
                    tokens.extend(ocr_tokens)
                    bboxes.extend([self.convert_bbox(ocr_region['bbox'])] * n_tokens)
                    segment_ids.extend([self.SEMANTIC_CLS2ID[semantic_class]] * n_tokens)
        assert len(tokens) == len(bboxes) == len(segment_ids)
        if len(bboxes) == 0:
            # BUGFIX: was `import pdb;pdb.set_trace()` -- fail loudly instead
            # of dropping into a debugger in (possibly headless) training runs.
            raise ValueError(f'Page {image_path} contains no regions')
        context_ids = torch.tensor(tokens)
        context_attn_mask = torch.ones(len(context_ids), dtype=torch.int64)
        bboxes = torch.stack(bboxes, dim=0)
        segment_ids = torch.tensor(segment_ids)
        return img, context_ids, bboxes, segment_ids, context_attn_mask

    def __getitem__(self, idx):
        qaid = self.manual2qaids[self.now_manual][idx]
        dataid = self.qaid2dataid[qaid]
        qa_pair = self.qa_pairs[qaid]
        question = qa_pair['question']['text']
        question_dict = self.tokenizer(question, return_tensors='pt')
        question_ids, question_attn_mask = question_dict['input_ids'].squeeze(dim=0), question_dict['attention_mask'].squeeze(dim=0)
        question_segment_ids = torch.zeros_like(question_ids)
        question_segment_ids.fill_(self.SEMANTIC_CLS2ID['Question'])
        img, context_ids, bboxes, segment_ids, context_attn_masks = self.get_page(dataid)
        return {
            "qaids": qaid,
            "dataids": dataid,
            "imgs": img,
            "bboxes": bboxes,
            "question_ids": question_ids,
            "question_attn_mask": question_attn_mask,
            "question_segment_ids": question_segment_ids,
            "context_ids": context_ids,
            "context_attn_mask": context_attn_masks,
            "segment_ids": segment_ids
        }

    def set_manual(self, manual_name):
        """Restrict iteration to the questions of one manual."""
        self.now_manual = manual_name

    def __len__(self):
        return len(self.manual2qaids[self.now_manual])
def mqa_contrast_collate_fn(dict_list):
    """Collate a list of MQAContrastDataset items into one batch dict.

    Variable-length 1-D token/mask tensors are right-padded with 0 to the
    batch maximum; all other fields stay as plain Python lists.
    """
    batch_dict = defaultdict(list)
    for d in dict_list:
        for key, value in d.items():
            batch_dict[key].append(value)
    for key in ('question_ids', 'question_attn_mask', 'question_segment_ids',
                'context_ids', 'context_attn_mask', 'segment_ids'):
        batch_dict[key] = pad_sequence(batch_dict[key], batch_first=True, padding_value=0)
    return batch_dict
MPMQA | MPMQA-master/dataset/const.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fine-grained visual region classes (sub-types of pictorial content).
VRM_FINE_GRAIN_CLS = [
    "Product Image",
    "illustration",
    "graphic"
]

# Special marker tokens inserted before each region's OCR tokens.
VRM_SEMANTIC_TOKENS = [
    '<Text>',
    '<Title>',
    '<Img>',
    '<illustration>',
    '<Table>',
    '<graphic>'
]

# Segment-embedding id per semantic class; 0 is reserved for padding.
VRM_SEMANTIC_CLS2ID = {
    "<pad>": 0,
    "Text": 1,
    "Title": 2,
    "Product Image": 3,
    "illustration": 4,
    "Table": 5,
    "graphic": 6,
    "Question": 7
}

# Semantic class -> marker token used in the token stream.
VRM_SEMANTIC_CLS2TOKEN = {
    "Text": '<Text>',
    "Title": '<Title>',
    "Product Image": '<Img>',
    'illustration': '<illustration>',
    'Table': '<Table>',
    'graphic': '<graphic>'
}
MPMQA | MPMQA-master/dataset/utils.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import jsonlines
def read_jsonl(path):
    """Read jsonlines file into python list
    args:
        path - directory of the jsonlines file
    return:
        jsonlines file content in List
    """
    with open(path, 'r', encoding='utf-8') as f:
        return [record for record in jsonlines.Reader(f)]
def divide_box_grid(box, row_col):
    """Split a bounding box into a uniform grid of sub-boxes.

    args:
        box - [x1, y1, x2, y2] tensor
        row_col - grid size; an int n means an n x n grid
    return:
        list of [x1, y1, x2, y2] tensors in x-major order
    """
    if isinstance(row_col, int):
        row_col = (row_col, row_col)
    n_x, n_y = row_col
    with torch.no_grad():
        x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
        step_x = (x2 - x1) / n_x
        step_y = (y2 - y1) / n_y
        cells = [
            torch.tensor([
                x1 + ix * step_x, y1 + iy * step_y,
                x1 + (ix + 1) * step_x, y1 + (iy + 1) * step_y
            ])
            for ix in range(n_x)
            for iy in range(n_y)
        ]
    return cells
def pad_2d_mask(mask_list, padding_value=0):
    """Perform padding, convert a list of square 2-d masks into batch
    args:
        mask_list [N x N,] - list of 2-d masks
        padding_value - the value to occupy vacancy
    return:
        batch_mask [B x Max_N x Max_N, ] - padded batch mask
    """
    max_len = max(m.size(0) for m in mask_list)
    batch_mask = torch.full(
        (len(mask_list), max_len, max_len),
        padding_value,
        dtype=mask_list[0].dtype,
    )
    for i, mask in enumerate(mask_list):
        side = mask.size(0)
        batch_mask[i, :side, :side] = mask
    return batch_mask
def get_sub_batch(batch, start_idx=0, end_idx=1):
    """Get part of a Dict batch, use for debugging
    args:
        batch - a dict that contains a batch of data
        start_idx - start id of the sub_batch
        end_idx - end id of the sub_batch
    return:
        sub_batch - dict contains the batch data with id in [start_idx, end_idx-1]
    """
    assert start_idx < end_idx
    return {key: value[start_idx:end_idx] for key, value in batch.items()}
| 2,864 | 30.483516 | 86 | py |
MPMQA | MPMQA-master/dataset/__init__.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | 611 | 42.714286 | 74 | py |
MPMQA | MPMQA-master/dataset/mqa_dataset.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import time
import json
import torch
import random
import numpy as np
from torch.nn.utils.rnn import pad_sequence
from collections import defaultdict
from torch.utils.data import Dataset, DataLoader
from .utils import read_jsonl, pad_2d_mask, divide_box_grid
from transformers import T5TokenizerFast
import torch.distributed as dist
from .const import VRM_SEMANTIC_TOKENS, VRM_SEMANTIC_CLS2ID, VRM_SEMANTIC_CLS2TOKEN, \
VRM_FINE_GRAIN_CLS
class MQADataset(Dataset):
def __init__(self, args, root, tokenizer, split='train', task='qa'):
super().__init__()
self.args = args
self.root = root
self.tokenizer = tokenizer
self.split = split
self.task = task
data_path = os.path.join(root, 'data', f'{split}.jsonl')
self.data = read_jsonl(data_path)
self.rid2cls = {}
for data_dict in self.data:
for region in data_dict['bounding_boxes']:
self.rid2cls[region['id']] = region['structure']
self.qa_pairs, self.qaid2dataid = self.get_qa_pairs()
self.set_const()
special_tokens = self.SEMANTIC_TOKENS.copy()
if self.args.mask:
special_tokens.append('<mask>')
self.tokenizer.add_special_tokens({"additional_special_tokens":special_tokens})
print('Special tokens:')
print(self.tokenizer.SPECIAL_TOKENS_ATTRIBUTES)
self.subwords = list(self.tokenizer.get_vocab().keys())
print(f'Total subwords: {len(self.subwords)}')
def set_use_retrieved_qa2dataid(self):
assert self.args.use_retrieved_qa2dataid
with open(self.args.retrieved_qa2dataid[self.split], 'r') as f:
retrieved_qa2dataid = json.load(f)
retrieved_qa2dataid = {int(key): value for key, value in retrieved_qa2dataid.items()}
print(f'Evaluate QA/sd with retrieved pages in {self.args.retrieved_qa2dataid[self.split]}')
assert len(retrieved_qa2dataid.keys() & self.qaid2dataid.keys()) == len(self.qaid2dataid.keys()) == len(retrieved_qa2dataid.keys())
self.qaid2dataid = retrieved_qa2dataid
def set_const(self):
self.SEMANTIC_TOKENS = VRM_SEMANTIC_TOKENS
self.SEMANTIC_CLS2ID = VRM_SEMANTIC_CLS2ID
self.SEMANTIC_CLS2TOKEN = VRM_SEMANTIC_CLS2TOKEN
self.FINE_GRAIN_CLS = VRM_FINE_GRAIN_CLS
def get_qa_pairs(self):
qaid = 0
qa_pairs = []
qaid2dataid = {}
for dataid, item in enumerate(self.data):
for qa_item in item['qa_data']:
qa_pairs.append(qa_item)
qaid2dataid[qaid] = dataid
qaid += 1
print(f'Total {len(qa_pairs)} qa pairs')
return qa_pairs, qaid2dataid
def convert_bbox(self, d):
x1, y1, w, h = d['x'], d['y'], d['width'], d['height']
x2, y2 = x1+w, y1+h
return torch.tensor([x1,y1,x2,y2])
def merge_bbox(self, box_list):
x1, y1, x2, y2 = 1e9, 1e9, -1, -1
for box in box_list:
x1 = min(x1, box[0])
y1 = min(y1, box[1])
x2 = max(x2, box[2])
y2 = max(y2, box[3])
return torch.tensor([x1,y1,x2,y2])
def __getitem__(self, idx):
qa_pair = self.qa_pairs[idx]
dataid = self.qaid2dataid[idx]
# import pdb;pdb.set_trace()
question = qa_pair['question']['text']
answer = qa_pair['answer']['text']
relevant_rids = qa_pair['answer']['relevant']
question_dict = self.tokenizer(question, return_tensors='pt')
question_ids, question_attn_mask = question_dict['input_ids'].squeeze(dim=0), question_dict['attention_mask'].squeeze(dim=0)
question_segment_ids = torch.zeros_like(question_ids)
question_segment_ids.fill_(self.SEMANTIC_CLS2ID['Question'])
# T5 treat <pad> as start token
# Default: No start token is inserted in position 0
answer_all = self.tokenizer('<pad>'+answer, return_tensors='pt').input_ids.squeeze(dim=0)
answer_ids = answer_all[:-1]
answer_labels = answer_all[1:]
answer_attn_mask = torch.tril(torch.ones((len(answer_ids), len(answer_ids)), dtype=question_attn_mask.dtype))
region_positions = defaultdict(list)
data_dict = self.data[dataid]
image_path = os.path.join(self.root, data_dict['image_filename'])
img = cv2.imread(image_path)
retry = 0
while img is None:
time.sleep(1)
img = cv2.imread(image_path)
retry += 1
if retry > 10:
assert img is not None, f'Retrying to read {image_path} for 10 times but failed'
img = torch.from_numpy(img)
bboxes = []
segment_ids = []
region_ids = []
related_region_labels = []
tokens = []
mlm_labels = [] # for whole word mlm
for r, region in enumerate(data_dict['bounding_boxes']):
is_related_region = int((region['id'] in relevant_rids))
region_positions[region['id']].append(len(tokens))
region_ids.append(region['id'])
region_bbox = region['shape']
semantic_class = region['structure']
tokens.extend(self.tokenizer.encode(self.SEMANTIC_CLS2TOKEN[semantic_class], add_special_tokens=False))
bboxes.append(self.convert_bbox(region_bbox))
segment_ids.append(self.SEMANTIC_CLS2ID[semantic_class])
related_region_labels.append(is_related_region)
if 'ocr_info' in region:
for ocr_region in region['ocr_info']:
ocr_word = ocr_region['word']
ocr_tokens = self.tokenizer.encode(ocr_word, add_special_tokens=False)
n_tokens = len(ocr_tokens)
tokens.extend(ocr_tokens)
bboxes.extend([self.convert_bbox(ocr_region['bbox'])] * n_tokens)
segment_ids.extend([self.SEMANTIC_CLS2ID[semantic_class]] * n_tokens)
if self.args.va_type == 'tokenwise':
related_region_labels.extend([is_related_region] * n_tokens)
elif self.args.va_type == 'global':
related_region_labels.extend([-1] * n_tokens)
# if len(tokens) < self.max_page_len:
region_positions[region['id']].append(len(tokens))
# else:
# region_positions[region['id']].append(self.max_page_len)
assert len(tokens) == len(bboxes) == len(segment_ids) == len(related_region_labels), "length mismatch"
if len(bboxes) == 0:
import pdb;pdb.set_trace()
context_ids = torch.tensor(tokens)
context_attn_mask = torch.ones(len(context_ids), dtype=question_attn_mask.dtype)
bboxes = torch.stack(bboxes, dim=0)
segment_ids = torch.tensor(segment_ids)
related_region_labels = torch.tensor(related_region_labels)
mlm_labels = torch.tensor(mlm_labels)
qa_ids = qa_pair['id']
return {
'qa_ids': qa_ids,
'image_paths': image_path,
'imgs': img,
'question_ids': question_ids,
'question_attn_mask': question_attn_mask,
'question_segment_ids': question_segment_ids,
'answer_ids': answer_ids,
'answer_attn_mask': answer_attn_mask,
'answer_labels': answer_labels,
'context_ids': context_ids[:self.args.max_page_len],
'context_attn_mask': context_attn_mask[:self.args.max_page_len],
'bboxes': bboxes[:self.args.max_page_len],
'segment_ids': segment_ids[:self.args.max_page_len],
'related_region_labels': related_region_labels[:self.args.max_page_len],
'region_positions': region_positions,
'related_regions': relevant_rids,
'mlm_labels': mlm_labels[:self.args.max_page_len]
}
    def __len__(self):
        """Return the number of question-answer pairs in the dataset."""
        return len(self.qa_pairs)
def mqa_collate_fn(dict_list):
    """Collate a list of per-sample dicts into one padded batch dict.

    1D token/mask/label tensors are padded with `pad_sequence`; the 2D causal
    `answer_attn_mask` is padded with `pad_2d_mask`; `related_region_labels`
    uses -1 padding so padded positions are ignored by the loss. All remaining
    keys (images, bboxes, region metadata, ...) stay as plain Python lists.
    """
    batch_dict = defaultdict(list)
    for sample in dict_list:
        for key, value in sample.items():
            batch_dict[key].append(value)
    zero_padded_keys = (
        'question_ids', 'question_attn_mask', 'question_segment_ids',
        'answer_ids', 'answer_labels', 'context_ids',
        'context_attn_mask', 'segment_ids',
    )
    for key in zero_padded_keys:
        batch_dict[key] = pad_sequence(batch_dict[key], batch_first=True, padding_value=0)
    batch_dict['answer_attn_mask'] = pad_2d_mask(batch_dict['answer_attn_mask'], padding_value=0)
    batch_dict['related_region_labels'] = pad_sequence(batch_dict['related_region_labels'], batch_first=True, padding_value=-1)
    return batch_dict
def get_mqa_loader(args, root, tokenizer, batch_size, split='train', num_workers=4, eval_on_train=False):
    """Build a DataLoader over MQADataset for the given split.

    When running under deepspeed, a DistributedSampler is used (shuffled only
    for real training). Shuffling/drop_last are enabled only when training and
    not evaluating on the training split; `shuffle` must stay False whenever a
    sampler is supplied.
    """
    dataset = MQADataset(args, root, tokenizer, split)
    # True only for a genuine training pass (not eval-on-train).
    is_train = (split == 'train' and not eval_on_train)
    sampler = None
    if hasattr(args, 'deepspeed') and args.deepspeed:
        sampler = torch.utils.data.DistributedSampler(
            dataset,
            num_replicas=dist.get_world_size(),
            rank=dist.get_rank(),
            shuffle=is_train,
        )
    return DataLoader(
        dataset=dataset,
        sampler=sampler,
        batch_size=batch_size,
        num_workers=num_workers,
        collate_fn=mqa_collate_fn,
        shuffle=(is_train and sampler is None),
        drop_last=is_train,
    )
| 11,026 | 42.413386 | 139 | py |
MPMQA | MPMQA-master/models/utils.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.distributed as dist
class MLP(nn.Module):
    """Two-layer perceptron with an optional residual connection.

    Args:
        d_in: input feature dimension.
        d_hidden: hidden-layer dimension.
        d_out: output feature dimension.
        non_linear: activation name; only 'relu' is supported.
        res: if True, add the input to the output (requires d_in == d_out).

    Raises:
        ValueError: if `non_linear` names an unsupported activation.
    """

    def __init__(self, d_in, d_hidden, d_out, non_linear='relu', res=False):
        super().__init__()
        self.linear_1 = nn.Linear(d_in, d_hidden)
        self.linear_2 = nn.Linear(d_hidden, d_out)
        if non_linear == 'relu':
            self.activate = nn.ReLU()
        else:
            # Fix: an unsupported name previously left `self.activate` unset,
            # deferring the failure to forward time as an AttributeError.
            raise ValueError(f"Unsupported non_linear activation: {non_linear!r}")
        self.res = res
        if self.res:
            assert d_in == d_out, "residual connection requires d_in == d_out"

    def forward(self, x):
        """Apply linear -> activation -> linear, plus the residual if enabled."""
        residual = x
        x = self.linear_1(x)
        x = self.activate(x)
        x = self.linear_2(x)
        if self.res:
            x = x + residual
        return x
class NCELoss(nn.Module):
    """InfoNCE loss over a similarity matrix whose diagonal holds positives.

    Args:
        t: softmax temperature; similarities are divided by it before softmax.
        bidirectional: if True, average the loss over rows and columns
            (i->j and j->i directions).
    """

    def __init__(self, t=1.0, bidirectional=False):
        super().__init__()
        self.t = t
        self.bidirectional = bidirectional

    def get_loss(self, sim_matrix):
        """Mean negative log-likelihood of the diagonal (positive) entries."""
        log_probs = torch.nn.functional.log_softmax(sim_matrix / self.t, dim=-1)
        return -torch.diag(log_probs).mean()

    def forward(self, sim_matrix_ij, sim_matrix_ji=None):
        loss = self.get_loss(sim_matrix_ij)
        if not self.bidirectional:
            return loss
        # Default to the transpose when the reverse-direction matrix
        # is not supplied explicitly.
        if sim_matrix_ji is None:
            sim_matrix_ji = sim_matrix_ij.t()
        return (loss + self.get_loss(sim_matrix_ji)) / 2
class AllGather(torch.autograd.Function):
    """Autograd-aware all_gather: concatenates same-shape tensors from all
    ranks along the batch dim; backward routes gradients to the local slice.
    """

    @staticmethod
    def forward(ctx, tensor):
        world_size = dist.get_world_size()
        gathered = [torch.empty_like(tensor) for _ in range(world_size)]
        torch.distributed.all_gather(gathered, tensor)
        # Remember where this rank's slice lives in the concatenated output.
        ctx.rank = dist.get_rank()
        ctx.batch_size = tensor.shape[0]
        return torch.cat(gathered, dim=0)

    @staticmethod
    def backward(ctx, grad_output):
        start = ctx.batch_size * ctx.rank
        return (
            grad_output[start:start + ctx.batch_size],
            None,
        )
class AllGatherBatch(torch.autograd.Function):
    """Autograd-aware all_gather_object for tensors whose non-batch dims may
    differ across ranks; the gathered tensors are zero-padded to a common
    shape before concatenation. Backward returns the local (unpadded) slice.
    """

    @staticmethod
    def forward(ctx, tensor):
        world_size = dist.get_world_size()
        gathered = [None] * world_size
        torch.distributed.all_gather_object(gathered, tensor)
        # Remember this rank's position and original sequence length so
        # backward can strip the padding again.
        ctx.rank = dist.get_rank()
        ctx.batch_size = tensor.shape[0]
        ctx.length = tensor.shape[1]
        return pad_features(gathered)

    @staticmethod
    def backward(ctx, grad_output):
        start = ctx.batch_size * ctx.rank
        return (
            grad_output[start:start + ctx.batch_size, :ctx.length],
            None,
        )
def pad_features(tensors):
    """Zero-pad a list of feature tensors to a common shape and concatenate
    them along the batch dimension.

    The local rank's tensor supplies dtype/device/requires_grad for the output
    buffer (tensors gathered from other ranks may live elsewhere). Only the
    second dimension is sliced when copying; trailing dims are assumed equal.
    """
    rank = dist.get_rank()
    reference = tensors[rank]
    shapes = [t.shape for t in tensors]
    total_batch = sum(s[0] for s in shapes)
    # Every non-batch dim is padded up to the max observed size.
    padded_shape = [total_batch] + [
        max(s[dim] for s in shapes) for dim in range(1, len(shapes[0]))
    ]
    padded_tensor = torch.zeros(
        padded_shape,
        device=reference.device,
        dtype=reference.dtype,
        requires_grad=reference.requires_grad,
    )
    offset = 0
    for tensor in tensors:
        padded_tensor[offset:offset + tensor.size(0), :tensor.size(1)] = tensor
        offset += tensor.size(0)
    return padded_tensor
MPMQA | MPMQA-master/models/mqa_model.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.distributed as dist
import random
from detector.ROIFeatExtractor import ROIFeatExtractor
from models.utils import NCELoss, AllGather, AllGatherBatch, pad_features, MLP
from transformers import T5ForConditionalGeneration, T5TokenizerFast
from transformers.generation_beam_search import BeamSearchScorer
from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions
from torch.nn.functional import normalize as norm
class MQAT5Model(nn.Module):
    """T5-based multimodal manual-QA model.

    Encodes a question together with page "context" tokens (region class
    tokens + OCR words) enriched by segment, ROI-appearance and bounding-box
    location embeddings, then decodes a textual answer with T5. Optional
    auxiliary heads: a saliency detector for visual-answer region prediction
    and a projection for question-page contrastive learning.
    """

    def __init__(self, args, pretrained_dir='t5-base'):
        super().__init__()
        self.args = args
        # Frozen-style ROI feature extractor (kept in eval mode; its outputs
        # are consumed under torch.no_grad in get_context_embedding).
        self.roi_extractor = ROIFeatExtractor(args.roi_config, args.roi_model, args.roi_bua)
        self.roi_extractor.eval()
        self.t5 = T5ForConditionalGeneration.from_pretrained(pretrained_dir)
        self.encoder = self.t5.get_encoder()
        self.decoder = self.t5.get_decoder()
        self.lm_head = self.t5.get_output_embeddings()
        self.tokenizer = T5TokenizerFast.from_pretrained(pretrained_dir)
        self.model_dim = self.t5.model_dim
        # Shared token embedding table of the T5 backbone.
        self.embed_token = self.t5.shared
        # Segment ids index region semantic classes (0 is padding).
        self.segment_embedding = nn.Embedding(11, self.model_dim, padding_idx=0)
        # Projects 2048-d ROI features / 4-d normalized boxes into model space.
        self.apperance_embedding = nn.Linear(2048, self.model_dim)
        self.location_embedding = nn.Linear(4, self.model_dim)
        if args.visual_answer:
            # Saliency head: per-token binary (relevant / not relevant) logits.
            if args.va_module_type == 'map':
                self.saliency_detector = nn.Linear(self.model_dim, 2)
            elif args.va_module_type == 'linear':
                self.saliency_detector = nn.Sequential(
                    nn.Linear(self.model_dim, self.model_dim),
                    nn.Linear(self.model_dim, 2)
                )
            elif args.va_module_type == 'mlp':
                self.saliency_detector = nn.Sequential(
                    MLP(self.model_dim, self.model_dim, self.model_dim),
                    nn.Linear(self.model_dim, 2)
                )
            else:
                raise NotImplementedError
        if args.page_contrast:
            # Optional projection applied before the contrastive similarity.
            if args.page_contrast_module_type == 'linear':
                self.page_contrast_module = nn.Linear(self.model_dim, self.model_dim)
            elif args.page_contrast_module_type == 'mlp':
                self.page_contrast_module = MLP(self.model_dim, self.model_dim, self.model_dim, res=True)
            elif args.page_contrast_module_type is None:
                pass
            else:
                raise NotImplementedError
        self.max_dec_len = args.max_dec_len
        # ce_loss ignores index 0 (<pad>); bce_loss ignores -1 (unlabeled tokens).
        self.ce_loss = nn.CrossEntropyLoss(ignore_index=0, reduction='mean')
        self.bce_loss = nn.CrossEntropyLoss(ignore_index=-1, reduction='mean', label_smoothing=self.args.va_label_smoothing)
        self.nce_loss = NCELoss(t=args.page_contrast_t, bidirectional=args.page_contrast_bidirection)
        # NOTE(review): `self.mlm_head` is used by mlm_inference but is never
        # created here — confirm it is attached elsewhere before calling it.

    def resize_token_embeddings(self):
        """Resize the T5 embedding table to match the (possibly extended) tokenizer."""
        self.t5.resize_token_embeddings(len(self.tokenizer))

    def norm_bboxes(self, bboxes):
        """Normalize (N, 4) xyxy boxes by each box's own width/height.

        NOTE(review): dividing by the box's own w/h makes x_max/w - x_min/w == 1
        for every box — presumably intentional relative encoding; confirm.
        """
        with torch.no_grad():
            x_min, y_min, x_max, y_max = bboxes[:, 0], bboxes[:, 1], bboxes[:, 2], bboxes[:, 3]
            w = x_max - x_min
            h = y_max - y_min
            normed_bboxes = torch.stack([x_min / w, y_min / h, x_max / w, y_max / h], dim=1)
        return normed_bboxes

    def get_direction(self, region_boxes):
        """Quantize pairwise region-center directions into integer labels.

        Angles are bucketed in pi/4 steps; the 0.999 factor keeps the bottom
        half just below a full wrap. Diagonal entries are shifted to -1.
        """
        box_centers = torch.stack([region_boxes[:,0]+region_boxes[:,2],
                                   region_boxes[:,1]+region_boxes[:,3]], dim=1) / 2
        relative = norm(box_centers.unsqueeze(dim=0) - box_centers.unsqueeze(dim=1), dim=2)
        angle_upper = torch.acos(relative[:,:,0])
        angle_bottom = angle_upper + 0.999 * torch.pi
        angle = torch.where(relative[:,:,1]>0, angle_upper, angle_bottom)
        direction_labels = (angle * 4 / torch.pi).long()
        direction_labels = direction_labels - (direction_labels.diag()+1).diag_embed()
        return direction_labels

    def combine_embedding_and_mask(self, question_embeddings, question_mask, context_embeddings, context_mask):
        """Concatenate question and context embeddings per sample (question
        first), producing a combined embedding tensor and attention mask."""
        batch_size = question_mask.size(0)
        question_lengths = question_mask.sum(dim=-1)
        context_lengths = context_mask.sum(dim=-1)
        total_lengths = question_lengths + context_lengths
        max_len = total_lengths.max()
        total_mask = torch.zeros((batch_size, max_len),
                                 dtype=question_mask.dtype, device=question_mask.device)
        total_embeddings = torch.zeros((batch_size, max_len, self.model_dim),
                                       dtype=question_embeddings.dtype, device=question_embeddings.device)
        for i in range(batch_size):
            q_length_i = question_lengths[i]
            c_length_i = context_lengths[i]
            total_embeddings[i, :q_length_i] = question_embeddings[i, :q_length_i]
            total_embeddings[i, q_length_i:q_length_i+c_length_i] = context_embeddings[i, :c_length_i]
            total_mask[i, :total_lengths[i]] = 1
        return total_embeddings, total_mask

    def divide_embedding(self, all_embeddings, question_mask, context_mask):
        """Inverse of combine_embedding_and_mask: split combined hidden states
        back into per-sample question and context tensors."""
        batch_size = all_embeddings.size(0)
        question_lengths = question_mask.sum(dim=-1)
        context_lengths = context_mask.sum(dim=-1)
        q_max_len = question_lengths.max()
        c_max_len = context_lengths.max()
        question_embeddings = torch.zeros((batch_size, q_max_len, self.model_dim),
                                          dtype=all_embeddings.dtype, device=all_embeddings.device)
        context_embeddings = torch.zeros((batch_size, c_max_len, self.model_dim),
                                         dtype=all_embeddings.dtype, device=all_embeddings.device)
        for i in range(batch_size):
            q_length_i = question_lengths[i]
            c_length_i = context_lengths[i]
            question_embeddings[i, :q_length_i] = all_embeddings[i, :q_length_i]
            context_embeddings[i, :c_length_i] = all_embeddings[i, q_length_i:q_length_i+c_length_i]
        return question_embeddings, context_embeddings

    def get_question_embedding(self, question_ids, question_segment_ids):
        """Token embedding + segment embedding for the question."""
        question_embed = self.embed_token(question_ids)
        question_segment_embed = self.segment_embedding(question_segment_ids)
        question_embed = question_embed + question_segment_embed
        return question_embed

    def get_context_embedding(self, context_ids, imgs, bboxes, segment_ids):
        """Page-context embedding: token + segment + ROI appearance + box location."""
        context_embed = self.embed_token(context_ids)
        segment_embed = self.segment_embedding(segment_ids)
        context_embed += segment_embed
        # apperance_embed — ROI features are extracted without gradients and
        # cast to the projection's dtype before being scattered per sample.
        with torch.no_grad():
            roi_features = self.roi_extractor.float()(imgs, bboxes)
        roi_features = [f.type(self.apperance_embedding.weight.dtype) for f in roi_features]
        apperance_embed_list = [self.apperance_embedding(f) for f in roi_features]
        apperance_embed = torch.zeros_like(context_embed)
        for i, embed in enumerate(apperance_embed_list):
            apperance_embed[i, :len(embed)] = embed
        context_embed += apperance_embed
        # location_embed — normalized boxes projected into model space.
        normed_bboxes = [self.norm_bboxes(bbox).type(self.location_embedding.weight.dtype) for bbox in bboxes]
        location_embed_list = [self.location_embedding(b) for b in normed_bboxes]
        location_embed = torch.zeros_like(context_embed)
        for i, embed in enumerate(location_embed_list):
            location_embed[i, :len(embed)] = embed
        context_embed += location_embed
        return context_embed

    def get_embeddings_and_mask(self, question_ids, context_ids, imgs, bboxes,
                                question_attn_mask, context_attn_mask,
                                segment_ids, question_segment_ids, **kwargs):
        """Build the combined (question + context) encoder input and mask."""
        question_embed = self.get_question_embedding(question_ids, question_segment_ids)
        context_embed = self.get_context_embedding(context_ids, imgs, bboxes, segment_ids)
        input_embeds, attn_mask = self.combine_embedding_and_mask(question_embed, question_attn_mask, context_embed, context_attn_mask)
        return input_embeds, attn_mask

    def context_hidden_weight(self, question_hidden, question_mask, context_hidden, context_mask, method, **kwargs):
        """Reweight context hidden states; only the 'hard' method (elementwise
        multiply by provided context_weights) is implemented."""
        if method == 'hard':
            context_weights = kwargs['context_weights']
            context_hidden = context_hidden * context_weights
            all_hidden, attn_mask = self.combine_embedding_and_mask(question_hidden, question_mask, context_hidden, context_mask)
            return all_hidden, attn_mask
        else:
            raise NotImplementedError

    def beam_search(self, beam_size, question_ids, context_ids, imgs, bboxes,
                    question_attn_mask, context_attn_mask,
                    segment_ids, question_segment_ids, **kwargs):
        """Generate answers with beam search; returns (token ids, decoded strings)."""
        batch_size = question_ids.size(0)
        beam_scorer = BeamSearchScorer(batch_size, beam_size, device=question_ids.device, **kwargs)
        input_embeds, attn_mask = self.get_embeddings_and_mask(
            question_ids, context_ids, imgs, bboxes,
            question_attn_mask, context_attn_mask,
            segment_ids, question_segment_ids, **kwargs)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=input_embeds,
            attention_mask=attn_mask,
            output_attentions=False,
            output_hidden_states=False,
        )
        # Expand encoder states/masks beam_size times as HF beam search expects.
        encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
            beam_size, dim=0)
        attn_mask = attn_mask.repeat_interleave(
            beam_size, dim=0
        )
        # <pad> (id 0) is T5's decoder start token.
        decoder_input_ids = torch.zeros((batch_size, 1), dtype=question_ids.dtype, device=question_ids.device)
        decoder_input_ids = decoder_input_ids.repeat_interleave(
            beam_size, dim=0
        )
        outputs = self.t5.beam_search(
            encoder_outputs=encoder_outputs, attention_mask=attn_mask, input_ids=decoder_input_ids, beam_scorer=beam_scorer, max_length=self.max_dec_len
        )
        predictions = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
        return outputs, predictions

    def greedy_inference(self, question_ids, context_ids, imgs, bboxes,
                         question_attn_mask, context_attn_mask,
                         segment_ids, question_segment_ids, **kwargs):
        """Greedy decoding with KV-cache; returns (token ids, decoded strings).

        NOTE(review): decoding always runs max_dec_len steps — there is no
        early stop at </s>; batch_decode strips special tokens afterwards.
        """
        input_embeds, attn_mask = self.get_embeddings_and_mask(
            question_ids, context_ids, imgs, bboxes,
            question_attn_mask, context_attn_mask,
            segment_ids, question_segment_ids, **kwargs)
        batch_size = input_embeds.size(0)
        # <pad> as start token
        decoder_input_ids = torch.zeros((batch_size, 1), dtype=question_ids.dtype, device=question_ids.device)
        out = self.t5(inputs_embeds=input_embeds, attention_mask=attn_mask,
                      decoder_input_ids=decoder_input_ids, return_dict=True, use_cache=True)
        past_key_values = out.past_key_values
        encoder_outputs = (out.encoder_last_hidden_state,)
        outputs = []
        logits = out.logits
        outputs.append(logits.argmax(dim=-1))
        for i in range(self.max_dec_len-1):
            out = self.t5(encoder_outputs=encoder_outputs, attention_mask=attn_mask, past_key_values=past_key_values,
                          decoder_input_ids=outputs[-1], use_cache=True)
            past_key_values = out.past_key_values
            logits = out.logits
            outputs.append(logits.argmax(dim=-1))
        outputs = torch.cat(outputs, dim=1)
        predictions = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
        return outputs, predictions

    def mlm_inference(self, question_ids, context_ids, imgs, bboxes,
                      question_attn_mask, context_attn_mask,
                      segment_ids, question_segment_ids, mlm_labels, **kwargs):
        """Score masked-token predictions on context tokens.

        Returns (correct_count, total_count) over positions where
        mlm_labels != 0. NOTE(review): relies on `self.mlm_head`, which is not
        created in the visible __init__ — confirm it is set elsewhere.
        """
        input_embeds, attn_mask = self.get_embeddings_and_mask(
            question_ids, context_ids, imgs, bboxes,
            question_attn_mask, context_attn_mask,
            segment_ids, question_segment_ids, **kwargs)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=input_embeds,
            attention_mask=attn_mask,
            output_attentions=False,
            output_hidden_states=True,
        )
        encoder_last_hidden_states = encoder_outputs.last_hidden_state
        _, context_hidden_state = self.divide_embedding(encoder_last_hidden_states, question_attn_mask, context_attn_mask)
        if self.t5.config.tie_word_embeddings:
            # Same rescaling T5 applies before its tied LM head.
            context_hidden_state = context_hidden_state * (self.model_dim**-0.5)
        probs = self.mlm_head(context_hidden_state).argmax(dim=-1)
        probs = probs.flatten()
        labels = mlm_labels.flatten()
        indices = torch.where(labels)[0]
        acc_num = (probs[indices] == labels[indices]).sum()
        total_num = len(indices)
        return float(acc_num), float(total_num)

    def visual_answer_inference(self, question_ids, context_ids, imgs, bboxes,
                                question_attn_mask, context_attn_mask,
                                segment_ids, question_segment_ids, region_positions, **kwargs):
        """Predict relevant regions per sample from token saliency probabilities.

        Region score = mean token probability ('tokenwise') or the first
        token's probability ('global'); regions scoring >= 0.5 are kept, and
        at least args.min_va top-scoring regions are always returned.
        """
        input_embeds, attn_mask = self.get_embeddings_and_mask(
            question_ids, context_ids, imgs, bboxes,
            question_attn_mask, context_attn_mask,
            segment_ids, question_segment_ids, **kwargs)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=input_embeds,
            attention_mask=attn_mask,
            output_attentions=False,
            output_hidden_states=True,
        )
        encoder_last_hidden_states = encoder_outputs.last_hidden_state
        question_hidden_state, context_hidden_state = self.divide_embedding(encoder_last_hidden_states, question_attn_mask, context_attn_mask)
        saliency_probs = self.saliency_detector(context_hidden_state).softmax(dim=-1)
        def aggregate_score(scores, method='mean'):
            # 'mean': average over the region's tokens; 'first': the region's
            # leading (class) token; empty regions score 0.
            if method == 'mean':
                return scores.mean()
            elif method == 'first':
                if len(scores) == 0:
                    return torch.tensor(0.0)
                else:
                    return scores[0]
        pred_related_regions = [[] for _ in range(len(saliency_probs))]
        for i, saliency_prob in enumerate(saliency_probs):
            region_score_list = []
            for region_id, region_position in region_positions[i].items():
                token_probs = saliency_prob[region_position[0]:region_position[1], 1]
                method = 'mean' if self.args.va_type=='tokenwise' else 'first'
                region_prob = aggregate_score(token_probs, method)
                region_score_list.append((region_id, region_prob))
            region_score_list.sort(key=lambda x: x[1], reverse=True)
            nums = 0
            for (region_id, score) in region_score_list:
                if score >= 0.5:
                    nums += 1
                    pred_related_regions[i].append(region_id)
                elif nums < self.args.min_va:
                    nums += 1
                    pred_related_regions[i].append(region_id)
                else:
                    break
        return pred_related_regions

    def get_global_indices(self, region_positions):
        """Collect each region's first-token index for every sample."""
        batchsize = len(region_positions)
        global_indices = [[] for _ in range(batchsize)]
        for i in range(batchsize):
            for region_id, region_position in region_positions[i].items():
                global_indices[i].append(region_position[0])
        return global_indices

    def cross_encoding(self, question_ids, context_ids, imgs, bboxes,
                       question_attn_mask, context_attn_mask,
                       segment_ids, question_segment_ids, **kwargs):
        """Jointly encode question + context; returns (hidden states, mask)."""
        input_embeds, attn_mask = self.get_embeddings_and_mask(
            question_ids, context_ids, imgs, bboxes,
            question_attn_mask, context_attn_mask,
            segment_ids, question_segment_ids, **kwargs)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=input_embeds,
            attention_mask=attn_mask,
            output_attentions=False,
            output_hidden_states=False,
        )
        encoder_last_hidden_states = encoder_outputs.last_hidden_state
        return encoder_last_hidden_states, attn_mask

    def encoding_question(self, question_ids, question_attn_mask, question_segment_ids, return_hidden=False):
        """Encode the question alone; returns all hidden states or the first
        token's hidden state depending on return_hidden."""
        question_embeddings = self.get_question_embedding(question_ids, question_segment_ids)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=question_embeddings,
            attention_mask=question_attn_mask,
            output_attentions=False,
            output_hidden_states=False,
        )
        encoder_last_hidden_states = encoder_outputs.last_hidden_state
        if return_hidden:
            return encoder_last_hidden_states
        else:
            return encoder_last_hidden_states[:, 0]

    def encoding_context(self, context_ids, context_attn_mask, imgs, bboxes, segment_ids, return_hidden=False):
        """Encode the page context alone; mirrors encoding_question."""
        context_embeddings = self.get_context_embedding(context_ids, imgs, bboxes, segment_ids)
        encoder = self.t5.get_encoder()
        encoder_outputs = encoder(
            inputs_embeds=context_embeddings,
            attention_mask=context_attn_mask,
            output_attentions=False,
            output_hidden_states=False,
        )
        encoder_last_hidden_states = encoder_outputs.last_hidden_state
        if return_hidden:
            return encoder_last_hidden_states
        else:
            return encoder_last_hidden_states[:, 0]

    def two_stream_encoding(self, question_ids, question_attn_mask, question_segment_ids,
                            context_ids, context_attn_mask, imgs, bboxes, segment_ids, return_hidden=False, **kwargs):
        """Encode question and context independently (for contrastive learning)."""
        question_features = self.encoding_question(question_ids, question_attn_mask, question_segment_ids, return_hidden)
        context_features = self.encoding_context(context_ids, context_attn_mask, imgs, bboxes, segment_ids, return_hidden)
        return question_features, context_features

    def similarity_score(self, question_hiddens, context_hiddens, question_attn_mask=None, context_attn_mask=None):
        """Token-wise (ColBERT-style max-over-tokens, mean-over-query) similarity
        between every question and every context in the batch.

        Returns (question->context scores b1 x b2, context->question scores b2 x b1).
        """
        # NOTE(review): the first assert compares question_hiddens with itself;
        # the second operand was presumably meant to be context_hiddens.
        assert len(question_hiddens.shape) == len(question_hiddens.shape) == 3
        assert len(question_attn_mask.shape) == len(context_attn_mask.shape) == 2
        assert len(question_attn_mask) == len(question_hiddens)
        assert len(context_attn_mask) == len(context_hiddens)
        b1, t1, d = question_hiddens.shape
        b2, t2, d = context_hiddens.shape
        score_matrix = torch.zeros((b1, b2), device=question_hiddens.device, dtype=question_hiddens.dtype, requires_grad=False)
        score_matrix_2 = torch.zeros((b2, b1), device=question_hiddens.device, dtype=question_hiddens.dtype, requires_grad=False)
        # token_score_matrix: b1 x b2 x t1 x t2
        token_score_matrix = torch.einsum('ind,jmd->ijnm', question_hiddens, context_hiddens)
        for i in range(b1):
            for j in range(b2):
                # t1 x t2 — restrict to real (unpadded) tokens on both sides.
                token_score_matrix_ij = token_score_matrix[i, j]
                token_score_matrix_ij = token_score_matrix_ij[:question_attn_mask[i].sum(), :context_attn_mask[j].sum()]
                score = token_score_matrix_ij.max(dim=-1)[0].mean(dim=0)
                score_2 = token_score_matrix_ij.t().max(dim=-1)[0].mean(dim=0)
                score_matrix[i, j] = score_matrix[i, j].clone() + score.clone()
                score_matrix_2[j, i] = score_matrix_2[j, i].clone() + score_2.clone()
        score_matrix.requires_grad_(True)
        score_matrix_2.requires_grad_(True)
        return score_matrix, score_matrix_2

    def pad_features(self, tensors):
        """Zero-pad a list of B x T x D tensors to a common shape and concat
        along the batch dim.

        NOTE(review): near-duplicate of models.utils.pad_features, except the
        reference tensor here is tensors[0] rather than tensors[rank].
        """
        # tensors: B x T x D
        shapes = [t.shape for t in tensors]
        total_batch = sum([s[0] for s in shapes])
        dtype = tensors[0].dtype
        device = tensors[0].device
        requires_grad = tensors[0].requires_grad
        padded_shape = [total_batch]
        for i in range(1, len(shapes[0])):
            padded_size_i = 0
            for s in shapes:
                padded_size_i = max(padded_size_i, s[i])
            padded_shape.append(padded_size_i)
        padded_tensor = torch.zeros(padded_shape, device=device, dtype=dtype, requires_grad=requires_grad)
        b_start = 0
        for i, tensor in enumerate(tensors):
            padded_tensor[b_start:b_start+tensor.size(0), :tensor.size(1)] = tensor
            b_start += tensor.size(0)
        return padded_tensor

    def forward_page_contrast_global(self, question_hiddens, context_hiddens, compute_loss=True):
        """Global (first-token) question-page contrast across all ranks."""
        question_features = question_hiddens[:, 0]
        context_features = context_hiddens[:, 0]
        if self.args.page_contrast_module_type is not None:
            question_features = self.page_contrast_module(question_features)
            context_features = self.page_contrast_module(context_features)
        question_features = norm(question_features, dim=-1)
        context_features = norm(context_features, dim=-1)
        # Gather features from every rank so negatives span the global batch.
        question_features = AllGather.apply(question_features)
        context_features = AllGather.apply(context_features)
        dist.barrier()
        score_matrix = torch.matmul(question_features, context_features.t())
        if compute_loss:
            pc_loss = self.nce_loss(score_matrix)
            return pc_loss, score_matrix
        else:
            return score_matrix

    def forward_page_contrast_tokenwise(self, question_hiddens, context_hiddens, question_attn_mask, context_attn_mask, compute_loss=True):
        """Token-wise question-page contrast across all ranks (uses
        similarity_score over gathered, padded hidden states)."""
        if self.args.page_contrast_module_type is not None:
            question_hiddens = self.page_contrast_module(question_hiddens)
            context_hiddens = self.page_contrast_module(context_hiddens)
        question_features = norm(question_hiddens, dim=-1)
        context_features = norm(context_hiddens, dim=-1)
        question_features = AllGatherBatch.apply(question_features)
        context_features = AllGatherBatch.apply(context_features)
        _question_attn_mask = AllGatherBatch.apply(question_attn_mask)
        _context_attn_mask = AllGatherBatch.apply(context_attn_mask)
        dist.barrier()
        score_matrix_qc, score_matrix_cq = self.similarity_score(question_features, context_features, _question_attn_mask, _context_attn_mask)
        if compute_loss:
            tpc_loss = self.nce_loss(score_matrix_qc, score_matrix_cq)
            return tpc_loss, score_matrix_qc, score_matrix_cq
        else:
            return score_matrix_qc, score_matrix_cq

    def forward_salient_detection(self, encoder_last_hidden_states, question_attn_mask, context_attn_mask, related_region_labels=None, compute_loss=True):
        """Per-context-token saliency logits (+ CE loss vs region labels)."""
        _, context_hidden_state = self.divide_embedding(encoder_last_hidden_states, question_attn_mask, context_attn_mask)
        saliency_logits = self.saliency_detector(context_hidden_state)
        if compute_loss:
            saliency_logits_reshaped = saliency_logits.reshape(-1, saliency_logits.size(-1))
            sd_loss = self.bce_loss(saliency_logits_reshaped, related_region_labels.flatten())
            return sd_loss, saliency_logits
        else:
            return saliency_logits

    def forward_sep_qa(self, question_hiddens, context_hiddens, question_attn_mask, context_attn_mask,
                       answer_ids, answer_attn_mask, answer_labels):
        """QA loss from separately-encoded question/context hidden states."""
        encoder_last_hidden_states, attn_mask = \
            self.combine_embedding_and_mask(question_hiddens, question_attn_mask, context_hiddens, context_attn_mask)
        return self.forward_text_answer(encoder_last_hidden_states, attn_mask, answer_ids, answer_attn_mask, answer_labels)

    def forward_text_answer(self, encoder_last_hidden_states, attn_mask,
                            answer_ids, answer_attn_mask, answer_labels,
                            question_attn_mask=None, context_attn_mask=None, related_region_labels=None,
                            saliency_logits=None, region_positions=None, now_step=None, total_step=None):
        """Teacher-forced decoder cross-entropy over answer tokens.

        NOTE(review): the trailing optional parameters (question_attn_mask
        through total_step) are accepted but unused in this body.
        """
        assert len(encoder_last_hidden_states) > 0
        decoder_outputs = self.decoder(encoder_hidden_states=encoder_last_hidden_states, encoder_attention_mask=attn_mask,
                                       input_ids=answer_ids, attention_mask=answer_attn_mask, return_dict=True)
        decoder_last_hidden_states = decoder_outputs.last_hidden_state
        if self.t5.config.tie_word_embeddings:
            decoder_last_hidden_states = decoder_last_hidden_states * (self.model_dim**-0.5)
        logits = self.lm_head(decoder_last_hidden_states)
        logits = logits.reshape(-1, logits.size(-1))
        labels = answer_labels.flatten()
        qa_loss = self.ce_loss(logits, labels)
        return qa_loss

    def forward(self, question_ids, answer_ids, context_ids, imgs, bboxes,
                question_attn_mask, answer_attn_mask, context_attn_mask,
                answer_labels, segment_ids, question_segment_ids, related_region_labels, **kwargs):
        """Training forward pass.

        Returns a dict of the enabled auxiliary losses ('loss_pc'/'loss_tpc',
        'loss_va', 'loss_qa') plus their sum under key 'loss'.
        """
        loss_dict = {}
        # Cases of separatly encoding question and page
        if self.args.page_contrast:
            question_hiddens, context_hiddens = self.two_stream_encoding(question_ids, question_attn_mask, question_segment_ids,
                                                                         context_ids, context_attn_mask, imgs, bboxes, segment_ids, return_hidden=True)
        # Jointly encoding
        if not self.args.no_cross:
            encoder_last_hidden_states, attn_mask = self.cross_encoding(question_ids, context_ids, imgs, bboxes,
                                                                        question_attn_mask, context_attn_mask,
                                                                        segment_ids, question_segment_ids, **kwargs)
        if self.args.page_contrast:
            if self.args.page_contrast_type == 'global':
                pc_loss, _ = self.forward_page_contrast_global(question_hiddens, context_hiddens)
                loss_dict['loss_pc'] = pc_loss
            elif self.args.page_contrast_type == 'tokenwise':
                tpc_loss, _, _ = self.forward_page_contrast_tokenwise(question_hiddens, context_hiddens, question_attn_mask, context_attn_mask)
                loss_dict['loss_tpc'] = tpc_loss
            else:
                raise NotImplementedError
        # Calculate visual answering loss
        if self.args.visual_answer:
            va_loss, saliency_logits = self.forward_salient_detection(encoder_last_hidden_states, question_attn_mask, context_attn_mask, related_region_labels)
            loss_dict['loss_va'] = va_loss
        # Calculate question answer loss
        if self.args.text_answer:
            region_positions = kwargs.get('region_positions', None)
            now_step = kwargs.get('now_step', None)
            total_step = kwargs.get('total_step', None)
            if not self.args.visual_answer:
                saliency_logits = None
            qa_loss = self.forward_text_answer(encoder_last_hidden_states, attn_mask, answer_ids, answer_attn_mask, answer_labels,
                                               question_attn_mask, context_attn_mask, related_region_labels, saliency_logits, region_positions, now_step, total_step)
            loss_dict['loss_qa'] = qa_loss
        # Calculate total loss
        loss = 0.0
        for _, loss_value in loss_dict.items():
            loss += loss_value
        loss_dict['loss'] = loss
        return loss_dict
| 28,473 | 50.397112 | 159 | py |
MPMQA | MPMQA-master/scripts/compute_metrics.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import json
from nlgeval import NLGEval, _strip
from tqdm import tqdm
from collections import OrderedDict
from sklearn.metrics import precision_score, recall_score, f1_score
# Tokens treated as punctuation when cleaning tokenized text; includes
# PTB-style bracket tokens such as -LRB-/-RRB-.
PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
                ".", "?", "!", ",", ":", "-", "--", "...", ";"]
# Mapping from visual-region semantic class name to integer id; 0 is padding,
# and ids >= 3 are the non-text ("multimodal") region types used by
# get_multimodal_list below.
VRM_SEMANTIC_CLS2ID = {
    "<pad>": 0,
    "Text": 1,
    "Title": 2,
    "Product Image": 3,
    "illustration": 4,
    "Table": 5,
    "graphic": 6,
    "Question": 7
}
def get_filter_list(items, class_filter):
    """Keep items having at least one ground-truth region whose semantic
    class id satisfies `class_filter`."""
    kept = []
    for item in items:
        class_ids = (VRM_SEMANTIC_CLS2ID[cls_name]
                     for cls_name in set(item['gt_region_cls']))
        if any(class_filter(cls_id) for cls_id in class_ids):
            kept.append(item)
    return kept
def get_multimodal_list(items):
    """Return only items with at least one non-text region (class id >= 3)."""
    def _is_multimodal(cls_id):
        return cls_id >= 3
    return get_filter_list(items, _is_multimodal)
def remove_punc(line):
    """Drop punctuation tokens from a space-tokenized, right-stripped line."""
    kept_words = (w for w in line.rstrip().split(' ') if w not in PUNCTUATIONS)
    return ' '.join(kept_words)
class NLGEvalNew(NLGEval):
    """NLGEval variant whose compute_metrics also returns per-instance scores."""

    def compute_metrics(self, ref_list, hyp_list):
        stripped_refs = [list(map(_strip, refs)) for refs in zip(*ref_list)]
        refs = {idx: lines for idx, lines in enumerate(stripped_refs)}
        hyps = {idx: [hyp.strip()] for idx, hyp in enumerate(hyp_list)}
        assert len(refs) == len(hyps)
        ret_scores = {}
        instance_scores = {}
        # Overlap metrics disabled: nothing to compute.
        if self.no_overlap:
            return ret_scores, instance_scores
        for scorer, method in self.scorers:
            score, scores = scorer.compute_score(refs, hyps)
            if isinstance(method, list):
                # Scorers like BLEU report several sub-metrics at once.
                for sc, scs, m in zip(score, scores, method):
                    ret_scores[m] = sc
                    instance_scores[m] = scs
            else:
                ret_scores[method] = score
                instance_scores[method] = scores
        return ret_scores, instance_scores
# Module-level evaluator: built once at import time, without the heavy
# GloVe / skip-thought embedding-based metrics.
print('Loading evaluator...')
nlgeval = NLGEval(no_glove=True, no_skipthoughts=True)
print('Load evaluator finished')
def compute_visual_answer_metrics(pred_related_regions, gt_regions, all_regions, all_regions_cls, cls_split=None):
    """Micro-averaged precision/recall/F1 of predicted supporting regions.

    Args:
        pred_related_regions: per-instance lists of predicted region ids.
        gt_regions: per-instance lists of ground-truth region ids.
        all_regions: per-instance lists of all candidate region ids.
        all_regions_cls: per-instance lists of the semantic class name of each
            candidate region, aligned with ``all_regions``.
        cls_split: if given, only candidate regions of this semantic class
            are scored.

    Returns:
        Dict with 'all_precision', 'all_recall' and 'all_f1' computed over all
        candidate regions pooled across instances (micro average).
    """
    all_y_true = []
    all_y_pred = []
    no_preds = 0  # instances for which the model predicted no region at all
    for instance_region_ids, instance_preds, instance_gts, instance_region_cls in zip(
            all_regions, pred_related_regions, gt_regions, all_regions_cls):
        if len(instance_preds) == 0:
            no_preds += 1
        y_true = []
        y_pred = []
        for j, region_id in enumerate(instance_region_ids):
            if cls_split is not None and instance_region_cls[j] != cls_split:
                continue
            y_pred.append(1 if region_id in instance_preds else 0)
            y_true.append(1 if region_id in instance_gts else 0)
        if len(y_pred) == 0:
            # No candidate region of the requested class in this instance.
            continue
        all_y_true.extend(y_true)
        all_y_pred.extend(y_pred)
    metrics = {
        'all_precision': precision_score(all_y_true, all_y_pred, average='binary'),
        'all_recall': recall_score(all_y_true, all_y_pred, average='binary'),
        'all_f1': f1_score(all_y_true, all_y_pred, average='binary')
    }
    if no_preds > 0:
        print('#########################################')
        print(f'{no_preds}/{len(all_regions)} instances has no predictions!!')
        print('#########################################')
    return metrics
def compute_visual_answer_by_region_cls(items, is_print=True):
    """Evaluate region ("visual answer") prediction per semantic region class.

    Args:
        items: prediction records with 'pred_regions', 'gt_regions',
            'all_regions' and 'all_region_cls' fields.
        is_print: whether to print per-class metrics (now also gates the
            per-class header line, which previously ignored this flag).

    Returns:
        OrderedDict mapping region class name -> metrics dict.
    """
    pred_regions, gt_regions, all_regions, all_regions_cls = [], [], [], []
    for item in items:
        pred_regions.append(item['pred_regions'])
        gt_regions.append(item['gt_regions'])
        all_regions.append(item['all_regions'])
        all_regions_cls.append(item['all_region_cls'])
    cls2metrics = OrderedDict()
    # NOTE: renamed loop variable from `type` to avoid shadowing the builtin.
    for region_type in ['Text', 'Title', 'Product Image', 'illustration', 'Table', 'graphic']:
        if is_print:
            print('Region predict of {}'.format(region_type))
        metrics = compute_visual_answer_metrics(
            pred_regions, gt_regions, all_regions, all_regions_cls,
            cls_split=region_type)
        cls2metrics[region_type] = metrics
        if is_print:
            for metric, score in metrics.items():
                print(f'{metric}: {score:.3f}')
    return cls2metrics
def compute_qa_score(items):
    """Corpus-level NLG metrics of generated answers vs. single references.

    Both hypotheses ('caption') and references ('gt') are lower-cased and
    punctuation-stripped before scoring.
    """
    hyps = [remove_punc(item['caption']).lower() for item in items]
    refs = [[remove_punc(item['gt']).lower() for item in items]]
    return nlgeval.compute_metrics(refs, hyps)
def compute_qa_score_by_region_cls(items, is_print=True):
    """Compute corpus NLG metrics on question subsets grouped by the semantic
    class of their ground-truth regions.

    Args:
        items: prediction records (see compute_qa_score).
        is_print: whether to print per-class metrics.

    Returns:
        OrderedDict mapping region class name -> metrics dict.
    """
    cls2metrics = OrderedDict()
    # NOTE: renamed loop variable from `type` to avoid shadowing the builtin.
    for region_type in ['Text', 'Title', 'Product Image', 'illustration', 'Table', 'graphic']:
        type_id = VRM_SEMANTIC_CLS2ID[region_type]
        # Bind type_id as a default argument so the predicate is immune to
        # late-binding if it ever escapes this loop iteration.
        sub_items = get_filter_list(items, lambda x, tid=type_id: x == tid)
        metrics = compute_qa_score(sub_items)
        if is_print:
            print('Questions contains {}'.format(region_type))
        if is_print:
            for metric, score in metrics.items():
                print(f'{metric}: {score:.3f}')
        cls2metrics[region_type] = metrics
    return cls2metrics
def compute_qa_score_instance(items):
    """Average instance-level NLG metrics over all (prediction, answer) pairs.

    Fixes over the original version: the averaged metrics dict is now
    returned (previously it was only printed, so callers got None), and an
    empty input returns {} instead of raising IndexError.
    """
    all_predictions = [remove_punc(x['caption']).lower() for x in items]
    all_answers = [remove_punc(x['gt']).lower() for x in items]
    assert len(all_predictions) == len(all_answers)
    metrics_list = []
    for prediction, answer in tqdm(zip(all_predictions, all_answers), ncols=50, total=len(all_predictions)):
        metrics_list.append(nlgeval.compute_individual_metrics([answer], prediction))
    if not metrics_list:
        return {}
    metrics = {}
    n_instances = len(metrics_list)
    for key in metrics_list[0].keys():
        metrics[key] = sum(m[key] for m in metrics_list) / n_instances
    for metric, score in metrics.items():
        print(f'{metric}: {score:.3f}')
    return metrics
def item_process(input, output, task='qa'):
    """Evaluate a single prediction file and write per-class metrics as JSON.

    Args:
        input: path to a JSON file containing the prediction records.
        output: path where the metrics JSON is written.
        task: 'qa' for answer-quality metrics, 'sd' for region ("visual
            answer") metrics.

    Returns:
        The metrics object that was written to *output*.

    Raises:
        NotImplementedError: if *task* is neither 'qa' nor 'sd'.
    """
    # Use context managers so file handles are closed deterministically
    # (the original leaked the input handle via json.load(open(...))).
    with open(input) as f:
        items = json.load(f)
    if task == 'qa':
        metrics = compute_qa_score_by_region_cls(items)
    elif task == 'sd':
        metrics = compute_visual_answer_by_region_cls(items)
    else:
        raise NotImplementedError
    with open(output, 'w') as f:
        json.dump(metrics, f, indent=4)
    return metrics
def list_process(inputs, outputs, task='qa'):
    """Run item_process over paired (input, output) paths.

    Args:
        inputs: iterable of input JSON paths.
        outputs: iterable of output JSON paths, paired with *inputs*.
        task: evaluation task forwarded to item_process ('qa' or 'sd');
            defaults to 'qa', matching the original behavior.

    Returns:
        List of metrics dicts, one per processed pair (previously discarded).
    """
    return [item_process(inp, outp, task=task)
            for inp, outp in zip(inputs, outputs)]
| 7,808 | 36.724638 | 161 | py |
MPMQA | MPMQA-master/scripts/__init__.py | # Copyright(c) 2022 Liang Zhang
# E-Mail: <zhangliang00@ruc.edu.cn>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | 611 | 42.714286 | 74 | py |
surface-distance | surface-distance-master/setup.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyPI package definition."""
from setuptools import setup
# Minimal setuptools configuration for the surface_distance package.
# NOTE(review): the distribution name contains spaces; pip/setuptools will
# normalize it, but a dash-separated name would be more conventional.
setup(name="Surface Distance Based Measures",
      version="0.1",
      description=(
          "Library containing utilities to compute performance metrics for "
          "segmentation"),
      url="https://github.com/deepmind/surface-distance",
      author="DeepMind",
      license="Apache License, Version 2.0",
      packages=["surface_distance"],
      install_requires=["numpy", "scipy", "absl-py"])
| 1,069 | 37.214286 | 76 | py |
surface-distance | surface-distance-master/__init__.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface distance module: https://github.com/deepmind/surface-distance ."""
from .surface_distance import * # pylint: disable=wildcard-import
| 743 | 42.764706 | 77 | py |
surface-distance | surface-distance-master/surface_distance_test.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tests for surface metric computations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import google3
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import surface_distance
from surface_distance.surface_distance import metrics
class SurfaceDistanceTest(parameterized.TestCase, absltest.TestCase):
  """Shared helpers and shape-validation tests for surface-distance metrics."""
  def _assert_almost_equal(self, expected, actual, places):
    """Assertion wrapper correctly handling NaN equality."""
    if np.isnan(expected) and np.isnan(actual):
      return
    self.assertAlmostEqual(expected, actual, places)
  def _assert_metrics(self,
                      surface_distances, mask_gt, mask_pred,
                      expected_average_surface_distance,
                      expected_hausdorff_100,
                      expected_hausdorff_95,
                      expected_surface_overlap_at_1mm,
                      expected_surface_dice_at_1mm,
                      expected_volumetric_dice,
                      places=3):
    """Checks every derived metric of `surface_distances` against expectations.

    Covers average surface distance (both directions), robust Hausdorff at
    the 100th and 95th percentiles, surface overlap and surface Dice at 1 mm
    tolerance, and the volumetric Dice coefficient.
    """
    actual_average_surface_distance = (
        surface_distance.compute_average_surface_distance(surface_distances))
    # Average surface distance is directional: index 0 is gt->pred, 1 is
    # pred->gt, so both entries are compared.
    for i in range(2):
      self._assert_almost_equal(
          expected_average_surface_distance[i],
          actual_average_surface_distance[i],
          places=places)
    self._assert_almost_equal(
        expected_hausdorff_100,
        surface_distance.compute_robust_hausdorff(surface_distances, 100),
        places=places)
    self._assert_almost_equal(
        expected_hausdorff_95,
        surface_distance.compute_robust_hausdorff(surface_distances, 95),
        places=places)
    actual_surface_overlap_at_1mm = (
        surface_distance.compute_surface_overlap_at_tolerance(
            surface_distances, tolerance_mm=1))
    # Surface overlap is also a directional pair.
    for i in range(2):
      self._assert_almost_equal(
          expected_surface_overlap_at_1mm[i],
          actual_surface_overlap_at_1mm[i],
          places=places)
    self._assert_almost_equal(
        expected_surface_dice_at_1mm,
        surface_distance.compute_surface_dice_at_tolerance(
            surface_distances, tolerance_mm=1),
        places=places)
    self._assert_almost_equal(
        expected_volumetric_dice,
        surface_distance.compute_dice_coefficient(mask_gt, mask_pred),
        places=places)
  @parameterized.parameters((
      np.zeros([2, 2, 2], dtype=bool),
      np.zeros([2, 2], dtype=bool),
      [1, 1],
  ), (
      np.zeros([2, 2], dtype=bool),
      np.zeros([2, 2, 2], dtype=bool),
      [1, 1],
  ), (
      np.zeros([2, 2], dtype=bool),
      np.zeros([2, 2], dtype=bool),
      [1, 1, 1],
  ))
  def test_compute_surface_distances_raises_on_incompatible_shapes(
      self, mask_gt, mask_pred, spacing_mm):
    """Mismatched mask ranks or spacing length must raise ValueError."""
    with self.assertRaisesRegex(ValueError,
                                'The arguments must be of compatible shape'):
      surface_distance.compute_surface_distances(mask_gt, mask_pred, spacing_mm)
  @parameterized.parameters((
      np.zeros([2], dtype=bool),
      np.zeros([2], dtype=bool),
      [1],
  ), (
      np.zeros([2, 2, 2, 2], dtype=bool),
      np.zeros([2, 2, 2, 2], dtype=bool),
      [1, 1, 1, 1],
  ))
  def test_compute_surface_distances_raises_on_invalid_shapes(
      self, mask_gt, mask_pred, spacing_mm):
    """1D and 4D masks are unsupported and must raise ValueError."""
    with self.assertRaisesRegex(ValueError,
                                'Only 2D and 3D masks are supported'):
      surface_distance.compute_surface_distances(mask_gt, mask_pred, spacing_mm)
class SurfaceDistance2DTest(SurfaceDistanceTest, parameterized.TestCase):
  """Metric tests on 2D masks (anisotropic pixel spacing)."""
  def test_on_2_pixels_2mm_away(self):
    """Two single active pixels, 2 mm apart along the 1 mm axis."""
    mask_gt = np.zeros((128, 128), bool)
    mask_pred = np.zeros((128, 128), bool)
    mask_gt[50, 70] = 1
    mask_pred[50, 72] = 1
    surface_distances = surface_distance.compute_surface_distances(
        mask_gt, mask_pred, spacing_mm=(2, 1))
    # Each isolated pixel contributes four diagonal surfels of equal area.
    diag = 0.5 * math.sqrt(2**2 + 1**2)
    expected_distances = {
        'surfel_areas_gt': np.asarray([diag, diag, diag, diag]),
        'surfel_areas_pred': np.asarray([diag, diag, diag, diag]),
        'distances_gt_to_pred': np.asarray([1., 1., 2., 2.]),
        'distances_pred_to_gt': np.asarray([1., 1., 2., 2.]),
    }
    self.assertEqual(len(expected_distances), len(surface_distances))
    for key, expected_value in expected_distances.items():
      np.testing.assert_array_equal(expected_value, surface_distances[key])
    self._assert_metrics(
        surface_distances,
        mask_gt,
        mask_pred,
        expected_average_surface_distance=(1.5, 1.5),
        expected_hausdorff_100=2.0,
        expected_hausdorff_95=2.0,
        expected_surface_overlap_at_1mm=(0.5, 0.5),
        expected_surface_dice_at_1mm=0.5,
        expected_volumetric_dice=0.0)
  def test_two_squares_shifted_by_one_pixel(self):
    """A 2x2 square vs. a 3x2 rectangle; expectations built surfel by surfel."""
    # We make sure we do not have active pixels on the border of the image,
    # because this will add additional 2D surfaces on the border of the image
    # because the image is padded with background.
    mask_gt = np.asarray(
        [
            [0, 0, 0, 0, 0, 0],
            [0, 1, 1, 0, 0, 0],
            [0, 1, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0],
        ],
        dtype=bool)
    mask_pred = np.asarray(
        [
            [0, 0, 0, 0, 0, 0],
            [0, 1, 1, 0, 0, 0],
            [0, 1, 1, 0, 0, 0],
            [0, 1, 1, 0, 0, 0],
            [0, 0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0, 0],
        ],
        dtype=bool)
    vertical = 2
    horizontal = 1
    diag = 0.5 * math.sqrt(horizontal**2 + vertical**2)
    surface_distances = surface_distance.compute_surface_distances(
        mask_gt, mask_pred, spacing_mm=(vertical, horizontal))
    # We go from top left corner, clockwise to describe the surfaces and
    # distances. The 2 surfaces are:
    #
    #  /-\ /-\
    #  | | | |
    #  \-/ | |
    #      \-/
    expected_surfel_areas_gt = np.asarray(
        [diag, horizontal, diag, vertical, diag, horizontal, diag, vertical])
    expected_surfel_areas_pred = np.asarray([
        diag, horizontal, diag, vertical, vertical, diag, horizontal, diag,
        vertical, vertical
    ])
    expected_distances_gt_to_pred = np.asarray([0] * 5 + [horizontal] + [0] * 2)
    expected_distances_pred_to_gt = np.asarray([0] * 5 + [vertical] * 3 +
                                               [0] * 2)
    # We sort these using the same sorting algorithm
    (expected_distances_gt_to_pred, expected_surfel_areas_gt) = (
        metrics._sort_distances_surfels(expected_distances_gt_to_pred,
                                        expected_surfel_areas_gt))
    (expected_distances_pred_to_gt, expected_surfel_areas_pred) = (
        metrics._sort_distances_surfels(expected_distances_pred_to_gt,
                                        expected_surfel_areas_pred))
    expected_distances = {
        'surfel_areas_gt': expected_surfel_areas_gt,
        'surfel_areas_pred': expected_surfel_areas_pred,
        'distances_gt_to_pred': expected_distances_gt_to_pred,
        'distances_pred_to_gt': expected_distances_pred_to_gt,
    }
    self.assertEqual(len(expected_distances), len(surface_distances))
    for key, expected_value in expected_distances.items():
      np.testing.assert_array_equal(expected_value, surface_distances[key])
    # Derived metrics are cross-checked against values recomputed from the
    # hand-built expected distances.
    self._assert_metrics(
        surface_distances,
        mask_gt,
        mask_pred,
        expected_average_surface_distance=(
            surface_distance.compute_average_surface_distance(
                expected_distances)),
        expected_hausdorff_100=(surface_distance.compute_robust_hausdorff(
            expected_distances, 100)),
        expected_hausdorff_95=surface_distance.compute_robust_hausdorff(
            expected_distances, 95),
        expected_surface_overlap_at_1mm=(
            surface_distance.compute_surface_overlap_at_tolerance(
                expected_distances, tolerance_mm=1)),
        expected_surface_dice_at_1mm=(
            surface_distance.compute_surface_dice_at_tolerance(
                surface_distances, tolerance_mm=1)),
        expected_volumetric_dice=(surface_distance.compute_dice_coefficient(
            mask_gt, mask_pred)))
  def test_empty_prediction_mask(self):
    """Empty prediction: pred->gt stats are NaN, gt->pred distances infinite."""
    mask_gt = np.zeros((128, 128), bool)
    mask_pred = np.zeros((128, 128), bool)
    mask_gt[50, 60] = 1
    surface_distances = surface_distance.compute_surface_distances(
        mask_gt, mask_pred, spacing_mm=(3, 2))
    self._assert_metrics(
        surface_distances,
        mask_gt,
        mask_pred,
        expected_average_surface_distance=(np.inf, np.nan),
        expected_hausdorff_100=np.inf,
        expected_hausdorff_95=np.inf,
        expected_surface_overlap_at_1mm=(0.0, np.nan),
        expected_surface_dice_at_1mm=0.0,
        expected_volumetric_dice=0.0)
  def test_empty_ground_truth_mask(self):
    """Empty ground truth: mirror image of the empty-prediction case."""
    mask_gt = np.zeros((128, 128), bool)
    mask_pred = np.zeros((128, 128), bool)
    mask_pred[50, 60] = 1
    surface_distances = surface_distance.compute_surface_distances(
        mask_gt, mask_pred, spacing_mm=(3, 2))
    self._assert_metrics(
        surface_distances,
        mask_gt,
        mask_pred,
        expected_average_surface_distance=(np.nan, np.inf),
        expected_hausdorff_100=np.inf,
        expected_hausdorff_95=np.inf,
        expected_surface_overlap_at_1mm=(np.nan, 0.0),
        expected_surface_dice_at_1mm=0.0,
        expected_volumetric_dice=0.0)
  def test_both_empty_masks(self):
    """Both masks empty: most metrics are NaN, Hausdorff is infinite."""
    mask_gt = np.zeros((128, 128), bool)
    mask_pred = np.zeros((128, 128), bool)
    surface_distances = surface_distance.compute_surface_distances(
        mask_gt, mask_pred, spacing_mm=(3, 2))
    self._assert_metrics(
        surface_distances,
        mask_gt,
        mask_pred,
        expected_average_surface_distance=(np.nan, np.nan),
        expected_hausdorff_100=np.inf,
        expected_hausdorff_95=np.inf,
        expected_surface_overlap_at_1mm=(np.nan, np.nan),
        expected_surface_dice_at_1mm=np.nan,
        expected_volumetric_dice=np.nan)
class SurfaceDistance3DTest(SurfaceDistanceTest):
  """Metric tests on 3D masks (anisotropic voxel spacing)."""
  def test_on_2_pixels_2mm_away(self):
    """Two single active voxels, 2 mm apart along the 1 mm axis."""
    mask_gt = np.zeros((128, 128, 128), bool)
    mask_pred = np.zeros((128, 128, 128), bool)
    mask_gt[50, 60, 70] = 1
    mask_pred[50, 60, 72] = 1
    surface_distances = surface_distance.compute_surface_distances(
        mask_gt, mask_pred, spacing_mm=(3, 2, 1))
    self._assert_metrics(surface_distances, mask_gt, mask_pred,
                         expected_average_surface_distance=(1.5, 1.5),
                         expected_hausdorff_100=2.0,
                         expected_hausdorff_95=2.0,
                         expected_surface_overlap_at_1mm=(0.5, 0.5),
                         expected_surface_dice_at_1mm=0.5,
                         expected_volumetric_dice=0.0)
  def test_two_cubes_shifted_by_one_pixel(self):
    """Two large half-volume slabs differing by one voxel slice."""
    mask_gt = np.zeros((100, 100, 100), bool)
    mask_pred = np.zeros((100, 100, 100), bool)
    mask_gt[0:50, :, :] = 1
    mask_pred[0:51, :, :] = 1
    surface_distances = surface_distance.compute_surface_distances(
        mask_gt, mask_pred, spacing_mm=(2, 1, 1))
    self._assert_metrics(
        surface_distances, mask_gt, mask_pred,
        expected_average_surface_distance=(0.322, 0.339),
        expected_hausdorff_100=2.0,
        expected_hausdorff_95=2.0,
        expected_surface_overlap_at_1mm=(0.842, 0.830),
        expected_surface_dice_at_1mm=0.836,
        expected_volumetric_dice=0.990)
  def test_empty_prediction_mask(self):
    """Empty prediction: pred->gt stats are NaN, gt->pred distances infinite."""
    mask_gt = np.zeros((128, 128, 128), bool)
    mask_pred = np.zeros((128, 128, 128), bool)
    mask_gt[50, 60, 70] = 1
    surface_distances = surface_distance.compute_surface_distances(
        mask_gt, mask_pred, spacing_mm=(3, 2, 1))
    self._assert_metrics(
        surface_distances, mask_gt, mask_pred,
        expected_average_surface_distance=(np.inf, np.nan),
        expected_hausdorff_100=np.inf,
        expected_hausdorff_95=np.inf,
        expected_surface_overlap_at_1mm=(0.0, np.nan),
        expected_surface_dice_at_1mm=0.0,
        expected_volumetric_dice=0.0)
  def test_empty_ground_truth_mask(self):
    """Empty ground truth: mirror image of the empty-prediction case."""
    mask_gt = np.zeros((128, 128, 128), bool)
    mask_pred = np.zeros((128, 128, 128), bool)
    mask_pred[50, 60, 72] = 1
    surface_distances = surface_distance.compute_surface_distances(
        mask_gt, mask_pred, spacing_mm=(3, 2, 1))
    self._assert_metrics(
        surface_distances, mask_gt, mask_pred,
        expected_average_surface_distance=(np.nan, np.inf),
        expected_hausdorff_100=np.inf,
        expected_hausdorff_95=np.inf,
        expected_surface_overlap_at_1mm=(np.nan, 0.0),
        expected_surface_dice_at_1mm=0.0,
        expected_volumetric_dice=0.0)
  def test_both_empty_masks(self):
    """Both masks empty: most metrics are NaN, Hausdorff is infinite."""
    mask_gt = np.zeros((128, 128, 128), bool)
    mask_pred = np.zeros((128, 128, 128), bool)
    surface_distances = surface_distance.compute_surface_distances(
        mask_gt, mask_pred, spacing_mm=(3, 2, 1))
    self._assert_metrics(
        surface_distances, mask_gt, mask_pred,
        expected_average_surface_distance=(np.nan, np.nan),
        expected_hausdorff_100=np.inf,
        expected_hausdorff_95=np.inf,
        expected_surface_overlap_at_1mm=(np.nan, np.nan),
        expected_surface_dice_at_1mm=np.nan,
        expected_volumetric_dice=np.nan)
if __name__ == '__main__':
absltest.main()
| 14,109 | 37.135135 | 80 | py |
surface-distance | surface-distance-master/surface_distance/lookup_tables.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lookup tables used by surface distance metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
# Kernel of the eight powers of two: elementwise-multiplying a binary 2x2x2
# neighbourhood by this kernel and summing yields a unique code in [0, 255],
# matching the 256 neighbour codes documented for the lookup table below.
ENCODE_NEIGHBOURHOOD_3D_KERNEL = np.array([[[128, 64], [32, 16]], [[8, 4],
                                           [2, 1]]])
# _NEIGHBOUR_CODE_TO_NORMALS is a lookup table.
# For every binary neighbour code
# (2x2x2 neighbourhood = 8 neighbours = 8 bits = 256 codes)
# it contains the surface normals of the triangles (called "surfel" for
# "surface element" in the following). The length of the normal
# vector encodes the surfel area.
#
# created using the marching_cube algorithm
# see e.g. https://en.wikipedia.org/wiki/Marching_cubes
# pylint: disable=line-too-long
_NEIGHBOUR_CODE_TO_NORMALS = [
[[0, 0, 0]],
[[0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
[[0.125, -0.125, 0.125]],
[[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
[[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
[[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125]],
[[-0.5, 0.0, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[0.5, 0.0, 0.0], [0.5, 0.0, 0.0]],
[[0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.5, 0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, 0.0, -0.5], [0.25, 0.25, 0.25], [-0.125, -0.125, -0.125]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.125, -0.125, -0.125]],
[[0.125, 0.125, 0.125], [0.375, 0.375, 0.375], [0.0, -0.25, 0.25], [-0.25, 0.0, 0.25]],
[[0.125, -0.125, -0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.375, 0.375, 0.375], [0.0, 0.25, -0.25], [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
[[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], [0.125, 0.125, 0.125]],
[[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25]],
[[0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
[[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25]],
[[0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125], [-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
[[0.0, -0.25, 0.25], [0.0, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[-0.375, -0.375, 0.375], [-0.0, 0.25, 0.25], [0.125, 0.125, -0.125], [-0.25, -0.0, -0.25]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.25, 0.25, -0.25], [0.25, 0.25, -0.25], [0.125, 0.125, -0.125], [-0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125], [0.125, -0.125, 0.125]],
[[0.0, 0.25, -0.25], [0.375, -0.375, -0.375], [-0.125, 0.125, 0.125], [0.25, 0.25, 0.0]],
[[-0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
[[0.0, 0.5, 0.0], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[0.0, 0.5, 0.0], [0.125, -0.125, 0.125], [-0.25, 0.25, -0.25]],
[[0.0, 0.5, 0.0], [0.0, -0.5, 0.0]],
[[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.125, -0.125, 0.125]],
[[-0.375, -0.375, -0.375], [-0.25, 0.0, 0.25], [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
[[0.125, 0.125, 0.125], [0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
[[0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
[[-0.125, 0.125, 0.125], [0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
[[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.375, 0.375, -0.375], [-0.25, -0.25, 0.0], [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
[[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
[[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
[[-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25]],
[[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.375, -0.375, 0.375], [0.0, -0.25, -0.25], [-0.125, 0.125, -0.125], [0.25, 0.25, 0.0]],
[[-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
[[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[-0.25, 0.25, -0.25], [-0.25, 0.25, -0.25], [-0.125, 0.125, -0.125], [-0.125, 0.125, -0.125]],
[[-0.25, 0.0, -0.25], [0.375, -0.375, -0.375], [0.0, 0.25, -0.25], [-0.125, 0.125, 0.125]],
[[0.5, 0.0, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
[[-0.0, 0.0, 0.5], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
[[-0.25, -0.0, -0.25], [-0.375, 0.375, 0.375], [-0.25, -0.25, 0.0], [-0.125, 0.125, 0.125]],
[[0.0, 0.0, -0.5], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[-0.0, 0.0, 0.5], [0.0, 0.0, 0.5]],
[[0.125, 0.125, 0.125], [0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25], [-0.125, 0.125, 0.125]],
[[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
[[0.125, -0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[0.25, 0.0, 0.25], [-0.375, -0.375, 0.375], [-0.25, 0.25, 0.0], [-0.125, -0.125, 0.125]],
[[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
[[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
[[0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
[[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.5, 0.0, -0.0], [0.25, -0.25, -0.25], [0.125, -0.125, -0.125]],
[[-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[0.375, -0.375, 0.375], [0.0, 0.25, 0.25], [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
[[0.0, -0.5, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[-0.375, -0.375, 0.375], [0.25, -0.25, 0.0], [0.0, 0.25, 0.25], [-0.125, -0.125, 0.125]],
[[-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[0.125, 0.125, 0.125], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
[[0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
[[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
[[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
[[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.125, 0.125, 0.125]],
[[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
[[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125], [0.125, 0.125, 0.125]],
[[0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
[[0.125, 0.125, 0.125], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
[[-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[-0.375, -0.375, 0.375], [0.25, -0.25, 0.0], [0.0, 0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.0, -0.5, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[0.375, -0.375, 0.375], [0.0, 0.25, 0.25], [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
[[-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[0.5, 0.0, -0.0], [0.25, -0.25, -0.25], [0.125, -0.125, -0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25], [0.0, 0.25, 0.25]],
[[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[0.125, 0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.25, 0.0, 0.25], [-0.375, -0.375, 0.375], [-0.25, 0.25, 0.0], [-0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25], [0.25, 0.0, 0.25]],
[[-0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25], [-0.125, 0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[0.125, 0.125, 0.125], [0.125, 0.125, 0.125], [0.25, 0.25, 0.25], [0.0, 0.0, 0.5]],
[[-0.0, 0.0, 0.5], [0.0, 0.0, 0.5]],
[[0.0, 0.0, -0.5], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[-0.25, -0.0, -0.25], [-0.375, 0.375, 0.375], [-0.25, -0.25, 0.0], [-0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
[[-0.0, 0.0, 0.5], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[-0.25, 0.0, 0.25], [0.25, 0.0, -0.25]],
[[0.5, 0.0, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[-0.25, 0.0, -0.25], [0.375, -0.375, -0.375], [0.0, 0.25, -0.25], [-0.125, 0.125, 0.125]],
[[-0.25, 0.25, -0.25], [-0.25, 0.25, -0.25], [-0.125, 0.125, -0.125], [-0.125, 0.125, -0.125]],
[[-0.0, 0.5, 0.0], [-0.25, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[0.375, -0.375, 0.375], [0.0, -0.25, -0.25], [-0.125, 0.125, -0.125], [0.25, 0.25, 0.0]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.0, 0.0, 0.5], [0.25, -0.25, 0.25], [0.125, -0.125, 0.125]],
[[0.0, -0.25, 0.25], [0.0, -0.25, 0.25]],
[[-0.125, -0.125, 0.125], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
[[-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
[[0.125, 0.125, 0.125], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0]],
[[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
[[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125]],
[[-0.375, 0.375, -0.375], [-0.25, -0.25, 0.0], [-0.125, 0.125, -0.125], [-0.25, 0.0, 0.25]],
[[0.0, 0.5, 0.0], [0.25, 0.25, -0.25], [-0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
[[0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
[[0.125, 0.125, 0.125], [0.0, -0.5, 0.0], [-0.25, -0.25, -0.25], [-0.125, -0.125, -0.125]],
[[-0.375, -0.375, -0.375], [-0.25, 0.0, 0.25], [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
[[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0], [0.125, -0.125, 0.125]],
[[0.0, 0.5, 0.0], [0.0, -0.5, 0.0]],
[[0.0, 0.5, 0.0], [0.125, -0.125, 0.125], [-0.25, 0.25, -0.25]],
[[0.0, 0.5, 0.0], [-0.25, 0.25, 0.25], [0.125, -0.125, -0.125]],
[[0.25, -0.25, 0.0], [-0.25, 0.25, 0.0]],
[[-0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.0, 0.25, -0.25], [0.375, -0.375, -0.375], [-0.125, 0.125, 0.125], [0.25, 0.25, 0.0]],
[[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125], [0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.25, 0.25, -0.25], [0.25, 0.25, -0.25], [0.125, 0.125, -0.125], [-0.125, -0.125, 0.125]],
[[-0.0, 0.0, 0.5], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[-0.375, -0.375, 0.375], [-0.0, 0.25, 0.25], [0.125, 0.125, -0.125], [-0.25, -0.0, -0.25]],
[[0.0, -0.25, 0.25], [0.0, 0.25, -0.25], [0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125], [-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
[[0.125, -0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.0, -0.5, 0.0], [0.125, 0.125, -0.125], [0.25, 0.25, -0.25]],
[[0.0, -0.25, 0.25], [0.0, 0.25, -0.25]],
[[0.125, 0.125, 0.125], [0.125, -0.125, 0.125]],
[[0.125, -0.125, 0.125]],
[[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25]],
[[-0.5, 0.0, 0.0], [-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], [0.125, 0.125, 0.125]],
[[0.375, 0.375, 0.375], [0.0, 0.25, -0.25], [-0.125, -0.125, -0.125], [-0.25, 0.25, 0.0]],
[[0.125, -0.125, -0.125], [0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.125, 0.125, 0.125], [0.375, 0.375, 0.375], [0.0, -0.25, 0.25], [-0.25, 0.0, 0.25]],
[[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125], [0.125, -0.125, -0.125]],
[[-0.125, -0.125, -0.125], [-0.25, -0.25, -0.25], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, 0.0, -0.5], [0.25, 0.25, 0.25], [-0.125, -0.125, -0.125]],
[[0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.5, 0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[-0.125, -0.125, 0.125], [0.125, -0.125, -0.125]],
[[0.0, -0.25, -0.25], [0.0, 0.25, 0.25]],
[[0.125, -0.125, -0.125]],
[[0.5, 0.0, 0.0], [0.5, 0.0, 0.0]],
[[-0.5, 0.0, 0.0], [-0.25, 0.25, 0.25], [-0.125, 0.125, 0.125]],
[[0.5, 0.0, 0.0], [0.25, -0.25, 0.25], [-0.125, 0.125, -0.125]],
[[0.25, -0.25, 0.0], [0.25, -0.25, 0.0]],
[[0.5, 0.0, 0.0], [-0.25, -0.25, 0.25], [-0.125, -0.125, 0.125]],
[[-0.25, 0.0, 0.25], [-0.25, 0.0, 0.25]],
[[0.125, 0.125, 0.125], [-0.125, 0.125, 0.125]],
[[-0.125, 0.125, 0.125]],
[[0.5, 0.0, -0.0], [0.25, 0.25, 0.25], [0.125, 0.125, 0.125]],
[[0.125, -0.125, 0.125], [-0.125, -0.125, 0.125]],
[[-0.25, -0.0, -0.25], [0.25, 0.0, 0.25]],
[[0.125, -0.125, 0.125]],
[[-0.25, -0.25, 0.0], [0.25, 0.25, -0.0]],
[[-0.125, -0.125, 0.125]],
[[0.125, 0.125, 0.125]],
[[0, 0, 0]]]
# pylint: enable=line-too-long
def create_table_neighbour_code_to_surface_area(spacing_mm):
  """Returns an array mapping neighbourhood code to the surface elements area.

  Note that the normals encode the initial surface area. This function computes
  the area corresponding to the given `spacing_mm`.

  Args:
    spacing_mm: 3-element list-like structure. Voxel spacing in x0, x1 and x2
      direction.

  Returns:
    1-dim numpy array of 256 floats: the total surface area contributed by
    each 2x2x2 neighbourhood code under the given spacing.
  """
  # Component i of each normal is scaled by the product of the spacings of the
  # two *other* axes (area of the voxel face orthogonal to axis i).
  scale = np.array([spacing_mm[1] * spacing_mm[2],
                    spacing_mm[0] * spacing_mm[2],
                    spacing_mm[0] * spacing_mm[1]])

  neighbour_code_to_surface_area = np.zeros([256])
  for code in range(256):
    scaled_normals = np.array(_NEIGHBOUR_CODE_TO_NORMALS[code]) * scale
    # Each row is one surface element's scaled normal; its Euclidean length is
    # that element's area. Summing rows gives the code's total area.
    neighbour_code_to_surface_area[code] = (
        np.linalg.norm(scaled_normals, axis=1).sum())
  return neighbour_code_to_surface_area
# In the neighbourhood, points are ordered: top left, top right, bottom left,
# bottom right.
# 2x2 correlation kernel packing the four-point occupancy into a 4-bit code
# (top-left contributes the most significant bit, bottom-right the least).
ENCODE_NEIGHBOURHOOD_2D_KERNEL = np.array([[8, 4], [2, 1]])
def create_table_neighbour_code_to_contour_length(spacing_mm):
  """Returns an array mapping neighbourhood code to the contour length.

  For the list of possible cases and their figures, see page 38 from:
  https://nccastaff.bournemouth.ac.uk/jmacey/MastersProjects/MSc14/06/thesis.pdf

  In 2D, each point has 4 neighbors. Thus, are 16 configurations. A
  configuration is encoded with '1' meaning "inside the object" and '0'
  "outside the object". The points are ordered: top left, top right, bottom
  left, bottom right.

  The x0 axis is assumed vertical downward, and the x1 axis is horizontal to
  the right:

    (0, 0) --> (0, 1)
      |
    (1, 0)

  Args:
    spacing_mm: 2-element list-like structure. Voxel spacing in x0 and x1
      directions.
  """
  vertical = spacing_mm[0]
  horizontal = spacing_mm[1]
  diag = 0.5 * math.sqrt(spacing_mm[0]**2 + spacing_mm[1]**2)

  # Codes 0b0000 (all outside) and 0b1111 (all inside) cross no contour and
  # keep the default length of zero.
  code_to_length = {
      0b0001: diag,
      0b0010: diag,
      0b0011: horizontal,
      0b0100: diag,
      0b0101: vertical,
      0b0110: 2 * diag,
      0b0111: diag,
      0b1000: diag,
      0b1001: 2 * diag,
      0b1010: vertical,
      0b1011: diag,
      0b1100: horizontal,
      0b1101: diag,
      0b1110: diag,
  }

  neighbour_code_to_contour_length = np.zeros([16])
  for code, length in code_to_length.items():
    neighbour_code_to_contour_length[code] = length
  return neighbour_code_to_contour_length
| 22,758 | 55.755611 | 101 | py |
surface-distance | surface-distance-master/surface_distance/metrics.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module exposing surface distance based measures."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import lookup_tables # pylint: disable=relative-beyond-top-level
import numpy as np
from scipy import ndimage
def _assert_is_numpy_array(name, array):
"""Raises an exception if `array` is not a numpy array."""
if not isinstance(array, np.ndarray):
raise ValueError("The argument {!r} should be a numpy array, not a "
"{}".format(name, type(array)))
def _check_nd_numpy_array(name, array, num_dims):
"""Raises an exception if `array` is not a `num_dims`-D numpy array."""
if len(array.shape) != num_dims:
raise ValueError("The argument {!r} should be a {}D array, not of "
"shape {}".format(name, num_dims, array.shape))
def _check_2d_numpy_array(name, array):
  """Raises ValueError if `array` is not a 2-D numpy array."""
  _check_nd_numpy_array(name, array, num_dims=2)
def _check_3d_numpy_array(name, array):
  """Raises ValueError if `array` is not a 3-D numpy array."""
  _check_nd_numpy_array(name, array, num_dims=3)
def _assert_is_bool_numpy_array(name, array):
  """Raises ValueError if `array` is not a numpy array of dtype bool."""
  _assert_is_numpy_array(name, array)
  if array.dtype != bool:
    raise ValueError("The argument {!r} should be a numpy array of type bool, "
                     "not {}".format(name, array.dtype))
def _compute_bounding_box(mask):
"""Computes the bounding box of the masks.
This function generalizes to arbitrary number of dimensions great or equal
to 1.
Args:
mask: The 2D or 3D numpy mask, where '0' means background and non-zero means
foreground.
Returns:
A tuple:
- The coordinates of the first point of the bounding box (smallest on all
axes), or `None` if the mask contains only zeros.
- The coordinates of the second point of the bounding box (greatest on all
axes), or `None` if the mask contains only zeros.
"""
num_dims = len(mask.shape)
bbox_min = np.zeros(num_dims, np.int64)
bbox_max = np.zeros(num_dims, np.int64)
# max projection to the x0-axis
proj_0 = np.amax(mask, axis=tuple(range(num_dims))[1:])
idx_nonzero_0 = np.nonzero(proj_0)[0]
if len(idx_nonzero_0) == 0: # pylint: disable=g-explicit-length-test
return None, None
bbox_min[0] = np.min(idx_nonzero_0)
bbox_max[0] = np.max(idx_nonzero_0)
# max projection to the i-th-axis for i in {1, ..., num_dims - 1}
for axis in range(1, num_dims):
max_over_axes = list(range(num_dims)) # Python 3 compatible
max_over_axes.pop(axis) # Remove the i-th dimension from the max
max_over_axes = tuple(max_over_axes) # numpy expects a tuple of ints
proj = np.amax(mask, axis=max_over_axes)
idx_nonzero = np.nonzero(proj)[0]
bbox_min[axis] = np.min(idx_nonzero)
bbox_max[axis] = np.max(idx_nonzero)
return bbox_min, bbox_max
def _crop_to_bounding_box(mask, bbox_min, bbox_max):
"""Crops a 2D or 3D mask to the bounding box specified by `bbox_{min,max}`."""
# we need to zeropad the cropped region with 1 voxel at the lower,
# the right (and the back on 3D) sides. This is required to obtain the
# "full" convolution result with the 2x2 (or 2x2x2 in 3D) kernel.
# TODO: This is correct only if the object is interior to the
# bounding box.
cropmask = np.zeros((bbox_max - bbox_min) + 2, np.uint8)
num_dims = len(mask.shape)
# pyformat: disable
if num_dims == 2:
cropmask[0:-1, 0:-1] = mask[bbox_min[0]:bbox_max[0] + 1,
bbox_min[1]:bbox_max[1] + 1]
elif num_dims == 3:
cropmask[0:-1, 0:-1, 0:-1] = mask[bbox_min[0]:bbox_max[0] + 1,
bbox_min[1]:bbox_max[1] + 1,
bbox_min[2]:bbox_max[2] + 1]
# pyformat: enable
else:
assert False
return cropmask
def _sort_distances_surfels(distances, surfel_areas):
"""Sorts the two list with respect to the tuple of (distance, surfel_area).
Args:
distances: The distances from A to B (e.g. `distances_gt_to_pred`).
surfel_areas: The surfel areas for A (e.g. `surfel_areas_gt`).
Returns:
A tuple of the sorted (distances, surfel_areas).
"""
sorted_surfels = np.array(sorted(zip(distances, surfel_areas)))
return sorted_surfels[:, 0], sorted_surfels[:, 1]
def compute_surface_distances(mask_gt,
                              mask_pred,
                              spacing_mm):
  """Computes closest distances from all surface points to the other surface.

  This function can be applied to 2D or 3D tensors. For 2D, both masks must be
  2D and `spacing_mm` must be a 2-element list. For 3D, both masks must be 3D
  and `spacing_mm` must be a 3-element list. The description is done for the 2D
  case, and the formulation for the 3D case is present is parenthesis,
  introduced by "resp.".

  Finds all contour elements (resp surface elements "surfels" in 3D) in the
  ground truth mask `mask_gt` and the predicted mask `mask_pred`, computes their
  length in mm (resp. area in mm^2) and the distance to the closest point on the
  other contour (resp. surface). It returns two sorted lists of distances
  together with the corresponding contour lengths (resp. surfel areas). If one
  of the masks is empty, the corresponding lists are empty and all distances in
  the other list are `inf`.

  Args:
    mask_gt: 2-dim (resp. 3-dim) bool Numpy array. The ground truth mask.
    mask_pred: 2-dim (resp. 3-dim) bool Numpy array. The predicted mask.
    spacing_mm: 2-element (resp. 3-element) list-like structure. Voxel spacing
      in x0 anx x1 (resp. x0, x1 and x2) directions.

  Returns:
    A dict with:
    "distances_gt_to_pred": 1-dim numpy array of type float. The distances in mm
        from all ground truth surface elements to the predicted surface,
        sorted from smallest to largest.
    "distances_pred_to_gt": 1-dim numpy array of type float. The distances in mm
        from all predicted surface elements to the ground truth surface,
        sorted from smallest to largest.
    "surfel_areas_gt": 1-dim numpy array of type float. The length of the
        of the ground truth contours in mm (resp. the surface elements area in
        mm^2) in the same order as distances_gt_to_pred.
    "surfel_areas_pred": 1-dim numpy array of type float. The length of the
        of the predicted contours in mm (resp. the surface elements area in
        mm^2) in the same order as distances_gt_to_pred.

  Raises:
    ValueError: If the masks and the `spacing_mm` arguments are of incompatible
      shape or type. Or if the masks are not 2D or 3D.
  """
  # The terms used in this function are for the 3D case. In particular, surface
  # in 2D stands for contours in 3D. The surface elements in 3D correspond to
  # the line elements in 2D.
  _assert_is_bool_numpy_array("mask_gt", mask_gt)
  _assert_is_bool_numpy_array("mask_pred", mask_pred)

  if not len(mask_gt.shape) == len(mask_pred.shape) == len(spacing_mm):
    raise ValueError("The arguments must be of compatible shape. Got mask_gt "
                     "with {} dimensions ({}) and mask_pred with {} dimensions "
                     "({}), while the spacing_mm was {} elements.".format(
                         len(mask_gt.shape),
                         mask_gt.shape, len(mask_pred.shape), mask_pred.shape,
                         len(spacing_mm)))

  num_dims = len(spacing_mm)
  if num_dims == 2:
    _check_2d_numpy_array("mask_gt", mask_gt)
    _check_2d_numpy_array("mask_pred", mask_pred)

    # compute the area for all 16 possible surface elements
    # (given a 2x2 neighbourhood) according to the spacing_mm
    neighbour_code_to_surface_area = (
        lookup_tables.create_table_neighbour_code_to_contour_length(spacing_mm))
    kernel = lookup_tables.ENCODE_NEIGHBOURHOOD_2D_KERNEL
    full_true_neighbours = 0b1111
  elif num_dims == 3:
    _check_3d_numpy_array("mask_gt", mask_gt)
    _check_3d_numpy_array("mask_pred", mask_pred)

    # compute the area for all 256 possible surface elements
    # (given a 2x2x2 neighbourhood) according to the spacing_mm
    neighbour_code_to_surface_area = (
        lookup_tables.create_table_neighbour_code_to_surface_area(spacing_mm))
    kernel = lookup_tables.ENCODE_NEIGHBOURHOOD_3D_KERNEL
    full_true_neighbours = 0b11111111
  else:
    raise ValueError("Only 2D and 3D masks are supported, not "
                     "{}D.".format(num_dims))

  # compute the bounding box of the masks to trim the volume to the smallest
  # possible processing subvolume
  bbox_min, bbox_max = _compute_bounding_box(mask_gt | mask_pred)
  # Both the min/max bbox are None at the same time, so we only check one.
  if bbox_min is None:
    return {
        "distances_gt_to_pred": np.array([]),
        "distances_pred_to_gt": np.array([]),
        "surfel_areas_gt": np.array([]),
        "surfel_areas_pred": np.array([]),
    }

  # crop the processing subvolume.
  cropmask_gt = _crop_to_bounding_box(mask_gt, bbox_min, bbox_max)
  cropmask_pred = _crop_to_bounding_box(mask_pred, bbox_min, bbox_max)

  # compute the neighbour code (local binary pattern) for each voxel
  # the resulting arrays are spacially shifted by minus half a voxel in each
  # axis.
  # i.e. the points are located at the corners of the original voxels
  # NOTE: `ndimage.correlate` is used directly -- the `scipy.ndimage.filters`
  # namespace was deprecated in SciPy 1.8 and later removed.
  neighbour_code_map_gt = ndimage.correlate(
      cropmask_gt.astype(np.uint8), kernel, mode="constant", cval=0)
  neighbour_code_map_pred = ndimage.correlate(
      cropmask_pred.astype(np.uint8), kernel, mode="constant", cval=0)

  # create masks with the surface voxels
  borders_gt = ((neighbour_code_map_gt != 0) &
                (neighbour_code_map_gt != full_true_neighbours))
  borders_pred = ((neighbour_code_map_pred != 0) &
                  (neighbour_code_map_pred != full_true_neighbours))

  # compute the distance transform (closest distance of each voxel to the
  # surface voxels)
  # NOTE: `ndimage.distance_transform_edt` replaces the deprecated/removed
  # `scipy.ndimage.morphology` namespace; `np.inf` replaces the `np.Inf`
  # alias removed in NumPy 2.0.
  if borders_gt.any():
    distmap_gt = ndimage.distance_transform_edt(
        ~borders_gt, sampling=spacing_mm)
  else:
    distmap_gt = np.inf * np.ones(borders_gt.shape)

  if borders_pred.any():
    distmap_pred = ndimage.distance_transform_edt(
        ~borders_pred, sampling=spacing_mm)
  else:
    distmap_pred = np.inf * np.ones(borders_pred.shape)

  # compute the area of each surface element
  surface_area_map_gt = neighbour_code_to_surface_area[neighbour_code_map_gt]
  surface_area_map_pred = neighbour_code_to_surface_area[
      neighbour_code_map_pred]

  # create a list of all surface elements with distance and area
  distances_gt_to_pred = distmap_pred[borders_gt]
  distances_pred_to_gt = distmap_gt[borders_pred]
  surfel_areas_gt = surface_area_map_gt[borders_gt]
  surfel_areas_pred = surface_area_map_pred[borders_pred]

  # sort them by distance
  if distances_gt_to_pred.shape != (0,):
    distances_gt_to_pred, surfel_areas_gt = _sort_distances_surfels(
        distances_gt_to_pred, surfel_areas_gt)

  if distances_pred_to_gt.shape != (0,):
    distances_pred_to_gt, surfel_areas_pred = _sort_distances_surfels(
        distances_pred_to_gt, surfel_areas_pred)

  return {
      "distances_gt_to_pred": distances_gt_to_pred,
      "distances_pred_to_gt": distances_pred_to_gt,
      "surfel_areas_gt": surfel_areas_gt,
      "surfel_areas_pred": surfel_areas_pred,
  }
def compute_average_surface_distance(surface_distances):
  """Returns the average surface distance.

  Computes the average surface distances by correctly taking the area of each
  surface element into account. Call compute_surface_distances(...) before, to
  obtain the `surface_distances` dict.

  Args:
    surface_distances: dict with "distances_gt_to_pred", "distances_pred_to_gt"
      "surfel_areas_gt", "surfel_areas_pred" created by
      compute_surface_distances()

  Returns:
    A tuple with two float values:
     - the average distance (in mm) from the ground truth surface to the
       predicted surface
     - the average distance from the predicted surface to the ground truth
       surface.
  """
  def _area_weighted_mean(distances, areas):
    # Weight each surface element's distance by its area.
    return np.sum(distances * areas) / np.sum(areas)

  return (
      _area_weighted_mean(surface_distances["distances_gt_to_pred"],
                          surface_distances["surfel_areas_gt"]),
      _area_weighted_mean(surface_distances["distances_pred_to_gt"],
                          surface_distances["surfel_areas_pred"]),
  )
def compute_robust_hausdorff(surface_distances, percent):
  """Computes the robust Hausdorff distance.

  Computes the robust Hausdorff distance. "Robust", because it uses the
  `percent` percentile of the distances instead of the maximum distance. The
  percentage is computed by correctly taking the area of each surface element
  into account.

  Args:
    surface_distances: dict with "distances_gt_to_pred", "distances_pred_to_gt"
      "surfel_areas_gt", "surfel_areas_pred" created by
      compute_surface_distances()
    percent: a float value between 0 and 100.

  Returns:
    a float value. The robust Hausdorff distance in mm.
  """
  distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
  distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
  surfel_areas_gt = surface_distances["surfel_areas_gt"]
  surfel_areas_pred = surface_distances["surfel_areas_pred"]
  if len(distances_gt_to_pred) > 0:  # pylint: disable=g-explicit-length-test
    # Area-weighted cumulative distribution over the (sorted) distances; pick
    # the first distance whose cumulative area fraction reaches `percent`.
    surfel_areas_cum_gt = np.cumsum(surfel_areas_gt) / np.sum(surfel_areas_gt)
    idx = np.searchsorted(surfel_areas_cum_gt, percent/100.0)
    perc_distance_gt_to_pred = distances_gt_to_pred[
        min(idx, len(distances_gt_to_pred)-1)]
  else:
    # An empty surface is infinitely far away. Use `np.inf`: the `np.Inf`
    # alias was removed in NumPy 2.0.
    perc_distance_gt_to_pred = np.inf

  if len(distances_pred_to_gt) > 0:  # pylint: disable=g-explicit-length-test
    surfel_areas_cum_pred = (np.cumsum(surfel_areas_pred) /
                             np.sum(surfel_areas_pred))
    idx = np.searchsorted(surfel_areas_cum_pred, percent/100.0)
    perc_distance_pred_to_gt = distances_pred_to_gt[
        min(idx, len(distances_pred_to_gt)-1)]
  else:
    perc_distance_pred_to_gt = np.inf

  # The robust Hausdorff distance is symmetric: the max of the two directed
  # percentile distances.
  return max(perc_distance_gt_to_pred, perc_distance_pred_to_gt)
def compute_surface_overlap_at_tolerance(surface_distances, tolerance_mm):
  """Computes the overlap of the surfaces at a specified tolerance.

  Computes the overlap of the ground truth surface with the predicted surface
  and vice versa allowing a specified tolerance (maximum surface-to-surface
  distance that is regarded as overlapping). The overlapping fraction is
  computed by correctly taking the area of each surface element into account.

  Args:
    surface_distances: dict with "distances_gt_to_pred", "distances_pred_to_gt"
      "surfel_areas_gt", "surfel_areas_pred" created by
      compute_surface_distances()
    tolerance_mm: a float value. The tolerance in mm

  Returns:
    A tuple of two float values. The overlap fraction in [0.0, 1.0] of the
    ground truth surface with the predicted surface and vice versa.
  """
  distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
  distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
  surfel_areas_gt = surface_distances["surfel_areas_gt"]
  surfel_areas_pred = surface_distances["surfel_areas_pred"]

  # Boolean masks of the surface elements lying within the tolerance.
  gt_within_tol = distances_gt_to_pred <= tolerance_mm
  pred_within_tol = distances_pred_to_gt <= tolerance_mm

  rel_overlap_gt = (np.sum(surfel_areas_gt[gt_within_tol]) /
                    np.sum(surfel_areas_gt))
  rel_overlap_pred = (np.sum(surfel_areas_pred[pred_within_tol]) /
                      np.sum(surfel_areas_pred))
  return (rel_overlap_gt, rel_overlap_pred)
def compute_surface_dice_at_tolerance(surface_distances, tolerance_mm):
  """Computes the _surface_ DICE coefficient at a specified tolerance.

  Computes the _surface_ DICE coefficient at a specified tolerance. Not to be
  confused with the standard _volumetric_ DICE coefficient. The surface DICE
  measures the overlap of two surfaces instead of two volumes. A surface
  element is counted as overlapping (or touching), when the closest distance to
  the other surface is less or equal to the specified tolerance. The DICE
  coefficient is in the range between 0.0 (no overlap) to 1.0 (perfect
  overlap).

  Args:
    surface_distances: dict with "distances_gt_to_pred", "distances_pred_to_gt"
      "surfel_areas_gt", "surfel_areas_pred" created by
      compute_surface_distances()
    tolerance_mm: a float value. The tolerance in mm

  Returns:
    A float value. The surface DICE coefficient in [0.0, 1.0].
  """
  distances_gt_to_pred = surface_distances["distances_gt_to_pred"]
  distances_pred_to_gt = surface_distances["distances_pred_to_gt"]
  surfel_areas_gt = surface_distances["surfel_areas_gt"]
  surfel_areas_pred = surface_distances["surfel_areas_pred"]

  # Area of each surface that lies within the tolerance of the other surface.
  overlap_gt = np.sum(surfel_areas_gt[distances_gt_to_pred <= tolerance_mm])
  overlap_pred = np.sum(surfel_areas_pred[distances_pred_to_gt <= tolerance_mm])
  total_area = np.sum(surfel_areas_gt) + np.sum(surfel_areas_pred)
  return (overlap_gt + overlap_pred) / total_area
def compute_dice_coefficient(mask_gt, mask_pred):
  """Computes soerensen-dice coefficient.

  compute the soerensen-dice coefficient between the ground truth mask
  `mask_gt` and the predicted mask `mask_pred`.

  Args:
    mask_gt: 3-dim Numpy array of type bool. The ground truth mask.
    mask_pred: 3-dim Numpy array of type bool. The predicted mask.

  Returns:
    the dice coeffcient as float. If both masks are empty, the result is NaN.
  """
  volume_sum = mask_gt.sum() + mask_pred.sum()
  if volume_sum == 0:
    # Use `np.nan`: the `np.NaN` alias was removed in NumPy 2.0.
    return np.nan
  volume_intersect = (mask_gt & mask_pred).sum()
  return 2*volume_intersect / volume_sum
| 18,367 | 40.369369 | 80 | py |
surface-distance | surface-distance-master/surface_distance/__init__.py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Surface distance module: https://github.com/deepmind/surface-distance ."""
from .metrics import * # pylint: disable=wildcard-import
# Package version string.
__version__ = "0.1"
| 754 | 40.944444 | 77 | py |
chatgpt-refusals | chatgpt-refusals-main/classical_model_results.py | import argparse
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import data_processing
def plot_ngram_coefficients(dataset, feature_names, coefficients, text_type, n=18):
    """Save a horizontal bar chart of the n largest-magnitude n-gram coefficients.

    Args:
        dataset: dataset name, used in the output file name.
        feature_names: sequence of n-gram names aligned with `coefficients`.
        coefficients: 1-D numpy array of model coefficients.
        text_type: 'prompt' or 'response', used in the output file name.
        n: number of top coefficients to plot.

    Writes results/ngram_coefs_<dataset>_<text_type>.pdf as a side effect.
    """
    # Select the n coefficients with the largest absolute value.
    top_idx = np.argsort(np.abs(coefficients))[-n:]
    top_coefs = coefficients[top_idx]
    top_names = np.array(feature_names)[top_idx]

    # Reorder the selection by signed value so negative bars sit at the bottom.
    signed_order = np.argsort(top_coefs)
    top_names = top_names[signed_order]
    top_coefs = top_coefs[signed_order]

    # Draw the horizontal bar chart.
    plt.figure(figsize=(8, 6))
    plt.barh(range(n), top_coefs, align='center')
    plt.yticks(range(n), top_names, fontsize=14)
    plt.xlabel('Coefficient', fontsize=16)
    plt.tick_params(axis='x', labelsize=14, length=0)
    plt.tick_params(axis='y', length=0)

    # Save the figure as a high-resolution PDF, then clear the figure state.
    plt.tight_layout()
    plt.savefig(f'results/ngram_coefs_{dataset}_{text_type}.pdf', dpi=600)
    plt.clf()
def print_model_accuracy(y_test, y_pred, text_type, model):
    """Print the test-set accuracy of `model`'s predictions in percent."""
    model_name = type(model).__name__
    pct = 100 * accuracy_score(y_test, y_pred)
    print(f'Achieved {pct:.2f}% test accuracy in {text_type} classification with {model_name}.')
def fit_model(model, text_type, dataset, print_accuracy=False):
    """Fit `model` on TF-IDF n-gram features and optionally report accuracy.

    Args:
        model: an sklearn classifier (LogisticRegression or RandomForest).
        text_type: 'prompt' or 'response' -- which text field to classify.
        dataset: dataset name (JSON file under data/).
        print_accuracy: if True, print the test-set accuracy.

    Side effects: for LogisticRegression models, writes an n-gram coefficient
    plot via plot_ngram_coefficients.
    """
    # Load, preprocess, and split the labeled data.
    X, y = data_processing.preprocess_data(f'data/{dataset}.json', text_type)
    X_train, _, X_test, y_train, _, y_test = data_processing.split_data(X, y)

    # Fit a TF-IDF vectorizer (uni-, bi-, and tri-grams) on training data only,
    # then fit the classifier on the resulting features.
    vectorizer = TfidfVectorizer(ngram_range=(1, 3))
    train_features = vectorizer.fit_transform(X_train)
    model.fit(train_features, y_train)

    # When training on the large bootstrap dataset, evaluate on the
    # hand-labeled Quora set instead of the held-out split.
    if dataset == 'quora_insincere_large_bootstrap':
        X_test, y_test = data_processing.preprocess_data(f'data/quora_insincere_hand_labeled.json', text_type)

    # Transform the chosen test set with the already-fitted vectorizer.
    test_features = vectorizer.transform(X_test)
    y_pred = model.predict(test_features)
    if print_accuracy:
        print_model_accuracy(y_test, y_pred, text_type, model)

    if isinstance(model, LogisticRegression):
        # Plot the n-gram coefficients for logistic regression
        plot_ngram_coefficients(dataset, vectorizer.get_feature_names_out(), model.coef_[0], text_type)
if __name__ == '__main__':
    # Entry point: reproduces the classical-model accuracies ("[Table 5]") and
    # the n-gram coefficient plots ("[Fig. 2]" and "[Fig. 3]").
    # Parse command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--fit_random_forest_on_quora_10k', action='store_true')
    args = parser.parse_args()
    # Define the classical models (separate instances per text type; note
    # each instance may be refit across the calls below)
    lr_responses_model = LogisticRegression(
        C=10,
        max_iter=10000,
        penalty='l2',
        solver='liblinear',
        random_state=0
    )
    lr_prompts_model = LogisticRegression(
        C=10,
        max_iter=10000,
        penalty='l2',
        solver='liblinear',
        random_state=0
    )
    rf_responses_model = RandomForestClassifier(
        max_depth=None,
        min_samples_leaf=1,
        min_samples_split=5,
        n_estimators=300,
        random_state=0
    )
    rf_prompts_model = RandomForestClassifier(
        max_depth=None,
        min_samples_leaf=1,
        min_samples_split=2,
        n_estimators=3000,
        random_state=0
    )
    # Get results for Table 5's classical model accuracies
    print('[Table 5]')
    print('Calculating classical model accuracies for dataset: Hand-Labeled...')
    fit_model(lr_responses_model, 'response', 'all_hand_labeled', print_accuracy=True)
    fit_model(rf_responses_model, 'response', 'all_hand_labeled', print_accuracy=True)
    fit_model(lr_prompts_model, 'prompt', 'quora_insincere_large_bootstrap', print_accuracy=True)
    # The 3000-tree random forest prompt classifier is expensive, so it is
    # opt-in via the command-line flag.
    if args.fit_random_forest_on_quora_10k:
        fit_model(rf_prompts_model, 'prompt', 'quora_insincere_large_bootstrap', print_accuracy=True)
    else:
        print('Skipping random forest prompt classifier fitting.')
    print()
    # Get results for Fig. 2's n-gram coefficients
    print('[Fig. 2]')
    print('Calculating n-gram coefficients for dataset: Hand-Labeled...')
    #fit_model(lr_responses_model, 'response', 'all_hand_labeled') # this was already done above
    fit_model(lr_prompts_model, 'prompt', 'all_hand_labeled')
    print()
    # Get results for Fig. 3's n-gram coefficients
    print('[Fig. 3]')
    print('Calculating n-gram coefficients for dataset: Bootstrapped Quora Insincere Questions...')
    fit_model(lr_responses_model, 'response', 'quora_insincere_large_bootstrap')
    #fit_model(lr_prompts_model, 'prompt', 'quora_insincere_large_bootstrap') # this was already done above
    print()
    print('Finished; n-gram coefficients were written to the "results" folder.')
| 5,598 | 41.097744 | 115 | py |
chatgpt-refusals | chatgpt-refusals-main/data_processing.py | import json
import pandas as pd
from sklearn.model_selection import train_test_split
def preprocess_data(file_path, text_source):
    """Load a labeled JSON dataset and binarize the tone labels.

    Args:
        file_path: path to a JSON file containing a list of records with at
            least the `text_source` field and a 'tone' field.
        text_source: which text field to extract (e.g. 'prompt' or 'response').

    Returns:
        (X, y): list of texts and list of labels in {'complied', 'rejected'}.
    """
    with open(file_path, 'r') as file:
        records = json.load(file)
    frame = pd.DataFrame(records)

    # Drop examples whose label carries no compliance signal.
    frame = frame[~frame['tone'].isin(['incoherent', 'dontknow'])].copy()

    # Collapse every remaining non-"complied" label into "rejected".
    is_binary = frame['tone'].isin(['complied', 'rejected'])
    frame.loc[~is_binary, 'tone'] = 'rejected'

    return frame[text_source].tolist(), frame['tone'].tolist()
def split_data(X, y):
    """Split (X, y) into a 70/15/15 train/validation/test partition."""
    # First carve off 30% for validation+test, then split that portion in half.
    X_train, X_holdout, y_train, y_holdout = train_test_split(X, y, test_size=0.3, random_state=0)
    X_val, X_test, y_val, y_test = train_test_split(X_holdout, y_holdout, test_size=0.5, random_state=0)
    return X_train, X_val, X_test, y_train, y_val, y_test
| 883 | 31.740741 | 98 | py |
chatgpt-refusals | chatgpt-refusals-main/bert_results.py | import os
import torch
from transformers import BertTokenizerFast, BertForSequenceClassification, Trainer, TrainingArguments
import data_processing
class TextDataset(torch.utils.data.Dataset):
    """Map-style PyTorch dataset wrapping tokenizer encodings plus labels."""

    def __init__(self, encodings, labels):
        # `encodings` maps field name (e.g. 'input_ids') -> per-example values.
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        # Materialize one example as a dict of tensors, including its label.
        sample = {name: torch.tensor(values[idx])
                  for name, values in self.encodings.items()}
        sample['labels'] = torch.tensor(self.labels[idx])
        return sample

    def __len__(self):
        return len(self.labels)
class TextClassification:
    """Bundles the BERT tokenizer, a fitted label encoder, and the encoded dataset."""

    def __init__(self, X, y, label_encoder):
        # Same cased BERT tokenizer the model was fine-tuned with
        self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased')
        self.label_encoder = label_encoder
        # Map string labels to the integer ids the model was trained on
        self.y = self.label_encoder.transform(y)
        self.dataset = self._prepare_dataset(X, self.y)

    def _prepare_dataset(self, X, y):
        # Pad to the longest sequence in the batch, truncating at BERT's
        # 512-token input limit.
        tokenized = self.tokenizer(X, truncation=True, padding='longest', max_length=512)
        return TextDataset(tokenized, y)
def inference(X, y, text_source):
    """Run the fine-tuned BERT classifier and return decoded label predictions.

    Loads the label encoder and model saved under ``bert_assets/<text_source>``
    at training time, classifies every example in X, and maps the predicted
    class ids back to their string labels.
    """
    # Label encoder saved during training maps class names <-> integer ids
    label_encoder = torch.load(f'bert_assets/{text_source}/label_encoder.pth')
    # Tokenize/encode the inputs exactly as during training
    classifier = TextClassification(X, y, label_encoder)
    # Restore the fine-tuned model and move it to GPU when available
    model = BertForSequenceClassification.from_pretrained(f'bert_assets/{text_source}')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    trainer = Trainer(model=model, args=TrainingArguments(output_dir='bert_assets'))
    # Predict, take the argmax class id per sample, then decode to label names
    output = trainer.predict(classifier.dataset)
    class_ids = output.predictions.argmax(-1)
    return classifier.label_encoder.inverse_transform(class_ids)
def evaluate_model(dataset, text_source):
    """Evaluate the saved classifier on one dataset and print its accuracy.

    Args:
        dataset: dataset name; 'all_hand_labeled' (evaluated on its held-out
            test split) or 'quora_insincere_hand_labeled' (evaluated whole).
        text_source: which column to classify ('prompt' or 'response').

    Raises:
        ValueError: if `dataset` is not one of the two supported names.
    """
    filepath = f'data/{dataset}.json'
    print(f'Classifying {text_source}s in {filepath}...')

    # Load and split the data
    X, y = data_processing.preprocess_data(filepath, text_source)
    if dataset == 'all_hand_labeled':
        # Evaluate only on the held-out test split used during training
        _, _, X_test, _, _, y_test = data_processing.split_data(X, y)
    elif dataset == 'quora_insincere_hand_labeled':
        X_test = X
        y_test = y
    else:
        # Fail fast: previously an unknown name fell through to a NameError
        # on X_test below.
        raise ValueError(f'Unknown dataset: {dataset}')

    # Run inference to get the model's predictions on the test set
    predictions = inference(X_test, y_test, text_source)

    # Calculate and print the model's accuracy
    correct_predictions = sum(pred == true for pred, true in zip(predictions, y_test))
    accuracy = correct_predictions / len(y_test)
    print(f'Accuracy: {accuracy*100:.2f}%')
if __name__ == '__main__':
    # Disable tokenizers parallelism to avoid a warning
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    # Responses: evaluated on the held-out test split of the hand-labeled set
    evaluate_model('all_hand_labeled', 'response')
    print()
    # Prompts: evaluated on the whole hand-labeled Quora Insincere set
    evaluate_model('quora_insincere_hand_labeled', 'prompt')
| 2,872 | 35.367089 | 101 | py |
SHIPS | SHIPS-master/PSFfitting.py | ############################
# Date: 06/04/2020
# Title: PSF-fitting script for SHIPS
# Author: J. Bodensteiner (2019). Edits: A. Rainot (2020)
# Description: Use this script to extract a spectrum of sources in IRDIS images with the SHIPS pipeline.
# VIP version: 0.9.11 (Rainot edit.)
# Python version: 3 ONLY
############################
# Load libraries
import lmfit
import pandas as pd
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.stats import sigma_clipped_stats
from astropy.visualization import simple_norm
from astropy.modeling.fitting import LevMarLSQFitter
from photutils.psf import photometry, DAOGroup, EPSFBuilder
from photutils import CircularAperture, CircularAnnulus, aperture_photometry
from astropy.nddata import NDData
from photutils.psf import extract_stars
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
##############################################################################
# 0. class and function definition
##############################################################################
class Star:
    """Lightweight record for one star: pixel position, identifier, magnitude.

    NOTE(review): the default magnitude is the *string* 'None', not the
    None singleton — confirm downstream comparisons expect that.
    """
    def __init__(self, xcoord, ycoord, star_id, mag='None'):
        self.xcoord = xcoord # pixel x coordinate
        self.ycoord = ycoord # pixel y coordinate
        self.star_id = star_id # star id
        self.magnitude = mag # magnitude from input list
############################################################################
# write a fits file containing a 2D image
def write_2Dimage(header, image, outfilename):
    """Write a 2D image with the given header as the primary HDU of a new FITS file."""
    output = fits.HDUList([fits.PrimaryHDU(data=image, header=header)])
    output.writeto(outfilename)
# round odd PSF size to the lower odd number
def round_down_to_odd(f):
    """Round to the nearest odd number below the input.

    Evaluates ceil(f) // 2 * 2 - 1: even integers map to n - 1, exact odd
    integers map to n - 2 (i.e. always strictly below an odd input).
    """
    half = np.ceil(f) // 2
    return half * 2 - 1
# Gaussian function
def gaussian(x, height, center, std):
    """Unnormalized Gaussian: height * exp(-(x - center)^2 / (2 * std^2))."""
    offset = x - center
    return height * np.exp(-offset**2 / (2 * std**2))
# fit a Gaussian
def f_single(params, wavelength, fluxes):
    """Squared residuals of *fluxes* against a single-Gaussian model.

    *params* must expose 'h', 'cen' and 'std' via item access (e.g. an
    lmfit.Parameters object). Suitable as an lmfit objective function.
    """
    model = gaussian(wavelength, params['h'], params['cen'], params['std'])
    return (fluxes - model)**2
# get PSF from the image of the central star
def prep_psf(psf_data, x_star, y_star, n_resample=6, plot=False):
    """Build an effective PSF (ePSF) from the central star and fit its width.

    Parameters
    ----------
    psf_data : array_like
        2D image containing the star used to build the ePSF.
        NOTE: a sigma-clipped median background is subtracted from this
        array *in place*.
    x_star, y_star : float
        Pixel coordinates of the star in `psf_data`.
    n_resample : int, optional
        Oversampling factor passed to `EPSFBuilder` (default 6).
    plot : bool, optional
        If True, show diagnostic plots of the star cutout, the ePSF and the
        Gaussian fit to a central cut-through.

    Returns
    -------
    epsf
        The fitted effective PSF model.
    gauss_std
        Standard deviation (lmfit parameter) of a Gaussian fitted to a
        central cut-through of the ePSF.
    """
    # create bkg subtracted cutouts around stars used for creation of ePSF
    stars_tbl = Table()
    stars_tbl['x'] = [x_star]  # coordinates of central star
    stars_tbl['y'] = [y_star]

    # subtract a median background from the PSF image
    mean_val, median_val, std_val = sigma_clipped_stats(psf_data, sigma=2.)
    psf_data -= median_val

    # extract 61 x 61 px cutouts around the stars => whole image
    stars = extract_stars(NDData(data=psf_data), stars_tbl, size=61)

    # initialize EPSF Builder object with the requested oversampling factor
    # (bug fix: `n_resample` used to be overwritten by a hard-coded 6 here,
    # silently ignoring the caller's value)
    epsf_builder = EPSFBuilder(oversampling=n_resample, maxiters=10,
                               progress_bar=False)
    # build the actual ePSF from the cutouts
    epsf, fitted_stars = epsf_builder(stars)

    # fit a Gaussian to a horizontal cut through the middle of the ePSF
    len_x = len(epsf.data[0, :])
    x = np.linspace(0, 100, len_x)

    params = lmfit.Parameters()
    params.add('h', 0.03, min=0.025, max=0.045, vary=True)
    params.add('std', 5, min=1, max=20, vary=True)
    params.add('cen', 50, min=47, max=53, vary=True)

    # row indices of the cut-throughs shown in the diagnostic plot
    xvals = [int(len(x)/4), int(len(x)/2.1), int(len(x)/2)]

    # fit only the central cut-through
    cutthrough = epsf.data[int(len(x)/2), :]
    minimizer = lmfit.Minimizer(f_single, params, fcn_args=(x, cutthrough))
    result = minimizer.minimize()
    h, gauss_std = result.params['h'], result.params['std']
    cen = result.params['cen']

    if plot is True:
        # plot the star (if its only one)
        fig_stars, ax_stars = plt.subplots()
        norm = simple_norm(stars[0], 'log', percent=99.)
        colbar_in = ax_stars.imshow(stars[0], origin='lower', cmap='inferno',
                                    norm=norm)
        fig_stars.colorbar(colbar_in)
        fig_stars.suptitle('Star used to fit the PSF')

        fig_psfima, ax_psf_ima = plt.subplots()
        colbar = ax_psf_ima.imshow(epsf.data, origin='lower', cmap='inferno')
        fig_psfima.suptitle("Fitted ePSF")
        fig_psfima.colorbar(colbar)

        fig_psf, ax_psf = plt.subplots()
        for xval in xvals:
            cutthrough = epsf.data[xval, :]
            lab = 'x = ' + str(xval)
            ax_psf.plot(x, cutthrough, label=lab)
        g = gaussian(x, h, cen, gauss_std)
        ax_psf.plot(x, g, label='Gaussian fit')
        ax_psf.set_xlabel('y coordinate')
        ax_psf.set_ylabel('ePSF')
        ax_psf.legend()
        plt.show()

    return epsf, gauss_std
# Define the PSF-fitting function
def psf_fitting(array, input_psf, fwhm, comp_pos, psf_pos, full_output=False):
    """ Applies PSF fitting to given sources in order to retrieve positions & flux parameters with errors.

    Parameters
    ----------
    array : array_like
        Input 2D frame.
    input_psf : array_like
        2d array psf.
    fwhm : float
        FWHM of the PSF.
    comp_pos : float or array_like
        Positions (X,Y) of companions.
    psf_pos : float or array_like
        Positions (X,Y) of the center of the PSF.
    full_output : True or False
        Also return contrast magnitudes and their errors.

    Returns
    -------
    x_comp, xerr_comp : array_like
        Best-fit X positions and errors for every input companion.
    y_comp, yerr_comp : array_like
        Best-fit Y positions and errors for every input companion.
    f_comp, ferr_comp : array_like
        Best-fit flux photometry and errors for every input companion.
    flux_star, fluxerr_star : float
        Best-fit flux photometry and error of the central star.
    c_comp, cerr_comp : array_like (only if full_output=True)
        Contrast magnitudes of the companions and their errors.
    """
    pos_comp = np.array(comp_pos)  # companion positions as an (N, 2) array
    x_centralstar, y_centralstar = psf_pos[0], psf_pos[1]  # pixel
    psf_data = input_psf

    # Build the effective PSF from the central star and fit its width
    epsf, gauss_std = prep_psf(psf_data, x_centralstar, y_centralstar, plot=False)

    ############################################################################
    # definitions for the photometry
    ############################################################################
    aper_rad = 1.*fwhm
    # fitshape must be odd; derive it from the PSF image size
    fshape = int(round_down_to_odd(len(input_psf[0])))
    phot = photometry.BasicPSFPhotometry(group_maker=DAOGroup(15.),
                                         psf_model=epsf,
                                         bkg_estimator=None,
                                         fitter=LevMarLSQFitter(),
                                         fitshape=(fshape),
                                         aperture_radius=aper_rad)

    ############################################################################
    # Measure the flux of the central star
    ############################################################################
    pos = Table(names=['x_0', 'y_0'], data=[[x_centralstar], [y_centralstar]])
    result_tab = phot.do_photometry(image=psf_data, init_guesses=pos)
    flux_star, fluxerr_star = result_tab['flux_fit'][0], result_tab['flux_unc'][0]

    print("######################################################################")
    print("The guess flux is : %2.4f" % result_tab['flux_0'])
    print("The measured flux of the central star is: % 2.4f + /- % 2.4f"
          % (flux_star, fluxerr_star))
    print("######################################################################")

    residual_image = phot.get_residual_image()

    # plot the input image and the residual image for the central star
    fig_res = plt.figure(figsize=(8, 8))
    ax1 = fig_res.add_subplot(1, 2, 1)
    norm = simple_norm(psf_data, 'sqrt', percent=95.)
    ax1.imshow(psf_data, cmap='viridis', aspect=1, origin='lower', norm=norm)
    ax1.set_title('Input Image')
    # mark input position of central star
    e = Circle(xy=(x_centralstar, y_centralstar), radius=aper_rad)
    e.set_facecolor('none')
    e.set_edgecolor(color='k')
    ax1.add_artist(e)
    ax2 = fig_res.add_subplot(1, 2, 2)
    ax2.imshow(residual_image, cmap='viridis', aspect=1, origin='lower')
    ax2.set_title('Residual Image')
    # mark fitted position of central star
    e = Circle(xy=(result_tab['x_fit'], result_tab['y_fit']), radius=aper_rad)
    e.set_facecolor('none')
    e.set_edgecolor(color='crimson')
    ax2.add_artist(e)
    plt.show()

    ############################################################################
    # Load in the actual data
    ############################################################################
    image = array

    # Coordinates of companions
    xpos = pos_comp[:, 0]
    ypos = pos_comp[:, 1]
    ids = np.linspace(1, len(pos_comp), len(pos_comp))

    # make a list of Star objects from the input positions
    # (bug fix: the list used to be built from range(1, len(xpos)), which
    # dropped the first companion AND made the fitting loop below index
    # past the end of the list on its last iteration)
    starlist = [Star(xpos[i], ypos[i], ids[i]) for i in range(len(xpos))]

    # plot them all on the image
    cols = ['crimson', 'cyan', 'C1', 'deepskyblue', 'limegreen', 'magenta',
            'limegreen', 'red', 'cyan', 'C1', 'deepskyblue',
            'crimson', 'cyan', 'C1', 'deepskyblue', 'limegreen', 'magenta',
            'limegreen', 'red', 'cyan', 'C1', 'deepskyblue']
    fig, ax = plt.subplots(figsize=(8, 8))
    norm = simple_norm(image, 'sqrt', percent=95.)
    im1 = ax.imshow(image, aspect=1, origin='lower', cmap='Greys', norm=norm)
    for s in range(len(starlist)):
        star = starlist[s]
        e = Circle(xy=(star.xcoord, star.ycoord), radius=2.5)
        e.set_facecolor('none')
        e.set_edgecolor(color=cols[s],)
        ax.add_artist(e)
        ax.annotate(str(star.star_id), (star.xcoord, star.ycoord), (3, 10),
                    color=cols[s], textcoords='offset points', fontsize=12)
    plt.xlabel("x [px]", fontsize=13)
    plt.ylabel("y [px]", fontsize=13)
    plt.show()

    # half-size of the zoom-in plotting window (px)
    wind = 25

    # define arrays to store the fitted values
    x_comp = np.zeros_like(xpos)
    xerr_comp = np.zeros_like(xpos)
    y_comp = np.zeros_like(xpos)
    yerr_comp = np.zeros_like(xpos)
    f_comp = np.zeros_like(xpos)
    ferr_comp = np.zeros_like(xpos)
    c_comp = np.zeros_like(xpos)     # Contrast magnitudes
    cerr_comp = np.zeros_like(xpos)  # Contrast magnitude errors

    # LOOP OVER STARS
    for i in range(len(xpos)):
        star = starlist[i]

        # estimate the local background from an annulus around the source
        annulus = CircularAnnulus((star.xcoord, star.ycoord), 15, 25)
        phot_annulus = aperture_photometry(image, annulus, method='subpixel',
                                           subpixels=5)
        mean_bkg = phot_annulus['aperture_sum'] / annulus.area

        # subtract the local background on a COPY of the frame
        # (bug fix: the frame used to be modified in place inside this loop,
        # so the corrections of successive companions accumulated)
        img_corr = image - mean_bkg[0]

        pos = Table(names=['x_0', 'y_0'],
                    data=[[star.xcoord], [star.ycoord]])
        result_tab = phot.do_photometry(image=img_corr, init_guesses=pos)
        residual_image = phot.get_residual_image()

        x_comp[i] = result_tab['x_fit'][0]
        xerr_comp[i] = result_tab['x_0_unc'][0]
        y_comp[i] = result_tab['y_fit'][0]
        yerr_comp[i] = result_tab['y_0_unc'][0]
        f_comp[i] = result_tab['flux_fit'][0]
        ferr_comp[i] = result_tab['flux_unc'][0]

        flux_comp = result_tab['flux_fit'][0]
        fluxerr_comp = result_tab['flux_unc'][0]

        # contrast relative to the central star
        flux_ratio = flux_comp / flux_star
        contrast_mag = -2.5 * np.log10(flux_ratio)
        contrast_mag_err = 2.5 / np.log(10) * ((fluxerr_comp / flux_comp)**2 +
                                               (fluxerr_star / flux_star)**2)**0.5

        print("-----------------------------------------------------------------")
        print("Star " + str(star.star_id))
        print("The measured xpos is: %2.4f +/- %2.4f" % (x_comp[i], xerr_comp[i]))
        print("The measured ypos is: %2.4f +/- %2.4f" % (y_comp[i], yerr_comp[i]))
        print("The measured flux is: %2.4f +/- %2.4f" % (flux_comp, fluxerr_comp))
        if full_output == True:
            print("The contrast magnitude is: %2.4f +/- %2.4f" % (contrast_mag, contrast_mag_err))
        c_comp[i] = contrast_mag
        cerr_comp[i] = contrast_mag_err

        # plot the result
        fig = plt.figure(figsize=(8, 8))
        ax = fig.add_subplot(1, 2, 1)
        norm = simple_norm(img_corr, 'sqrt', percent=95.)
        ax.imshow(img_corr, cmap='viridis', aspect=1, origin='lower', norm=norm)
        ax.set_title('Star ' + str(star.star_id))
        e = Circle(xy=(star.xcoord, star.ycoord), radius=aper_rad)
        e.set_facecolor('none')
        e.set_edgecolor(color='k')
        ax.add_artist(e)
        ax.set_xlim([star.xcoord+wind, star.xcoord-wind])
        ax.set_ylim([star.ycoord+wind, star.ycoord-wind])
        ax.set_xlabel('x coordinate [px]')
        ax.set_ylabel('y coordinate [px]')

        ax2 = fig.add_subplot(1, 2, 2)
        norm = simple_norm(img_corr, 'sqrt', percent=95.)
        ax2.imshow(residual_image, cmap='viridis', aspect=1,
                   origin='lower', norm=norm)
        # fitted position (crimson) vs. input position (black)
        e = Circle(xy=(result_tab['x_fit'], result_tab['y_fit']), radius=aper_rad)
        e.set_facecolor('none')
        e.set_edgecolor(color='crimson')
        ax2.add_artist(e)
        e = Circle(xy=(star.xcoord, star.ycoord), radius=aper_rad)
        e.set_facecolor('none')
        e.set_edgecolor(color='k')
        ax2.add_artist(e)
        ax2.set_xlim([star.xcoord+wind, star.xcoord-wind])
        ax2.set_ylim([star.ycoord+wind, star.ycoord-wind])
        ax2.set_title('Residual Image')
        ax2.set_xlabel('x coordinate [px]')
        ax2.set_ylabel('y coordinate [px]')
        plt.show()
        print("-----------------------------------------------------------------")

    if full_output == True:
        return x_comp,xerr_comp,y_comp,yerr_comp,f_comp,ferr_comp,flux_star,fluxerr_star,c_comp,cerr_comp
    else:
        return x_comp,xerr_comp,y_comp,yerr_comp,f_comp,ferr_comp,flux_star,fluxerr_star
| 15,962 | 39.10804 | 106 | py |
SHIPS | SHIPS-master/ships_ifs.py | ############################
# Date: 07/04/2020
# Title: Running script for SHIPS for IFS data
# Description: Use this script to run SHIPS for IFS data. In this script you'll find all the necessary parameters to run SHIPS. ONLY SPHERE-DC DATA FOR NOW. VIP is used.
# VIP version: 0.9.11 (Rainot edit.)
# Python version: 3 ONLY
############################
# Set up your parameters
## Define images to analyse
cube_filepath = '/Users/alan/Documents/PhD/Data/SPHERE/IFS/QZCardone/ifs_sortframes_dc-IFS_SCIENCE_REDUCED_SPECTRAL_MASTER_CUBE_SORTED-center_im_sorted.fits'
wavelength_filepath = '/Users/alan/Documents/PhD/Data/SPHERE/IFS/QZCardone/ifs_sortframes_dc-IFS_SCIENCE_LAMBDA_INFO-lam.fits'
angles_filepath = '/Users/alan/Documents/PhD/Data/SPHERE/IFS/QZCardone/ifs_sortframes_dc-IFS_SCIENCE_PARA_ROTATION_CUBE_SORTED-rotnth_sorted.fits'
psf_filepath = '/Users/alan/Documents/PhD/Data/SPHERE/IFS/QZCardone/psf_corrected_final.fits'
## Photometry
comp_pos = (110.,54.) # Companion position in pixels from the center of the frame (X,Y)
psf_pos = (32, 32) # PSF position in pixels (X,Y)
frame_cent = (145,145) # Center of the frame
radial_dist = 98 # Radial distance of companion in pixels
position_angle = 159. # Position angle of companion in degrees
noise_aperture_pos_comp = (92,104) # Position in pixels of the circular annulus aperture for noise measurement in the case of the companion
noise_aperture_pos_psf = (13,23) # Position in pixels of the circular annulus aperture for noise measurement in the case of the PSF
size_psf = 31 # What size PSF would you like to use? ODD VALUE ONLY!!
## Computing power
ncores = 4 # Number of cores you are willing to share for the computation
## Do you want to see the image?
see_cube = False # Original cube
see_collapsed_cube = False # Collapsed cube
see_psf_norm = False # Normalised PSF
see_cube_centre = False # Check if the image is centered correctly
## PCA
ncomp_pca = 1 # Number of principal components for PCA
opti_pca = False # Optimise the number of PCA components?
source_pca = (28.,159.) # Source where to optimise the PCA
## SNR maps
snr_maps = False # Would you like to make and save an SNR map to disk?
snr_map_file = '/home/alan/data/Backup_macbook/SPHERE/IFS/QZCardone/SNRmap_VIP11.fits' # Finish the file with .fits
## Detection
adi_frame = False # Would you like to apply ADI on the frame?
adi_plot = False # Would you like to see the resulting plot?
adi_min_scale = -1 # Minimum colour scale for the ADI plot
adi_max_scale = 1 # Maximum colour scale for the ADI plot
detection = False # Would you like the algorithm to detect sources for you? !! WARNING: this is a simple detection !!
detect_sigma = 5 # What sigma limit would you like for the detection?
## Contrast curves
contrast_curves = False # True or False !! computationally intensive !!
cube_free_file = "/Users/alan/Documents/PhD/Data/SPHERE/IFS/HD93403/cube_free.fits" # Filepath to the cube free of sources/companions
n_branches = 1 # Number of branches for contrast curves
save_contrcurve = True # Save the contrast curve values to a file?
contrcurve_savefile = '/Users/alan/Documents/PhD/Data/SPHERE/IFS/CPD-582611/contrast_curve_IFS.txt'# Filepath to save the contrast curve distance and delta_mag
## Photometric errors of PSF
psf_errors = False # Compute the photometric errors of the central star's PSF
psf_errors_save = False # Save the errors to a file?
psf_errors_file = "/Users/alan/Documents/PhD/Data/SPHERE/IFS/QZCardone/PSF_errors.txt"
## Aperture Photometry
plot_aper = False # Plot the aperture photometry of the detected companion?
## Spectrum extraction with Simplex Nelder-Mead optimisation
extract_spec = False # Will start the simplex Nelder-Mead optimisation for spectrum extraction
ann_width = 3 # Annulus width of Simplex
aper_radius = 3 # Aperture Radius of PCA
save_spec = False # Save the spectrum to ascii file
sspec_file = '/Users/alan/Documents/PhD/Data/SPHERE/IFS/HD93403/VIP_simplex.txt' # Filepath to save the Simplex spectrum
plot_sspec = False # Plot the resulting spectrum?
## Spectrum extraction with MCMC
extract_mcmc = False # Will compute the MCMC for all 39 wavelengths !! This takes ~1,5h per wavelength and is very computer intensive !!
source = 'HD93403' # Give name for your source
mcmc_path = '/home/alan/data/Backup_macbook/SPHERE/IFS/HD93403/mcmc/' # Directory where MCMC results will be stored
plot_mcmc = False # Plot the mcmc errors with simplex?
## Reading MCMC results
read_mcmc = False # Do you wish to read the MCMC results?
source = 'QZCar' # Give name for your source
mcmc_path = '/Users/alan/Documents/PhD/Data/SPHERE/IFS/QZCardone/spectra/' # Directory where MCMC results are stored
## Load calibrated FASTWIND models of the central star
fastwind = False # Use FASTWIND model spectra for the star
fastwind_path = '/home/alan/data/Backup_macbook/SPHERE/IFS/HD93403/fastwind_model.txt' # Directory where the FASTWIND flux are
rad_fast = 22.1 # Radius of model star
dist_fast = 100. # Distance to consider for the flux of the calibrated spectrum in Ro
## Compute calibrated spectrum of companion
calib_spec = False # Do you wish to calibrate the spectrum of the companion?
save_calib_spec = False # Would you like to save the calibrated spectrum & associated error?
calib_star_spec_path = '/home/alan/data/Backup_macbook/SPHERE/IFS/HD93403/fastwind_model.txt' # Path to calibrated spectrum of central star
# sspec_file = '/Users/alan/Documents/PhD/Data/SPHERE/IFS/HD93403/VIP_simplex.txt' # Path to spectrum file
cspec_file = '/home/alan/data/Backup_macbook/SPHERE/IFS/HD93403/VIP_calib_spectra.txt' # Path to calibrated spectrum
## Magnitude contrasts
mag_contr = False # Do you to calculate the magnitude contrasts for your companion?
print_mag_contr = False # Do you wish to print the magnitude contrasts to the screen?
## Absolute magnitude !! Work in Progress !!
abs_mag = False # Would you like to calculate the absolute magnitudes of your companion?
print_abs_mag = False # Do you wish to print the absolute magnitudes to the screen?
star_mag_Y = 5.75 # Magnitude of central star Y band
star_mag_J = 5.551 # Magnitude of central star J band
star_mag_H = 5.393 # Magnitude of central star H band
star_mag_V = 6.24 # Magnitude of central star V band
star_dist = 2300. # Distance to central star in parsec
# ---------------------------------------------------------------------------
# Running script (DO NOT MODIFY)
# Some definitions
## Load libraries
import __init__
import sys
import matplotlib
import vip_hci
from hciplot import plot_frames, plot_cubes
from vip_hci.metrics.fakecomp import cube_inject_companions, frame_inject_companion, normalize_psf
import numpy as np
import scipy
import astropy.io.fits as fits
from astropy import wcs
import photutils
from photutils import CircularAperture
from photutils import CircularAnnulus
import matplotlib.pyplot as plt
import glob
import math as mh
from scipy.integrate import quad, dblquad
### Error libraries
from uncertainties import ufloat
from uncertainties.umath import *
from uncertainties import unumpy
## Define constants
c = 299792458. # Speed of light
Ro = 6.957e8 # Solar Radius
sr2pc = 44334448.0068964 # Convert steraradians to parsec
pxscale = 0.0074 # IFS pixel scale in arcsec/pixel
PA = position_angle + 90 # Correct for VIP unconventional rotation axis
## Open image files
cube = vip_hci.fits.open_fits(cube_filepath)
wl = vip_hci.fits.open_fits(wavelength_filepath)
angs = vip_hci.fits.open_fits(angles_filepath)
psf = vip_hci.fits.open_fits(psf_filepath)
## Define some Parameters
# psf = np.median(psf, axis=1) # Take the median of all PSFs
psf_scaled = np.zeros_like(psf) # The psf will need to be scaled
flevel = np.zeros_like(cube[:,0,0,0]) # Flux level for the companion
flevel = np.array(flevel) # Redefinition - why?
## Check the RAW data cubes
if see_cube == True:
ds9 = vip_hci.Ds9Window()
ds9.display(cube[0,0])
## Get FWHM of images & normalised PSF
psf_med = vip_hci.preproc.cosmetics.cube_crop_frames(psf, size_psf, xy=psf_pos, verbose=True, force=True) # Resize the PSF
psf_norm, maxflux, fwhm = vip_hci.metrics.normalize_psf(psf_med, fwhm='fit',size=None, threshold=None,mask_core=None, model='gauss',imlib='opencv',interpolation='lanczos4',force_odd=True,full_output=True,verbose=False) # maxflux is a dummy variable
### Plot it
if see_psf_norm == True:
plot_frames(psf_norm[0], grid=True, size_factor=10)
## Check if the cube is centred correctly by plotting
if see_cube_centre == True:
plot_frames(vip_hci.preproc.frame_crop(cube[0,0], 50), grid=True, size_factor=10)
## Optimise the number of PCA components
if opti_pca == True:
vip_hci.pca.pca(cube[0], angs, fwhm=fwhm[0], source_xy=(129,169),mask_center_px=None, ncomp=(1, 41, 2))
sys.exit("PCA optimised. To continue, please input the PCA value in the script and skip this process.")
## Detection with VIP, for now only with the first wavelength
if adi_frame == True:
### Compute the ADI frame for all 39 wavelengths and 48 rotations
print("Computing the ADI frame...")
fr_adi = vip_hci.medsub.median_sub(cube, -angs, scale_list=wl, mode='fullfr') # 2D ADI frame
print("Done!")
### Plot the frame
if adi_plot == True:
plot_frames(fr_adi, vmin=adi_min_scale, vmax=adi_max_scale)
### Compute the detection of sources
if detection==True:
detect = vip_hci.metrics.detection(fr_adi, fwhm=fwhm[0], psf=psf_norm[0], debug=False, mode='log', snr_thresh=detect_sigma,bkg_sigma=detect_sigma,matched_filter=True,vmin=adi_min_scale,vmax=adi_max_scale,verbose=False,plot=True) # Sigma limit provided by user
print("Detected sources : " , "\n", detect)
detect_pos = np.array(detect) # Converted to array in order to be used later
sys.exit("Sources detected. To continue, please input the target coordinates in the script and skip this process.")
# Stellar photometry of the companion
## Collapse the images for better photometry measurement
cube_derot = np.zeros_like(cube)
cube_wl_coll = np.zeros_like(cube[:,0])
for i in range(len(wl)):
cube_derot[i] = vip_hci.preproc.cube_derotate(cube[i],angs) # Rotate the images to the same north
cube_wl_coll[i] = vip_hci.preproc.cube_collapse(cube_derot[i]) # Collapse along the rotation axis - 3D image
# cube_coll = vip_hci.preproc.cube_collapse(cube_derot,wl_cube=False) # Collapse along the wavelength axis - 2D image
# cube_wl_coll = np.zeros_like(cube[:,0,:,:])
# for i in range(len(wl)):
# cube_wl_coll[i] = vip_hci.hci_postproc.median_sub(cube[i],-angs,fwhm=fwhm[i],verbose=False) # Rotate & collapse along the rotation axis - 3D image
# cube_derot = vip_hci.preproc.cube_derotate(cube,angs) # Rotate the images to the same north
## Check the collapsed data cubes
if see_collapsed_cube == True:
ds9 = vip_hci.Ds9Window()
ds9.display(cube_wl_coll[0]) # cube_wl_coll on the left and cube_coll on the right
## Aperture photometry of companions and PSF
### Define photometry
noise_phot = np.zeros_like(wl) #Noise photometry
psf_final_sum = np.zeros_like(wl) #PSF photometry
final_sum = np.zeros_like(wl) #Companion photometry
### Apertures
aper_noise_comp = photutils.CircularAnnulus(frame_cent,noise_aperture_pos_comp[0],noise_aperture_pos_comp[1])
aper_noise_psf = photutils.CircularAnnulus(psf_pos,noise_aperture_pos_psf[0],noise_aperture_pos_psf[1])
### Aperture photometry
for i in range(0,wl.shape[0]):
### Apertures dependent on channel
aper_comp = photutils.CircularAperture(comp_pos, 0.5*fwhm[i])
aper_psf = photutils.CircularAperture(psf_pos, 0.5*fwhm[i])
### Noise
phot_noise = photutils.aperture_photometry(cube_wl_coll[i], aper_noise_comp)
noise_phot[i] = np.array(phot_noise['aperture_sum'])
### PSF
phot_psf = photutils.aperture_photometry(psf[i], aper_psf)
phot_psf_noise = photutils.aperture_photometry(psf[i], aper_noise_psf)
psf_bkg_mean = phot_psf_noise['aperture_sum'] / aper_noise_psf.area()
psf_bkg_sum = psf_bkg_mean * aper_psf.area()
# psf_final_sum[i] = phot_psf['aperture_sum'] - psf_bkg_sum
psf_final_sum[i] = phot_psf['aperture_sum'] - psf_bkg_sum
### Companion
phot = photutils.aperture_photometry(cube_wl_coll[i], aper_comp)
bkg_mean = (phot_noise['aperture_sum']-phot['aperture_sum']) / (aper_noise_comp.area()-aper_comp.area())
bkg_sum = bkg_mean * aper_comp.area()
final_sum[i] = maxflux[i] - bkg_sum
### Scaling the PSF for normalisation -- SHOULD I JUST TAKE PSF_NORM INSTEAD?
psf_scaled = np.zeros_like(psf)
for i in range (0,len(psf)):
psf_scaled[i] = psf[i]/psf_final_sum[i]
## SNR maps
if snr_maps == True:
snrmap = vip_hci.metrics.snrmap(vip_hci.pca.pca(cube, -angs, scale_list=wl, ncomp=ncomp_pca, verbose=True), fwhm[0], nproc=ncores, plot=True)
vip_hci.fits.write_fits(snr_map_file,snrmap) # Write SNR maps to file
## Contrast curve
if contrast_curves == True:
cube_negfc = vip_hci.fits.open_fits(cube_free_file)
print("Computing contrast curve...")
contrcurve = vip_hci.metrics.contrast_curve(cube_negfc,-angs,psf_norm,np.average(fwhm),pxscale,maxflux,vip_hci.pca.pca,nbranch=n_branches,
dpi=300, student=False, debug=True ,plot=True, verbose=True, full_output=True, ncomp=ncomp_pca, scale_list=wl)
if save_contrcurve == True:
contr_dist = contrcurve[0]['distance_arcsec']
contr_mag = -2.5*np.log10(contrcurve[0]['sensitivity_gaussian'])
contr_curve = np.zeros((len(contr_mag),2))
contr_curve[:,0] = contr_dist
contr_curve[:,1] = contr_mag
np.savetxt(contrcurve_savefile,contr_curve, delimiter=' ') # Saves to file)
print("Contrast curve saved!")
## PSF error calculation
if psf_errors == True:
psferr = vip_hci.fits.open_fits(psf_filepath) # Open the raw PSFs
stddev_psf = np.zeros_like(wl) # Create an array for the stored standard deviation
for i in range(len(wl)): # Loop over the wavelengths
psferr_med = vip_hci.preproc.cosmetics.cube_crop_frames(psferr[i], size_psf, xy=psf_pos, verbose=True, force=True) # Resize the PSF
psf_norm_err, maxflux_err, fwhm_err = vip_hci.metrics.normalize_psf(psferr_med, fwhm='fit',size=None, threshold=None,mask_core=None, model='gauss',imlib='opencv',interpolation='lanczos4',force_odd=True,full_output=True,verbose=False) # Measure the maximum flux for each PSF
stddev_psf[i] = np.std(maxflux_err,ddof=1) # Calculate the standard deviation for the PSFs
if psf_errors_save: # Save the error
np.savetxt(psf_errors_file,stddev_psf,delimiter=' ') # Saves to file
print("PSF errors saved to file!")
# Spectrum extraction with NM (Nelder-Mead simplex minimisation of a negative
# fake companion, one fit per wavelength channel).
if extract_spec == True:
    ## Define some parameters
    comp_xycoord = [(comp_pos[0],comp_pos[1])] # Companion coords
    f_guess_pl = max(final_sum) # Flux first guess as the maximum value of the flux
    # Flux grid searched by firstguess before the simplex refinement (0 to 3x the guess)
    f_range = np.linspace(0.*f_guess_pl,3.*f_guess_pl,100)
    p_in = np.array([radial_dist,PA]) # Regroup companion positions
    simplex_options = {'xtol':1e-2, 'maxiter':500, 'maxfev':1000} # Set the simplex options
    simplex_guess = np.zeros((len(wl),3)) # Set the simplex variable: r, PA, flux
    ## Start Simplex
    for i in range(0,len(wl)):
        print("Wavelength index: ", i + 1) # 39 wavelengths for IFS
        # NOTE: angles are negated because VIP rotates in the opposite sense
        simplex_guess[i] = vip_hci.negfc.firstguess(cube[i],-angs,psf_norm[i],ncomp=ncomp_pca,plsc=pxscale,planets_xy_coord=comp_xycoord,fwhm=fwhm[i],annulus_width=ann_width,aperture_radius=aper_radius,simplex_options=simplex_options,f_range=f_range,simplex=True,fmerit='sum',collapse='median',svd_mode='lapack',scaling=None,verbose=False,plot=False,save=False)
        #simplex_guess[i] = vip_hci.negfc.simplex_optim.firstguess(cube[i], -angs, psf_norm[i], ncomp=1, plsc=0.0074,fwhm=fwhm[i], annulus_width=3, aperture_radius=2, planets_xy_coord=comp_xycoord, cube_ref=None,svd_mode='lapack',f_range=f_range, simplex=True,fmerit='sum',scaling=None, simplex_options=simplex_options,collapse='median',verbose=False)
        print(simplex_guess[i])
    ## Save the spectrum
    if save_spec == True:
        np.savetxt(sspec_file, simplex_guess, delimiter=' ') # Saves to file
        print("Spectrum saved successfully!")
# Spectrum extraction with MCMC (negative-fake-companion sampling per channel).
if extract_mcmc == True:
    ## If the spectrum was not extracted in this session, reload the simplex
    ## results (radius, PA, flux per channel) from the file written above.
    if extract_spec == False:
        ## The file was written with np.savetxt, so read it back the same way
        ## (replaces a manual line parser that also never closed the handle:
        ## the original ended with a bare `f.close` attribute access).
        simplex_guess = np.loadtxt(sspec_file) # shape (len(wl), 3): r, PA, flux
    instru= 'IFS36059' # Define instrument parameters
    fig_merit='sum' # Summation figure of merit
    outpath = mcmc_path.format(source) # Path to save MCMC files
    print("########## MCMC Sampling starting... ##########")
    nwalkers, itermin, itermax = (100,200,500) # as recommended by Oli W
    for i in range(len(final_sum)): # For each wavelength channel
        initialState = simplex_guess[i] # Take r, PA and flux from simplex
        ## Bounds at +/-25% around the simplex solution (+30% upward for the flux)
        bounds=[[0.75*initialState[0],1.25*initialState[0]],[0.75*initialState[1],1.25*initialState[1]],[0.75*initialState[2],1.30*initialState[2]]]
        output_file = source+'_IFS_wavelength_{}'.format(i) # Save to output file
        chain_40 = vip_hci.negfc.mcmc_negfc_sampling(cube[i], -angs, psf_norm[i], ncomp_pca, pxscale, initialState, ann_width,
                                          aper_radius, cube_ref=None, svd_mode='lapack', nwalkers=nwalkers,
                                          bounds=bounds, niteration_min=itermin,
                                          niteration_limit=itermax, check_maxgap=50, nproc= ncores,
                                          output_file=output_file, display=False,verbosity=1, save=True,
                                          rhat_threshold=1.01, niteration_supp=0, fmerit=fig_merit) # MCMC run per channel
    print("########## MCMC Sampling done! ##########")
## Read MCMC files: load the per-channel result pickles written by the sampling
## step and summarise the posteriors (most likely value, confidence interval,
## Gaussian fit) for radius, position angle and flux.
if read_mcmc == True:
    import pickle # Import important MCMC libraries
    pickler={}
    mcmc_result={}
    outpath = mcmc_path.format(source) # Path where the MCMC results are stored
    for i in range(0,len(wl)): # Read all channels and store them to variables
        with open(outpath+source+'_IFS_wavelength_{}/MCMC_results'.format(i),'rb') as fi:
            pickler["myPickler{}".format(i)] = pickle.Unpickler(fi)
            mcmc_result["mcmc_result{}".format(i)] = pickler["myPickler{}".format(i)].load()
    ## Containers for the per-channel posterior summaries
    final_pos = []
    final_PA = []
    final_contr = []
    final_pos_gauss = []
    final_PA_gauss = []
    final_contr_gauss = []
    ## Obtain r, PA, flux and error values
    for i in range(0,len(wl)):
        mcmc = mcmc_result["mcmc_result{}".format(i)]
        chain_40 = mcmc['chain']
        index = np.where(mcmc['AR']>0.4) # Keep only walkers with acceptance rate > 0.4
        print('Wavelength channel: ', i)
        burnin = 0.8 # Fraction of each chain discarded as burn-in
        chain_40_g = chain_40[index]
        isamples_flat = chain_40_g[:,int(chain_40_g.shape[1]//(1/burnin)):,:].reshape((-1,3))
        ## Confidence intervals + Gaussian fit of the flattened posterior.
        ## Fixed: the original called `vip.negfc...`, but the module is
        ## imported as `vip_hci` (as used everywhere else in this script).
        val_max,conf,mu,sigma = vip_hci.negfc.mcmc_sampling.confidence(isamples_flat,
                                        cfd = 68,
                                        gaussianFit = True,
                                        verbose=False,
                                        save=False,
                                        full_output=True,
                                        title=source,
                                        edgecolor = 'm',
                                        facecolor = 'b',range=())
        pKey = ['r','theta','f']
        ## Convert VIP's theta back to a position angle in (-180, 180].
        ## NOTE(review): this rebinds the module-level `PA` array defined in the
        ## set-up section — confirm nothing downstream still needs the original.
        PA = val_max[pKey[1]]-90
        if PA < -180: PA = val_max[pKey[1]]+360
        final_pos.append([val_max[pKey[0]],conf[pKey[0]][0],conf[pKey[0]][1]])
        final_PA.append([PA,conf[pKey[1]][0],conf[pKey[1]][1]])
        final_contr.append([val_max[pKey[2]],conf[pKey[2]][0],conf[pKey[2]][1]])
        final_pos_gauss.append([mu[0],sigma[0]])
        final_PA_gauss.append([mu[1],sigma[1]])
        final_contr_gauss.append([mu[2],sigma[2]])
    # Gaussian-fit flux and its sigma as arrays (used by the calibration step)
    spectra_gauss = np.array([item[0] for item in final_contr_gauss])
    spectra_gauss_err = np.array([item[1] for item in final_contr_gauss])
# Read FASTWIND calibrated spectra of the central star and resample them onto
# the instrument wavelength grid.
if fastwind == True:
    ## Open file
    f = open(fastwind_path + 'FLUXCONT', 'r')
    ## Read and ignore header lines
    header1 = f.readline()
    # Define the wavelength and fnue
    fast_wavel = 0
    fast_flux = 0
    fast_wavel = np.array([])
    fast_flux = np.array([])
    ## Loop over lines and extract variables of interest within the wavelength range of IFS
    # assumes columns[1] is wavelength (Angstrom) and columns[2] is log F_nu — TODO confirm
    for line in f:
        line = line.strip()
        columns = line.split()
        if float(columns[1]) > 9400 and float(columns[1]) < 16400:
            fast_wavel = np.append(fast_wavel,float(columns[1]))
            fast_flux = np.append(fast_flux,float(columns[2]))
        # early exit assumes the file lists wavelengths in DESCENDING order
        if float(columns[1]) < 9300:
            break
    f.close()
    # The flux is measured the opposite way as for IFS
    fast_wavel = fast_wavel[::-1]
    fast_flux = fast_flux[::-1]
    ## Adjust the model spectra to the same wavelengths as IFS (wl is in microns)
    model_spectra = np.interp(wl*1e4,fast_wavel,fast_flux)
    ## Define some parameters
    model_flux = np.zeros_like(wl)
    ## Put the flux from frequency to wavelength space: F_lambda = c/lambda^2 * F_nu
    for i in range(len(wl)):
        model_flux[i] = (c*1e10) / (wl[i]*1e4)**2 * 10**(model_spectra[i])
    ## Scale the flux to a distance of Ro
    # NOTE(review): the 120*rad_fast factor looks like a distance-in-radii scaling — confirm
    flux = model_flux/(dist_fast)**2 * (120*rad_fast)**2 #Use 100 Ro for distance to flux measurement
# Companion spectrum calibration: contrast spectrum times the flux-calibrated
# stellar spectrum gives the companion's calibrated spectrum.
if calib_spec == True:
    ## If a spectrum file is specified, (re)load the simplex results.
    if sspec_file is not None:
        ## The file was written with np.savetxt, so read it back the same way.
        ## (The original parser assigned whole columns to row slices and
        ## effectively kept only the last line of the file.)
        simplex_guess = np.loadtxt(sspec_file) # shape (39, 3): r, PA, flux
    ## Read the flux-calibrated stellar spectrum and store it
    if calib_star_spec_path is not None:
        ## Fixed: the original called float() on the whole split line (a list).
        ## NOTE(review): assumes one flux value per line matching the wl grid — confirm format.
        calib_spectrum = np.loadtxt(calib_star_spec_path)
    ## Contrast spectrum of the companion (companion flux / stellar PSF flux)
    contr_spectra = np.zeros_like(wl)
    contr_spectra_err = np.zeros_like(wl)
    for i in range(0,len(wl)):
        contr_spectra[i] = simplex_guess[i][2]/psf_final_sum[i] # Spectrum
        contr_spectra_err[i] = (spectra_gauss_err[i]/simplex_guess[i][2])*(simplex_guess[i][2]/psf_final_sum[i]) # Error on contrast spectrum !! Requires prior MCMC reading !!
    ## Calibrated companion spectrum and its error
    calib_comp_spec = contr_spectra * calib_spectrum
    calib_comp_spec_err = calib_comp_spec * contr_spectra_err/contr_spectra
    ## Save the calibrated spectrum with errors if requested
    if save_calib_spec == True:
        ## One (flux, error) row per channel. Fixed: the original assigned the
        ## full arrays to single scalar slots, which raises a ValueError.
        comp_spec = np.column_stack((calib_comp_spec, calib_comp_spec_err))
        np.savetxt(cspec_file, comp_spec, delimiter=' ')
# Magnitude contrasts: convert the companion/star contrast spectrum into
# delta-magnitudes per channel, split into the Y, J and H bands.
if mag_contr == True:
    ## Recover the simplex flux (third column of the simplex file) when the
    ## extraction step was skipped this session; otherwise reuse simplex_guess.
    if extract_spec == False:
        ## File written by np.savetxt above; column 2 is the flux.
        simplex_flux = np.loadtxt(sspec_file)[:, 2]
    else:
        ## Fixed: the original left simplex_flux undefined on this branch
        ## (NameError) — initialise it before filling.
        simplex_flux = np.zeros_like(wl)
        for i in range(len(wl)):
            simplex_flux[i] = simplex_guess[i][2]
    ## Compute the contrast spectrum relative to the stellar PSF flux
    contr_spectra = simplex_flux / psf_final_sum
    ## Define the Y, J and H bands (IFS channel ranges)
    Y = wl[0:14]
    J = wl[14:25]
    H = wl[25:39]
    ## Calculate their respective contrast spectra
    contr_spectra_Y = contr_spectra[0:14]
    contr_spectra_J = contr_spectra[14:25]
    contr_spectra_H = contr_spectra[25:39]
    ## Contrast magnitudes per channel: dmag = -2.5 log10(contrast)
    dmag_Y = np.zeros_like(contr_spectra_Y)
    dmag_J = np.zeros_like(contr_spectra_J)
    dmag_H = np.zeros_like(contr_spectra_H)
    for i in range(len(contr_spectra_Y)):
        dmag_Y[i] = -2.5*mh.log10(contr_spectra_Y[i])
        dmag_H[i] = -2.5*mh.log10(contr_spectra_H[i])
    for i in range(len(contr_spectra_J)):
        dmag_J[i] = -2.5*mh.log10(contr_spectra_J[i])
    if print_mag_contr == True:
        print("Y contrast magnitude: ", dmag_Y)
        print("J contrast magnitude: ", dmag_J)
        print("H contrast magnitude: ", dmag_H)
# Absolute magnitudes of the companion in Y, J and H, from the band contrasts,
# the stellar magnitudes, interstellar extinction and the distance modulus.
if abs_mag == True:
    ## Band fluxes (Jy) of the central star from its apparent magnitudes
    ## (2026/1600/1080 Jy are the Y/J/H zero-point fluxes).
    star_flux_Y = 2026*10**(star_mag_Y/-2.5)
    star_flux_J = 1600*10**(star_mag_J/-2.5)
    star_flux_H = 1080*10**(star_mag_H/-2.5)
    ## Companion band flux = band-averaged contrast times stellar band flux.
    ## Fixed: the original referenced undefined `contr_spec_*`; the band
    ## contrast spectra computed in the mag_contr step are `contr_spectra_*`,
    ## collapsed here to one value per band.
    ## NOTE(review): confirm a plain mean is the intended band average.
    FluxJy_Y = np.mean(contr_spectra_Y) * star_flux_Y
    FluxJy_J = np.mean(contr_spectra_J) * star_flux_J
    FluxJy_H = np.mean(contr_spectra_H) * star_flux_H
    ## Companion apparent magnitudes (relative to a 1 Jy zero point)
    mag_comp = np.zeros([3])
    mag_comp[0] = -2.5 * mh.log10(FluxJy_Y/1.) # Y-band zero point flux
    mag_comp[1] = -2.5 * mh.log10(FluxJy_J/1.) # J-band zero point flux
    mag_comp[2] = -2.5 * mh.log10(FluxJy_H/1.) # H-band zero point flux
    ## NOTE(review): the "errors" below duplicate the magnitude formulas —
    ## no real error propagation is performed yet.
    mag_comp_err = np.zeros([3])
    mag_comp_err[0] = -2.5 * mh.log10(FluxJy_Y/1.)
    mag_comp_err[1] = -2.5 * mh.log10(FluxJy_J/1.)
    mag_comp_err[2] = -2.5 * mh.log10(FluxJy_H/1.)
    ## Interstellar extinction from the observed vs intrinsic (B-V) colour
    BV_obs = 6.37-6.24
    BV_O = -0.26
    Rv = 3.1
    E_BV = BV_obs - BV_O
    Av = Rv * E_BV
    Ay = Av * 1.# Missing: proper A_Y/A_V coefficient — TODO
    Aj = Av * 0.282
    Ah = Av * 0.175
    ## Absolute magnitude: M = m - A - 5 (log10 d - 1)
    M_comp = np.zeros_like(mag_comp)
    M_comp[0] = mag_comp[0] - Ay - 5*(mh.log10(star_dist)-1)
    M_comp[1] = mag_comp[1] - Aj - 5*(mh.log10(star_dist)-1)
    M_comp[2] = mag_comp[2] - Ah - 5*(mh.log10(star_dist)-1)
    ## NOTE(review): same duplication as mag_comp_err above.
    M_comp_err = np.zeros_like(mag_comp)
    M_comp_err[0] = mag_comp[0] - Ay - 5*(mh.log10(star_dist)-1)
    M_comp_err[1] = mag_comp[1] - Aj - 5*(mh.log10(star_dist)-1)
    M_comp_err[2] = mag_comp[2] - Ah - 5*(mh.log10(star_dist)-1)
    ## Print the magnitudes. Fixed: the original concatenated str + float,
    ## which raises a TypeError at runtime.
    if print_abs_mag == True:
        print("Y apparent magnitude: ", mag_comp[0], " +/- ", mag_comp_err[0])
        print("J apparent magnitude: ", mag_comp[1], " +/- ", mag_comp_err[1])
        print("H apparent magnitude: ", mag_comp[2], " +/- ", mag_comp_err[2])
        print("Y absolute magnitude: ", M_comp[0], " +/- ", M_comp_err[0])
        print("J absolute magnitude: ", M_comp[1], " +/- ", M_comp_err[1])
        print("H absolute magnitude: ", M_comp[2], " +/- ", M_comp_err[2])
# Plotting
## Aperture Photometry
if plot_aper == True:
    ## Plot the aperture-photometry flux of the companion against wavelength.
    plt.figure(figsize=(12, 9))
    plt.ylim(0, 1.1 * max(final_sum))
    plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.5)
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.ylabel("Flux [ADU/s]", fontsize=20)
    plt.xlabel('Wavelength [$\mathring{A}$]', fontsize=20)
    plt.plot(wl * 1e4, final_sum, lw=2.8)  # wl is in microns, axis in Angstrom
    plt.show()
## Simplex Optim
if plot_sspec == True:
    ## Plot the simplex-extracted flux (third simplex parameter) per channel.
    simplex_flux = np.array([simplex_guess[i][2] for i in range(len(wl))])
    plt.figure(figsize=(12, 9))
    plt.ylim(0, 1.1 * max(simplex_flux))
    plt.grid(True, 'major', 'y', ls='--', lw=.5, c='k', alpha=.5)
    plt.xticks(fontsize=18)
    plt.yticks(fontsize=18)
    plt.ylabel("Simplex flux [ADU/s]", fontsize=20)
    plt.xlabel('Wavelength [$\mathring{A}$]', fontsize=20)
    plt.plot(wl * 1e4, simplex_flux, lw=2.8)  # wl is in microns, axis in Angstrom
    plt.show()
| 29,887 | 46.067717 | 361 | py |
SHIPS | SHIPS-master/ships_irdis.py | ############################
# Date: 07/04/2020
# Title: Running script for SHIPS for IRDIS data
# Description: Use this script to run SHIPS for IRDIS data. In this script you'll find all the necessary parameters to run SHIPS. ONLY SPHERE-DC DATA FOR NOW. VIP is used.
# VIP version: 0.9.11 (Rainot edit.)
# Python version: 3 ONLY
############################
# Set up your parameters
## Define images to analyse
cube_filepath = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/ird_convert_dc-IRD_SCIENCE_REDUCED_MASTER_CUBE-center_im.fits'
wavelength_filepath = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/ird_convert_dc-IRD_SCIENCE_LAMBDA_INFO-lam.fits'
angles_filepath = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/ird_convert_dc-IRD_SCIENCE_PARA_ROTATION_CUBE-rotnth.fits'
psf_filepath = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/ird_convert_recenter_dc5-IRD_SCIENCE_PSF_MASTER_CUBE-median_unsat.fits'
## Photometry
comp_pos = ([490.,455.],[559.,579.],[688,630],[570,702],[311,486],[696,419],[296,472],[654,707],[517,241],[227,569],[344,775],[847,491],[899,507],[72,533],[819,180],[451.,60.],[49.,396.],[732.,44.],[647.,15.])# Companion position in pixels (X,Y)
psf_pos = (32, 32) # PSF position in pixels (X,Y)
radial_dist = [98.0204060387,179.047479737,238.465930481,247.080958392,275.481396831,330.492057393,380.553544196,428.042054009,433.887082085,452.598055674,482.597140481]# Radial distance of companion in pixels
# NOTE(review): comp_pos has 19 entries, radial_dist 11 and position_angle 2 —
# the photometry/simplex loops below index comp_pos by the radial_dist index,
# so only the first len(radial_dist) companions are processed. Confirm intended.
position_angle = [295.9,311.8] # Position angle of companion in degrees
noise_aperture_pos_comp = (512,512) # Position in pixels of the circular annulus aperture for noise measurement in the case of the companion
noise_aperture_pos_psf = (12,22) # Position in pixels of the circular annulus aperture for noise measurement in the case of the PSF
## Computing power
ncores = 4 # Number of cores you are willing to share for the computation
## Do you want to see the image?
see_cube = False # Original cube
see_collapsed_cube = False # Collapsed cube
see_psf_norm = False # Normalised PSF
see_cube_centre = False # Check if the image is centered correctly
size_psf = 31 # Crop size (pixels) applied to the PSF frames
## PCA
ncomp_pca = 1 # Number of principal components for PCA
opti_pca = False # Optimise the number of PCA components?
source = (501,525) # Source where to optimise the PCA
## SNR maps
snr_maps = False # Would you like to make and save an SNR map to disk?
snr_map_file = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/HD93403/SNRmap_VIP_n1.fits' # Finish the file with .fits
## Detection
adi_frame = False # Would you like to apply ADI on the frame?
adi_plot = False # Would you like to see the resulting plot?
adi_min_scale = -0.2 # Minimum colour scale for the ADI plot
adi_max_scale = 0.2 # Maximum colour scale for the ADI plot
detection = False # Would you like the algorithm to detect sources for you? !! WARNING: this is a simple detection !!
detect_sigma = 3 # What sigma limit would you like for the detection?
## Contrast curves
contrast_curves = False # True or False !! computationally intensive !!
cube_free_file = "/Users/alan/Documents/PhD/Data/SPHERE/IFS/HD93403/cube_free_IFS.fits" # Filepath to the cube free of sources/companions
n_branches = 1 # Number of branches for contrast curves
save_contrcurve = False # Save the contrast curve values to a file?
contrcurve_savefile = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/HD93403/contrast_curve_IRDIS.txt'# Filepath to save the contrast curve distance and delta_mag
## Photometric errors of PSF
psf_errors = False # Compute the photometric errors of the central star's PSF
psf_errors_save = False # Save the errors to a file?
psf_errors_file = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/PSF_errors.txt' # Filepath to save the PSF errors
## Spectrum extraction with PSF fitting
extract_psf_fitting = False # Will start the PSF fitting for spectrum extraction
psf_fitting_contr = False # Would you like to have the full output of PSF fitting = plots for every sources + contrast magnitudes?
save_psf_fitting = False # Save the spectrum to ascii file
psff_file_K1 = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/psf_fitting_K1.txt' # Filepath to save the Simplex spectrum for the K1 band
psff_file_K2 = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/psf_fitting_K2.txt' # Filepath to save the Simplex spectrum for the K2 band
## Spectrum extraction with Simplex Nelder-Mead optimisation
extract_spec = True # Will start the simplex Nelder-Mead optimisation for spectrum extraction
ann_width = 3 # Annulus width of Simplex
aper_radius = 3 # Aperture Radius of PCA
save_spec = False # Save the spectrum to ascii file
sspec_file_K1 = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/HD93403/VIP_simplex_K1.txt' # Filepath to save the Simplex spectrum for the K1 band
sspec_file_K2 = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/HD93403/VIP_simplex_K2.txt' # Filepath to save the Simplex spectrum for the K2 band
## Spectrum extraction with MCMC
extract_mcmc = False # Will compute the MCMC for all sources !! This takes ~22h per source and is very computer intensive !!
source = 'QZCar' # Give name for your primary star
mcmc_path = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/spectra/' # Directory where MCMC results will be stored
## Reading MCMC results
read_mcmc = False # Do you wish to read the MCMC results?
# NOTE(review): `source` and `mcmc_path` are redefined here, silently
# overwriting the values set just above (and `source` earlier held the PCA
# optimisation coordinates) — only the last assignment wins.
source = 'QZCar' # Give name for your source
mcmc_path = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/MCMC/' # Directory where MCMC results are stored
## Load calibrated FASTWIND models of the central star
fastwind = False # Use FASTWIND model spectra for the star
fastwind_path = '/Users/alan/Nextcloud/PhD/Thesis/SPHERE/spectra/fastwind/qzcarAa1/' # Directory where the FASTWIND flux are
rad_fast = 22.1 # Radius of model star
dist_fast = 100. # Distance to consider for the flux of the calibrated spectrum in Ro
## Compute calibrated spectrum of companion
calib_spec = False # Do you wish to calibrate the spectrum of the companions?
save_calib_spec = False # Would you like to save the calibrated spectrum & associated error?
calib_star_spec_path = '/Users/alan/Nextcloud/PhD/Thesis/SPHERE/spectra/fastwind/qzcar_fastwind_spec.txt' # Path to calibrated spectrum of central star
# NOTE(review): sspec_file is commented out but later steps reference it — confirm.
# sspec_file = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/VIP_simplex.txt' # Path to spectrum file
cspec_file = '/Users/alan/Documents/PhD/Data/SPHERE/IRDIS/QZCar/VIP_calib_spectra.txt' # Path to calibrated spectrum
# ---------------------------------------------------------------------------
# Running script (DO NOT MODIFY)
# Some definitions
## Load libraries
import __init__
import sys
import matplotlib
import vip_hci
from hciplot import plot_frames, plot_cubes
from vip_hci.metrics.fakecomp import cube_inject_companions, frame_inject_companion, normalize_psf
import numpy as np
import scipy
import astropy.io.fits as fits
from astropy import wcs
import photutils
from photutils import CircularAperture
from photutils import CircularAnnulus
import matplotlib.pyplot as plt
import glob
import math as mh
from scipy.integrate import quad, dblquad
from PSFfitting import psf_fitting
## Define constants
c = 299792458. # Speed of light (m/s)
Ro = 6.957e8 # Solar Radius (m)
sr2pc = 44334448.0068964 # Convert steraradians to parsec
pxscale = 0.01225 # IRDIS pixel scale in arcsec/pixel
PA = np.array(position_angle) + 90 # Correct for VIP unconventional rotation axis
## Open image files
cube = vip_hci.fits.open_fits(cube_filepath)
wl = vip_hci.fits.open_fits(wavelength_filepath)
angs = vip_hci.fits.open_fits(angles_filepath)
psf = vip_hci.fits.open_fits(psf_filepath)
## Define some Parameters
psf = np.median(psf, axis=1) # Take the median value of all psf observations
psf_scaled = np.zeros_like(psf) # The psf will need to be scaled
flevel = np.zeros_like(cube[:,0,0,0]) # Flux level for the companion
flevel = np.array(flevel) # Redefinition - why? (no-op: already an ndarray)
centy, centx = vip_hci.var.frame_center(cube[0,0]) # Find the coordinate centers of the frame
## Check the RAW data cubes (opens a DS9 window)
if see_cube == True:
    ds9 = vip_hci.Ds9Window()
    ds9.display(cube[0,0])
## Get FWHM of images & normalised PSF (Gaussian fit per channel)
psf_med = vip_hci.preproc.cosmetics.cube_crop_frames(psf, size_psf, xy=(32, 32), verbose=True, force=True) # Resize the PSF
psf_norm, maxflux, fwhm = vip_hci.metrics.normalize_psf(psf_med, fwhm='fit',size=None, threshold=None,mask_core=None, model='gauss',imlib='opencv',interpolation='lanczos4',force_odd=True,full_output=True,verbose=False) # maxflux is a dummy variable
### Plot it
if see_psf_norm == True:
    plot_frames(psf_norm[1], grid=True, size_factor=4)
## Check if the cube is centred correctly by plotting
if see_cube_centre == True:
    plot_frames(vip_hci.preproc.frame_crop(cube[0,0], 50), grid=True, size_factor=4)
## Optimise the number of PCA components (exits the script when done)
if opti_pca == True:
    vip_hci.pca.pca(cube[0], angs, fwhm=fwhm[0], source_xy=(501,525),mask_center_px=None, ncomp=(1, 41, 2))
    sys.exit("PCA optimised. To continue, please input the PCA value in the script and skip this process.")
## Detection with VIP, for now only with the first wavelength
if adi_frame == True:
    ### Compute the ADI frame (median subtraction over the rotation sequence)
    print("Computing the ADI frame...")
    fr_adi = vip_hci.medsub.median_sub(cube, -angs, scale_list=wl, mode='fullfr') # 2D ADI frame
    print("Done!")
    ### Plot the frame
    if adi_plot == True:
        plot_frames(fr_adi, vmin=adi_min_scale, vmax=adi_max_scale)
    ### Compute the detection of sources (exits the script when done)
    if detection==True:
        detect = vip_hci.metrics.detection(fr_adi, fwhm=fwhm[0], psf=psf_norm[0], debug=False, mode='snrmap', snr_thresh=detect_sigma,bkg_sigma=detect_sigma,matched_filter=True,vmin=adi_min_scale,vmax=adi_max_scale,verbose=False) # Sigma limit provided by user
        print("Detected sources : " , "\n", detect)
        detect_pos = np.array(detect) # Converted to array in order to be used later
        sys.exit("Sources detected. To continue, please input the target coordinates in the script and skip this process.")
## SNR maps (PCA residual frame converted to a signal-to-noise map)
if snr_maps == True:
    snrmap = vip_hci.metrics.snrmap(vip_hci.pca.pca(cube, -angs, scale_list=wl, ncomp=ncomp_pca, verbose=True), fwhm[0], nproc=ncores, plot=True)
    vip_hci.fits.write_fits(snr_map_file,snrmap) # Write SNR maps to file
    sys.exit("SNR maps created. To continue, please continue from the beginning process.")
# Stellar photometry of the companion
## Collapse the images for better photometry measurement: derotate every frame
## to a common north, then median-combine the rotation sequence per channel.
cube_derot = np.zeros_like(cube)
cube_wl_coll = np.zeros_like(cube[:,0])
for i in range(len(wl)):
    cube_derot[i] = vip_hci.preproc.cube_derotate(cube[i],angs) # Rotate the images to the same north
    cube_wl_coll[i] = vip_hci.preproc.cube_collapse(cube_derot[i]) # Collapse along the rotation axis - 3D image
# cube_coll = vip_hci.preproc.cube_collapse(cube_derot,wl_cube=False) # Collapse along the wavelength axis - 2D image
# cube_wl_coll = np.zeros_like(cube[:,0,:,:])
# for i in range(len(wl)):
#     cube_wl_coll[i] = vip_hci.hci_postproc.median_sub(cube[i],-angs,fwhm=fwhm[i],verbose=False) # Rotate & collapse along the rotation axis - 3D image
# cube_derot = vip_hci.preproc.cube_derotate(cube,angs) # Rotate the images to the same north
## Check the collapsed data cubes
if see_collapsed_cube == True:
    ds9 = vip_hci.Ds9Window()
    ds9.display(cube_wl_coll[0])#,cube_coll) # cube_wl_coll on the left and cube_coll on the right
## Aperture photometry of companions and PSF: background-subtracted aperture
## sums per channel (PSF) and per companion (both K1 and K2 channels).
### Define photometry
noise_phot = np.zeros_like(wl) #Noise photometry
psf_final_sum = np.zeros_like(wl) #PSF photometry
final_sum_K1 = np.zeros(len(comp_pos)) #Companion photometry in the K1 band
final_sum_K2 = np.zeros(len(comp_pos)) #Companion photometry in the K2 band
### Apertures
aper_noise_psf = photutils.CircularAnnulus(psf_pos,noise_aperture_pos_psf[0],noise_aperture_pos_psf[1])
### Aperture photometry - PSF
for i in range(0,len(wl)):
    ### Aperture of half the FWHM radius at this channel
    aper_psf = photutils.CircularAperture(psf_pos, 1./2*fwhm[i])
    ### Flux
    phot_psf = photutils.aperture_photometry(psf[i], aper_psf)
    phot_psf_noise = photutils.aperture_photometry(psf[i], aper_noise_psf)
    psf_bkg_mean = phot_psf_noise['aperture_sum'] / aper_noise_psf.area
    psf_bkg_sum = psf_bkg_mean * aper_psf.area
    # NOTE(review): uses the fitted peak flux (maxflux) rather than the
    # aperture sum (phot_psf) — confirm this mix is intended.
    psf_final_sum[i] = maxflux[i] - psf_bkg_sum
### Aperture photometry - Companions
### NOTE(review): iterates len(radial_dist)=11 although comp_pos holds 19
### positions, so the remaining companions get no photometry — confirm.
for i in range(0,len(radial_dist)):
    ### Apertures dependent on companions (noise annulus at the same radius)
    aper_noise_comp = photutils.CircularAnnulus((512,512),radial_dist[i]-5,radial_dist[i]+5)
    aper_comp_K1 = photutils.CircularAperture((comp_pos[i][0], comp_pos[i][1]),1./2*fwhm[0])
    aper_comp_K2 = photutils.CircularAperture((comp_pos[i][0], comp_pos[i][1]),1./2*fwhm[1])
    ### Flux
    phot_noise_K1 = photutils.aperture_photometry(cube_wl_coll[0], aper_noise_comp)
    phot_noise_K2 = photutils.aperture_photometry(cube_wl_coll[1], aper_noise_comp)
    phot_K1 = photutils.aperture_photometry(cube_wl_coll[0], aper_comp_K1)
    phot_K2 = photutils.aperture_photometry(cube_wl_coll[1], aper_comp_K2)
    bkg_mean_K1 = (phot_noise_K1['aperture_sum']-phot_K1['aperture_sum']) / (aper_noise_comp.area-aper_comp_K1.area)
    bkg_mean_K2 = (phot_noise_K2['aperture_sum']-phot_K2['aperture_sum']) / (aper_noise_comp.area-aper_comp_K2.area)
    bkg_sum_K1 = bkg_mean_K1 * aper_comp_K1.area
    bkg_sum_K2 = bkg_mean_K2 * aper_comp_K2.area
    final_sum_K1[i] = phot_K1['aperture_sum'] - bkg_sum_K1
    final_sum_K2[i] = phot_K2['aperture_sum'] - bkg_sum_K2
### Scaling the PSF for normalisation -- SHOULD I JUST TAKE PSF_NORM INSTEAD?
psf_scaled = np.zeros_like(psf)
for i in range (0,len(psf)):
    psf_scaled[i] = psf[i]/psf_final_sum[i]
## Contrast curve: sensitivity vs separation on a companion-free cube.
if contrast_curves == True:
    cube_negfc = vip_hci.fits.open_fits(cube_free_file)
    print("Computing contrast curve...")
    contrcurve = vip_hci.metrics.contrast_curve(cube_negfc,-angs,psf,np.average(fwhm),pxscale,psf_final_sum,vip_hci.pca.pca,nbranch=n_branches,
                            dpi=300, student=False, debug=True ,plot=True, verbose=True, full_output=True, ncomp=ncomp_pca, scale_list=wl)
    if save_contrcurve == True:
        # Two columns: separation (arcsec) and 5-sigma contrast in magnitudes
        contr_dist = contrcurve[0]['distance_arcsec']
        contr_mag = -2.5*np.log10(contrcurve[0]['sensitivity_gaussian'])
        contr_curve = np.zeros((len(contr_mag),2))
        contr_curve[:,0] = contr_dist
        contr_curve[:,1] = contr_mag
        np.savetxt(contrcurve_savefile,contr_curve, delimiter=' ') # Saves to file)
        print("Contrast curve saved!")
## PSF error calculation: scatter of the fitted peak flux over the raw
## (un-medianed) PSF exposures, one value per channel.
if psf_errors == True:
    psferr = vip_hci.fits.open_fits(psf_filepath) # Open the raw PSFs
    stddev = np.zeros_like(wl) # Create an array for the stored standard deviation
    for i in range(len(wl)): # Loop over the wavelengths
        psferr_med = vip_hci.preproc.cosmetics.cube_crop_frames(psferr[i], size_psf, xy=(32, 32), verbose=True, force=True) # Resize the PSF
        psf_norm_err, maxflux_err, fwhm_err = vip_hci.metrics.normalize_psf(psferr_med, fwhm='fit',size=None, threshold=None,mask_core=None, model='gauss',imlib='opencv',interpolation='lanczos4',force_odd=True,full_output=True,verbose=False) # Measure the maximum flux for each PSF
        # ddof=1: sample standard deviation over the exposures
        stddev[i] = np.std(maxflux_err,ddof=1) # Calculate the standard deviation for the PSFs
    if psf_errors_save: # Save the error
        np.savetxt(psf_errors_file, stddev, delimiter=' ') # Saves to file
# Spectrum extraction with PSF fitting: fit the stellar PSF at every companion
# position in each collapsed channel (K1, K2); returns positions, fluxes and
# their errors, plus contrast magnitudes in full-output mode.
if extract_psf_fitting == True:
    ## Full output
    if psf_fitting_contr == True:
        ###K1
        x_K1,x0_K1,y_K1,y0_K1,f_K1,f0_K1,psf_flux_K1,psf0_flux_K1,c_K1,c0_K1 = psf_fitting(cube_wl_coll[0],psf[0],fwhm[0],comp_pos,psf_pos,full_output=True)
        ###K2
        x_K2,x0_K2,y_K2,y0_K2,f_K2,f0_K2,psf_flux_K2,psf0_flux_K2,c_K2,c0_K2 = psf_fitting(cube_wl_coll[1],psf[1],fwhm[1],comp_pos,psf_pos,full_output=True)
    else:
        ### K1
        x_K1,x0_K1,y_K1,y0_K1,f_K1,f0_K1,psf_flux_K1,psf0_flux_K1 = psf_fitting(cube_wl_coll[0],psf[0],fwhm[0],comp_pos,psf_pos)
        ### K2
        x_K2,x0_K2,y_K2,y0_K2,f_K2,f0_K2,psf_flux_K2,psf0_flux_K2 = psf_fitting(cube_wl_coll[1],psf[1],fwhm[1],comp_pos,psf_pos)
# Save the spectrum and positions of the PSF-fitting photometry as ASCII
# tables (one per band): central star row first, then one row per companion.
if save_psf_fitting == True:
    ## Helper: write one photometry table. Passing `contr`/`contr_err` adds the
    ## two contrast-magnitude columns used by the full-output mode.
    ## This replaces four near-identical inline writers; it also fixes the
    ## original simple-output K1 branch, which wrote to a file handle that was
    ## never opened (missing `open(psff_file_K1, 'w')` -> ValueError).
    def _write_psff_table(path, x, x_err, y, y_err, flux, flux_err,
                          psf_flux, psf_flux_err, contr=None, contr_err=None):
        with open(path, 'w') as f:  # context manager guarantees the file is closed
            if contr is not None:
                f.write("star_id x x_err y y_err flux flux_err contrast_mag " +
                        "contrast_mag_err" + '\n')
            else:
                f.write("star_id x x_err y y_err flux flux_err" + '\n')
            star_fields = [psf_pos[0], 0, psf_pos[1], 0, psf_flux, psf_flux_err]
            if contr is not None:
                star_fields += [99.99, 99.99]  # no meaningful self-contrast for the star
            f.write('central_star' + ' ' +
                    ' '.join('{:.6f}'.format(v) for v in star_fields) + '\n')
            for i in range(len(comp_pos)):
                fields = [x[i], x_err[i], y[i], y_err[i], flux[i], flux_err[i]]
                if contr is not None:
                    fields += [contr[i], contr_err[i]]
                f.write('{}'.format(star_ids[i]) + ' ' +
                        ' '.join('{:.6f}'.format(v) for v in fields) + '\n')
    ## Enumarate the stars
    star_ids = np.linspace(1,len(comp_pos),len(comp_pos))
    ## Full output (includes contrast-magnitude columns)
    if psf_fitting_contr == True:
        _write_psff_table(psff_file_K1, x_K1, x0_K1, y_K1, y0_K1, f_K1, f0_K1,
                          psf_flux_K1, psf0_flux_K1, c_K1, c0_K1)
        _write_psff_table(psff_file_K2, x_K2, x0_K2, y_K2, y0_K2, f_K2, f0_K2,
                          psf_flux_K2, psf0_flux_K2, c_K2, c0_K2)
    else:
        _write_psff_table(psff_file_K1, x_K1, x0_K1, y_K1, y0_K1, f_K1, f0_K1,
                          psf_flux_K1, psf0_flux_K1)
        _write_psff_table(psff_file_K2, x_K2, x0_K2, y_K2, y0_K2, f_K2, f0_K2,
                          psf_flux_K2, psf0_flux_K2)
# Spectrum extraction with NM (Nelder-Mead simplex), one fit per companion
# and per IRDIS channel (K1, K2).
if extract_spec == True:
    ## Define some parameters
    f_guess_pl = max(final_sum_K1) # Flux first guess
    ## Flux grids per companion: 0 to 1.5x the aperture-photometry flux
    f_range_K1 = np.zeros((len(final_sum_K1),200))
    f_range_K2 = np.zeros((len(final_sum_K2),200))
    for i in range(0,len(final_sum_K1)):
        f_range_K1[i] = np.linspace(0.*np.abs(final_sum_K1[i]), 1.5*np.abs(final_sum_K1[i]),200)
        f_range_K2[i] = np.linspace(0.*np.abs(final_sum_K2[i]), 1.5*np.abs(final_sum_K2[i]),200)
    p_in = np.array([radial_dist,PA]) # Regroup companion positions
    simplex_options = {'xtol':1e-2, 'maxiter':500, 'maxfev':1000} # Set the simplex options
    simplex_guess_K1 = np.zeros((len(radial_dist),3)) # Simplex result r, PA, flux per companion - K1
    simplex_guess_K2 = np.zeros((len(radial_dist),3)) # Simplex result r, PA, flux per companion - K2
    ## Start Simplex.
    ## Fixed: the original looped over range(len(wl)) (the two IRDIS channels)
    ## while indexing companions, so only the first two companions were ever
    ## processed; loop over the companions, matching the result-array size.
    for i in range(len(radial_dist)):
        print("Companion index: ", i + 1) # Companions for IRDIS
        ### K1: first pass from the measured pixel position...
        comp_xycoord = [[comp_pos[i][0],comp_pos[i][1]]] # Companion coords
        simplex_K1 = vip_hci.negfc.firstguess(cube[0],-angs,psf_norm[0],ncomp=ncomp_pca,plsc=pxscale,planets_xy_coord=comp_xycoord,fwhm=fwhm[0],annulus_width=ann_width,aperture_radius=aper_radius,simplex_options=simplex_options,f_range=f_range_K1[i],simplex=True,fmerit='sum',collapse='median',svd_mode='lapack',scaling=None,verbose=False,plot=False,save=False) # This takes some time
        ### ...then refine from the first-pass (r, theta) solution
        x_simplex = simplex_K1[0] * np.cos(np.deg2rad(simplex_K1[1])) + centx
        y_simplex = simplex_K1[0] * np.sin(np.deg2rad(simplex_K1[1])) + centy
        comp_xycoord = [[x_simplex[0],y_simplex[0]]] # Companion coords
        simplex_guess_K1[i] = vip_hci.negfc.firstguess(cube[0],-angs,psf_norm[0],ncomp=ncomp_pca,plsc=pxscale,planets_xy_coord=comp_xycoord,fwhm=fwhm[0],annulus_width=ann_width,aperture_radius=aper_radius,simplex_options=simplex_options,f_range=f_range_K1[i],simplex=True,fmerit='sum',collapse='median',svd_mode='lapack',scaling=None,verbose=False,plot=False,save=False) # This takes some time
        ### K2: same two-pass scheme
        comp_xycoord = [[comp_pos[i][0],comp_pos[i][1]]] # Companion coords
        simplex_K2 = vip_hci.negfc.firstguess(cube[1],-angs,psf_norm[1],ncomp=ncomp_pca,plsc=pxscale,planets_xy_coord=comp_xycoord,fwhm=fwhm[1],annulus_width=ann_width,aperture_radius=aper_radius,simplex_options=simplex_options,f_range=f_range_K2[i],simplex=True,fmerit='sum',collapse='median',svd_mode='lapack',scaling=None,verbose=False,plot=False,save=False) # This takes some time
        x_simplex = simplex_K2[0] * np.cos(np.deg2rad(simplex_K2[1])) + centx
        y_simplex = simplex_K2[0] * np.sin(np.deg2rad(simplex_K2[1])) + centy
        comp_xycoord = [[x_simplex[0],y_simplex[0]]] # Companion coords
        simplex_guess_K2[i] = vip_hci.negfc.firstguess(cube[1],-angs,psf_norm[1],ncomp=ncomp_pca,plsc=pxscale,planets_xy_coord=comp_xycoord,fwhm=fwhm[1],annulus_width=ann_width,aperture_radius=aper_radius,simplex_options=simplex_options,f_range=f_range_K2[i],simplex=True,fmerit='sum',collapse='median',svd_mode='lapack',scaling=None,verbose=False,plot=False,save=False) # This takes some time
        print("K1: ", simplex_guess_K1[i])
        print("K2: ", simplex_guess_K2[i])
    ## Save the spectrum
    if save_spec == True:
        np.savetxt(sspec_file_K1, simplex_guess_K1, delimiter=' ') # Saves to file
        np.savetxt(sspec_file_K2, simplex_guess_K2, delimiter=' ')
# Spectrum extraction with MCMC (negative fake companion sampling).
# NOTE(review): this section cannot run as written — it references several
# names that are never defined in this script: `annulus_width`, `aperture_width`
# (the set-up defines `ann_width`/`aper_radius`), `final_sum`/`simplex_guess`
# (only the K1/K2 variants exist here), and the module `vip` (imported as
# `vip_hci`). The intended loop (per channel vs per companion) is ambiguous,
# so the code is left untouched pending confirmation.
if extract_mcmc == True:
    instru= 'IRDIS36059' # Define instrument parameters
    ann_width=annulus_width # Annulus width of MCMC  # NOTE(review): undefined name
    aperture_radius=aperture_width # Aperture radius  # NOTE(review): undefined name
    fig_merit='sum' # Summation figure of merit
    outpath = mcmc_path.format(source) # Path to save MCMC files
    print("########## MCMC Sampling starting... ##########")
    nwalkers, itermin, itermax = (100,200,500) # as recommended by Oli W
    for i in range(len(final_sum)): # For each wavelength channel  # NOTE(review): undefined name
        initialState = simplex_guess[i] # Take r, PA and flux from simplex  # NOTE(review): undefined name
        bounds=[[0.75*initialState[0],1.25*initialState[0]],[0.75*initialState[1],1.25*initialState[1]],[0.75*initialState[2],1.30*initialState[2]]] # Initiate bounds
        output_file = source+'_IRDIS_companions_{}'.format(i) # Save to output file
        chain_40 = vip.negfc.mcmc_negfc_sampling(cube[i], -angs, psf_scaled[i], ncomp_pca, pxscale, initialState, ann_width,
                                          aperture_radius, cube_ref=None, svd_mode='lapack', nwalkers=nwalkers,
                                          bounds=bounds, niteration_min=itermin,
                                          niteration_limit=itermax, check_maxgap=50, nproc= ncores,
                                          output_file=output_file, display=True,verbose=True, save=True,
                                          rhat_threshold=1.01, niteration_supp=0, fmerit=fig_merit) # MCMC run per channel
    print("########## MCMC Sampling done! ##########")
## Read MCMC files: load the stored result pickles and fit a Gaussian to the
## flattened posterior of each parameter (r, theta, flux).
if read_mcmc == True:
    import pickle # Import important MCMC libraries
    from pickle import Pickler
    pickler={}
    mcmc_result={}
    outpath = mcmc_path.format(source) # Path to save MCMC files
    # NOTE(review): three pickles are read here but the summary loop below
    # iterates range(len(wl)) (= 2 IRDIS channels) — confirm which is intended.
    for i in range(0,3): # Read all channels and store them to variables
        with open(outpath+source+'K1_IRDIS_companion_S{}/MCMC_results'.format(i),'rb') as fi:
            pickler["myPickler{}".format(i)] = pickle.Unpickler(fi)
            mcmc_result["mcmc_result{}".format(i)] = pickler["myPickler{}".format(i)].load()
    ## Create variable to store MCMC results
    final_pos = []
    final_PA = []
    final_contr = []
    final_pos_gauss = []
    final_PA_gauss = []
    final_contr_gauss = []
    ## Obtain r, PA, flux and error values
    for i in range(0,len(wl)):
        mcmc = mcmc_result["mcmc_result{}".format(i)]
        chain_40 = mcmc['chain']
        index = np.where(mcmc['AR']>0.4) # Keep only walkers with acceptance rate > 0.4
        print('Companion number: ', i)
        # NOTE(review): burn-in fraction differs from the IFS script (0.8) — confirm.
        burnin = 0.3
        chain_40_g = chain_40[index]
        isamples_flat = chain_40_g[:,int(chain_40_g.shape[1]//(1/burnin)):,:].reshape((-1,3))
        # gaussian_fit=True with full_output=False returns only (mu, sigma)
        mu,sigma = vip_hci.negfc.mcmc_sampling.confidence(isamples_flat,
                                        cfd = 68,
                                        gaussian_fit = True,
                                        verbose=True,
                                        save=False,
                                        full_output=False,
                                        title=source,
                                        edgecolor = 'm',facecolor = 'b',range=())
#
# pKey = ['r','theta','f']
# PA = val_max[pKey[1]]-90
# if PA < -180: PA = val_max[pKey[1]]+360
#
# final_pos.append([val_max[pKey[0]],conf[pKey[0]][0],conf[pKey[0]][1]])
# final_PA.append([PA,conf[pKey[1]][0],conf[pKey[1]][1]])
# final_contr.append([val_max[pKey[2]],conf[pKey[2]][0],conf[pKey[2]][1]])
# final_pos_gauss.append([mu[0],sigma[0]])
# final_PA_gauss.append([mu[1],sigma[1]])
# final_contr_gauss.append([mu[2],sigma[2]])
#
# # Get values into usuable arrays
# spectra_gauss = np.array([item[0] for item in final_contr_gauss])
# spectra_gauss_err = np.array([item[1] for item in final_contr_gauss])
# Read FASTWIND calibrated spectra
if fastwind == True:
## Open file
f = open(fastwind_path + 'FLUXCONT', 'r')
## Read and ignore header lines
header1 = f.readline()
# Define the wavelength and fnue
fast_wavel = 0
fast_flux = 0
fast_wavel = np.array([])
fast_flux = np.array([])
## Loop over lines and extract variables of interest within the wavelength range of IRDIS
for line in f:
line = line.strip()
columns = line.split()
if float(columns[1]) > 9400 and float(columns[1]) < 16400:
fast_wavel = np.append(fast_wavel,float(columns[1]))
fast_flux = np.append(fast_flux,float(columns[2]))
if float(columns[1]) < 9300:
break
f.close()
# The flux is measured the opposite way as for IRDIS
fast_wavel = fast_wavel[::-1]
fast_flux = fast_flux[::-1]
## Adjust the model spectra to the same wavelengths as IRDIS
model_spectra = np.interp(wl*1e4,fast_wavel,fast_flux)
## Define some parameters
model_flux = np.zeros_like(wl)
## Put the flux from frequency to wavelength space
for i in range(len(wl)):
model_flux[i] = (c*1e10) / (wl[i]*1e4)**2 * 10**(model_spectra[i])
## Scale the flux to a distance of Ro
flux = model_flux/(dist_fast)**2 * (120*rad_fast)**2 #Use 100 Ro for distance to flux measurement
# Companion spectrum calibration
if calib_spec == True:
## If spectrum file is specified, then read and store it
if sspec_file is not None:
simplex_guess = np.zeros((39,3)) # Set the simplex variable: r, PA, flux
f = open(sspec_file,'r')
for line in f:
line = line.strip()
columns = line.split()
simplex_guess[:][0] = float(columns[0])
simplex_guess[:][1] = float(columns[1])
simplex_guess[:][2] = float(columns[2])
f.close()
## Read calibrated spectrum and store it
if calib_star_spec_path is not None:
calib_spectrum = np.zeros_like(wl)
f = open(calib_star_spec_path,'r')
for line in f:
line = line.strip()
columns = line.split()
calib_spectrum = float(columns)
f.close()
## Calculate the contrast spectrum of the companion
contr_spectra = np.zeros_like(wl)
contr_spectra_err = np.zeros_like(wl)
for i in range(0,len(wl)):
contr_spectra[i] = simplex_guess[i][2]/psf_final_sum[i] # Spectrum
contr_spectra_err[i] = (spectra_gauss_err[i]/simplex_guess[i][2])*(simplex_guess[i][2]/psf_final_sum[i]) # Error on contrast spectrum !! Requires prior MCMC reading !!
## Calculate the calibrated companion spectrum
calib_comp_spec = contr_spectra * calib_spectrum
calib_comp_spec_err = calib_comp_spec * contr_spectra_err/contr_spectra
## Save the calibrated spectrum with errors if requested
if save_calib_spec == True:
comp_spec = np.zeros((39,2))
for i in range(0,39):
comp_spec[i][0] = calib_comp_spec
comp_spec[i][1] = calib_comp_spec_err
np.savetxt(cspec_file, comp_spec, delimiter=' ')
| 31,489 | 51.222222 | 393 | py |
SHIPS | SHIPS-master/__init__.py | from __future__ import (absolute_import)
# import vip
# from vip.phot.fakecomp import inject_fcs_cube, inject_fc_frame, psf_norm
__version__ = "1.0.0"
print("------------------------------------")
print(" _____ _ _ _____ _____ _____ ")
print(" / ____| | | |_ _| __ \ / ____|")
print(" | (___ | |__| | | | | |__) | (___ ")
print(" \___ \| __ | | | | ___/ \___ \ ")
print(" ____) | | | |_| |_| | ____) |")
print(" |_____/|_| |_|_____|_| |_____/ ")
print("------------------------------------")
print(" SPHERE High-contrast Imaging Pipeline " + "\n" " for massive Stars v"+__version__)
print("------------------------------------")
print(" ")
print("Please cite Rainot et al. 2020")
print("whenever you publish data with SHIPS.")
| 808 | 35.772727 | 90 | py |
21cmVAE | 21cmVAE-main/VeryAccurateEmulator/emulator.py | import h5py
import tensorflow as tf
from tqdm.keras import TqdmCallback
import numpy as np
from VeryAccurateEmulator import __path__
import VeryAccurateEmulator.preprocess as pp
PATH = __path__[0] + "/"
def _gen_model(in_dim, hidden_dims, out_dim, activation_func, name=None):
"""
Generate a new keras model.
Parameters
----------
in_dim : int or None
The dimension of the input layer of the model. Should be None if the
model is succeeding another model (e.g. a decoder in an autoencoder).
hidden_dims : list of ints
The dimension of the hidden layers of the model.
out_dim : int
The dimension of the output layer of the model.
activation_func: str or instance of tf.keras.activations
Activation function between hidden layers. Must be recognizable by
keras.
name : str or None
Name of the model. Default : None.
Returns
-------
model : tf.keras.Model
The generated keras model.
"""
layers = []
if in_dim is not None:
input_layer = tf.keras.Input(shape=(in_dim,))
layers.append(input_layer)
if len(hidden_dims):
for dim in hidden_dims:
layer = tf.keras.layers.Dense(dim, activation=activation_func)
layers.append(layer)
output_layer = tf.keras.layers.Dense(out_dim)
layers.append(output_layer)
model = tf.keras.Sequential(layers, name=name)
return model
def relative_mse_loss(signal_train):
"""
The square of the FoM in the paper, in units of standard deviation as the
signals are preproccesed.
Parameters
----------
signal_train : np.ndarray
Training signals.
Returns
-------
loss_function : callable
The loss function.
"""
def loss_function(y_true, y_pred):
# unpreproc signal to get the amplitude
mean = tf.convert_to_tensor(
np.mean(signal_train, axis=0) / np.std(signal_train)
)
signal = y_true + mean
# get amplitude in units of standard deviation of signals
reduced_amp = tf.math.reduce_max(
tf.abs(signal), axis=1, keepdims=False
)
# loss is mse / square of amplitude
loss = tf.keras.metrics.mean_squared_error(y_true, y_pred)
loss /= tf.keras.backend.square(reduced_amp)
return loss
return loss_function
NU_0 = 1420405751.7667 # Hz, rest frequency of 21-cm line
def redshift2freq(z):
"""
Convert redshift to frequency.
Parameters
----------
z : float or np.ndarray
The redshift or array of redshifts to convert.
Returns
-------
nu : float or np.ndarray
The corresponding frequency or array of frequencies in MHz.
"""
nu = NU_0 / (1 + z)
nu /= 1e6 # convert to MHz
return nu
def freq2redshift(nu):
"""
Convert frequency to redshfit.
Parameters
----------
nu : float or np.ndarray
The frequency or array of frequencies in MHz to convert.
Returns
-------
z : float or np.ndarray
The corresponding redshift or array of redshifts.
"""
nu *= 1e6 # to Hz
z = NU_0 / nu - 1
return z
def error(
true_signal, pred_signal, relative=True, nu_arr=None, flow=None, fhigh=None
):
"""
Compute the error (Eq. 1 in the paper) given the true and predicted
signal(s).
Parameters
----------
true_signal : np.ndarray
The true signal(s). An array of temperature for different redshifts
or frequencies. For multiple signals must each row correspond to a
signal.
pred_signal : np.ndarray
The predicted signal(s). Must have the same shape as true_signal.
relative : bool
Whether to compute the error in % relative to the signal amplitude
(True) or in mK (False). Default : True.
nu_arr : np.ndarray or None
The frequency array corresponding to the signals. Needed for computing
the error in different frequency bands. Default : None.
flow : float or None
The lower bound of the frequency band to compute the error in. Cannot
be set without nu_arr. Default : None.
fhigh : float or None
The upper bound of the frequency bnd to compute the error in. Cannot
be set without nu_arr. Default : None.
Returns
-------
err : float or np.ndarray
The computed errors. An array if multiple signals were input.
Raises
------
ValueError :
If nu_arr is None and flow or fhigh are not None.
"""
if (flow or fhigh) and nu_arr is None:
raise ValueError(
"No frequency array is given, cannot compute error in specified"
"frequency band."
)
if len(pred_signal.shape) == 1:
pred_signal = np.expand_dims(pred_signal, axis=0)
true_signal = np.expand_dims(true_signal, axis=0)
if flow and fhigh:
f = np.argwhere((nu_arr >= flow) & (nu_arr <= fhigh))[:, 0]
elif flow:
f = np.argwhere(nu_arr >= flow)
elif fhigh:
f = np.argwhere(nu_arr <= fhigh)
if flow or fhigh:
pred_signal = pred_signal[:, f]
true_signal = true_signal[:, f]
err = np.sqrt(np.mean((pred_signal - true_signal) ** 2, axis=1))
if relative: # give error as fraction of amplitude in the desired band
err /= np.max(np.abs(true_signal), axis=1)
err *= 100 # %
return err
# default parameters
hidden_dims = [288, 352, 288, 224]
redshifts = np.linspace(5, 50, 451)
with h5py.File(PATH + "dataset_21cmVAE.h5", "r") as hf:
par_train = hf["par_train"][:]
par_val = hf["par_val"][:]
par_test = hf["par_test"][:]
signal_train = hf["signal_train"][:]
signal_val = hf["signal_val"][:]
signal_test = hf["signal_test"][:]
class DirectEmulator:
def __init__(
self,
par_train=par_train,
par_val=par_val,
par_test=par_test,
signal_train=signal_train,
signal_val=signal_val,
signal_test=signal_test,
hidden_dims=hidden_dims,
activation_func="relu",
redshifts=redshifts,
frequencies=None,
):
"""
The direct emulator class. This class provides the user interface for
building, training, and using a Direct Emulator such as 21cmVAE.
The default parameters are the ones used by 21cmVAE.
Parameters
----------
par_train : np.ndarray
Parameters in training set.
par_val : np.ndarray
Parameters in validation set.
par_test : np.ndarray
Parameters in test set.
signal_train : np.ndarray
Signals in training set.
signal_val : np.ndarray
Signals in validation set.
signal_test : np.ndarray
Signals in test set.
hidden_dims : list of ints
List of dimensions of the hidden layers. Should be an empty list
if there are no hidden layers.
activation_func: str or instance of tf.keras.activations
Activation function between hidden layers. Must be recognizable by
keras.
redshifts : np.ndarray or None
Array of redshifts corresponding to the signals used.
frequencies : np.ndarray or None
Array of frequencies corresponding to the signals used.
Attributes
----------
par_train : np.ndarray
Parameters in training set.
par_val : np.ndarray
Parameters in validation set.
par_test : np.ndarray
Parameters in test set.
signal_train : np.ndarray
Signals in training set.
signal_val : np.ndarray
Signals in validation set.
signal_test : np.ndarray
Signals in test set.
par_labels : list of str
The names of the astrophysical parameters.
emulator : tf.keras.Model
The emulator.
redshifts : np.ndarray or None
Array of redshifts corresponding to the signals used.
frequencies : np.ndarray or None
Array of frequencies corresponding to the signals used.
Methods
-------
load_model : load an exsisting model.
train : train the emulator.
predict : use the emulator to predict global signals from astrophysical
input parameters
test_error : compute the test set error of the emulator.
save : save the class instance with all attributes.
"""
self.par_train = par_train
self.par_val = par_val
self.par_test = par_test
self.signal_train = signal_train
self.signal_val = signal_val
self.signal_test = signal_test
self.par_labels = [
"fstar",
"Vc",
"fx",
"tau",
"alpha",
"nu_min",
"Rmfp",
]
self.emulator = _gen_model(
self.par_train.shape[-1],
hidden_dims,
self.signal_train.shape[-1],
activation_func,
name="emulator",
)
if frequencies is None:
if redshifts is not None:
frequencies = redshift2freq(redshifts)
elif redshifts is None:
redshifts = freq2redshift(frequencies)
self.redshifts = redshifts
self.frequencies = frequencies
def load_model(self, model_path=PATH + "models/emulator.h5"):
"""
Load a saved model. The default parameter is the path to the saved
state of 21cmVAE as described in the paper.
Parameters
----------
model_path : str
The path to the saved model.
Raises
------
IOError : if model_path does not point to a valid model.
"""
custom_obj = {"loss_function": relative_mse_loss(self.signal_train)}
self.emulator = tf.keras.models.load_model(
model_path, custom_objects=custom_obj
)
def train(self, epochs, callbacks=[], verbose="tqdm"):
"""
Train the emulator.
Parameters
----------
epochs : int
Number of epochs to train for.
callbacks : list of tf.keras.callbacks.Callback
Callbacks to pass to the training loop. Default : []
verbose : 0, 1, 2, or "tqdm"
Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per
epoch, "tqdm" = use progress bar from tqdm. Default : "tqdm"
Returns
-------
loss : list of floats
Training set losses.
val_loss : list of floats
Validation set losses.
"""
X_train = pp.par_transform(self.par_train, self.par_train)
X_val = pp.par_transform(self.par_val, self.par_train)
y_train = pp.preproc(self.signal_train, self.signal_train)
y_val = pp.preproc(self.signal_val, self.signal_train)
if verbose == "tqdm":
callbacks.append(TqdmCallback())
verbose = 0
hist = self.emulator.fit(
x=X_train,
y=y_train,
batch_size=256,
epochs=epochs,
validation_data=(X_val, y_val),
validation_batch_size=256,
callbacks=callbacks,
verbose=verbose,
)
loss = hist.history["loss"]
val_loss = hist.history["val_loss"]
return loss, val_loss
def predict(self, params):
"""
Predict a (set of) global signal(s) from astrophysical parameters.
Parameters
----------
params : np.ndarray
The values of the astrophysical parameters. Must be in the order
given by the attrbiute par_labels. To predict a set of global
signals, input a 2d-array where each row correspond to a different
set of parameters.
Returns
-------
pred : np.ndarray
The predicted global signal(s).
"""
transformed_params = pp.par_transform(params, self.par_train)
proc_pred = self.emulator.predict(transformed_params)
pred = pp.unpreproc(proc_pred, self.signal_train)
if pred.shape[0] == 1:
return pred[0, :]
else:
return pred
def test_error(self, relative=True, flow=None, fhigh=None):
"""
Compute the error of the emulator for each signal in the test set.
Parameters
----------
relative : bool
Whether to compute the error in % relative to the signal amplitude
(True) or in mK (False). Default : True.
flow : float or None
The lower bound of the frequency band to compute the error in.
Default : None.
fhigh : float or None
The upper bound of the frequency bnd to compute the error in.
Default : None.
Returns
-------
err : np.ndarray
The computed errors.
"""
err = error(
self.signal_test,
self.predict(self.par_test),
relative=relative,
nu_arr=self.frequencies,
flow=flow,
fhigh=fhigh,
)
return err
def save(self):
raise NotImplementedError("Not implemented yet.")
class AutoEncoder(tf.keras.models.Model):
def __init__(
self,
signal_train=signal_train,
enc_hidden_dims=[],
dec_hidden_dims=[],
latent_dim=9,
activation_func="relu",
):
"""
Helper class that controls the autoencoder for the autoencoder-based
emulator.
Parameters
----------
signal_train : np.ndarray
The signals in the training set. Default : the signals defined in
the file "dataset_21cmVAE.h5", used by 21cmVAE
enc_hidden_dims : list of ints
The dimensions of the hidden layers of the encoder. Default : []
dec_hidden_dims : list of ints
The dimensions of the hidden layers of the decoder. Default : []
latent_dim : int
The dimension of the latent layer. Default : 9
activation_func: str or instance of tf.keras.activations
Activation function between hidden layers. Must be recognizable by
keras. Default : "relu"
Attributes
----------
encoder : tf.keras.Model
The encoder of the autoencoder.
decoder : tf.keras.Model
The decoder of the autoencoder.
Methods
-------
call : use the autoencoder to reconstruct the input.
"""
super().__init__()
self.encoder = _gen_model(
signal_train.shape[-1],
enc_hidden_dims,
latent_dim,
activation_func,
name="encoder",
)
self.decoder = _gen_model(
None,
dec_hidden_dims,
signal_train.shape[-1],
activation_func,
name="decoder",
)
def call(self, signals):
"""
Reconstruct the given input with the autoencoder.
Parameters
----------
x : np.ndarray
The signals to reconstruct with the autoencoder.
Returns
-------
reconstructed : np.ndarray
The reconstructed signals.
"""
reconstructed = self.decoder(self.encoder(signals))
return reconstructed
# default parameters
latent_dim = 9
enc_hidden_dims = [352]
dec_hidden_dims = [32, 352]
em_hidden_dims = [352, 352, 352, 224]
class AutoEncoderEmulator:
def __init__(
self,
par_train=par_train,
par_val=par_val,
par_test=par_test,
signal_train=signal_train,
signal_val=signal_val,
signal_test=signal_test,
latent_dim=latent_dim,
enc_hidden_dims=enc_hidden_dims,
dec_hidden_dims=dec_hidden_dims,
em_hidden_dims=em_hidden_dims,
activation_func="relu",
redshifts=redshifts,
frequencies=None,
):
"""
The autoencoder-based emulator class. This class provides the user
interface for building, training, and using an autoencoder-based
emulator, as described in Appendix A of the paper.
The default parameters are the ones used in Appendix A.
Parameters
----------
par_train : np.ndarray
Parameters in training set.
par_val : np.ndarray
Parameters in validation set.
par_test : np.ndarray
Parameters in test set.
signal_train : np.ndarray
Signals in training set.
signal_val : np.ndarray
Signals in validation set.
signal_test : np.ndarray
Signals in test set.
latent_dim : int
The dimension of the latent layer.
enc_hidden_dims : list of ints
The dimensions of the hidden layers of the encoder.
dec_hidden_dims : list of ints
The dimensions of the hidden layers of the decoder.
em_hidden_dims : list of ints
The dimensions of the hidden layers of the emulator.
activation_func: str or instance of tf.keras.activations
Activation function between hidden layers. Must be recognizable by
keras.
redshifts : np.ndarray or None
Array of redshifts corresponding to the signals used.
frequencies : np.ndarray or None
Array of frequencies corresponding to the signals used.
Attributes
----------
par_train : np.ndarray
Parameters in training set.
par_val : np.ndarray
Parameters in validation set.
par_test : np.ndarray
Parameters in test set.
signal_train : np.ndarray
Signals in training set.
signal_val : np.ndarray
Signals in validation set.
signal_test : np.ndarray
Signals in test set.
par_labels : list of str
The names of the astrophysical parameters.
autoencoder : AutoEncoder
An instance of the AutoEncoder class defined in this module.
emulator : tf.keras.Model
The emulator.
redshifts : np.ndarray or None
Array of redshifts corresponding to the signals used.
frequencies : np.ndarray or None
Array of frequencies corresponding to the signals used.
Methods
-------
load_model : load an exsisting model.
train : train the autoencoder and the emulator.
predict : use the emulator and decoder to predict global signals from
astrophysical input parameters
test_error : compute the test set error of the autoencoder or the
autoencider-based emulator.
save : save the class instance with all attributes.
"""
self.par_train = par_train
self.par_val = par_val
self.par_test = par_test
self.signal_train = signal_train
self.signal_val = signal_val
self.signal_test = signal_test
self.par_labels = [
"fstar",
"Vc",
"fx",
"tau",
"alpha",
"nu_min",
"Rmfp",
]
if frequencies is None:
if redshifts is not None:
frequencies = redshift2freq(redshifts)
elif redshifts is None:
redshifts = freq2redshift(frequencies)
self.redshifts = redshifts
self.frequencies = frequencies
autoencoder = AutoEncoder(
self.signal_train,
enc_hidden_dims,
dec_hidden_dims,
latent_dim,
activation_func,
)
# build autoencoder by calling it on a batch of data
autoencoder.build((None, self.signal_train.shape[-1]))
self.autoencoder = autoencoder
self.emulator = _gen_model(
self.par_train.shape[-1],
em_hidden_dims,
latent_dim,
activation_func,
name="ae_emualtor",
)
AE_PATH = PATH + "models/autoencoder_based_emulator/"
def load_model(
self,
emulator_path=AE_PATH + "ae_emulator.h5",
encoder_path=AE_PATH + "encoder.h5",
decoder_path=AE_PATH + "decoder.h5",
):
"""
Load a saved model. Default parameters are the paths to the models used
in Appendix A of the paper.
Parameters
----------
emulator_path : str
The path to the saved emulator.
encoder_path : str
The path to the saved encoder.
decoder_path : str
The path to the saved decoder.
Raises
------
IOError : if model_path does not point to a valid model.
"""
self.emulator = tf.keras.models.load_model(emulator_path)
encoder = tf.keras.models.load_model(encoder_path)
decoder = tf.keras.models.load_model(decoder_path)
autoencoder = AutoEncoder(signal_train=self.signal_train)
autoencoder.encoder = encoder
autoencoder.decoder = decoder
# build autoencoder by calling it on a batch of data
_ = autoencoder(pp.preproc(self.signal_test, self.signal_train))
self.autoencoder = autoencoder
def train(self, epochs, ae_callbacks=[], em_callbacks=[], verbose="tqdm"):
"""
Train the autoencoder and the emulator.
Parameters
----------
epochs : int
Number of epochs to train for.
ae_callbacks : list of tf.keras.callbacks.Callback
Callbacks to pass to the training method of the autoencoder.
Default : []
em_callbacks : list of tf.keras.callbacks.Callback
Callbacks to pass to the training method of the emulator.
Default : []
verbose : 0, 1, 2, or "tqdm"
Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per
epoch, "tqdm" = use progress bar from tqdm. Default : "tqdm"
Returns
-------
ae_loss : list of floats
Training set losses for the autoencoder.
ae_val_loss : list of floats
Validation set losses for the autoencoder.
loss : list of floats
Training set losses for the emulator.
val_loss : list of floats
Validation set losses for the emulator.
"""
y_train = pp.preproc(self.signal_train, self.signal_train)
y_val = pp.preproc(self.signal_val, self.signal_train)
if verbose == "tqdm":
ae_callbacks.append(TqdmCallback())
em_callbacks.append(TqdmCallback())
verbose = 0
hist = self.autoencoder.fit(
x=y_train,
y=y_train,
batch_size=256,
epochs=epochs,
validation_data=(y_val, y_val),
callbacks=ae_callbacks,
verbose=verbose,
)
ae_loss = hist.history["loss"]
ae_val_loss = hist.history["val_loss"]
X_train = pp.par_transform(self.par_train, self.par_train)
X_val = pp.par_transform(self.par_val, self.par_train)
y_train = self.autoencoder.encoder.predict(y_train)
y_val = self.autoencoder.encoder.predict(y_val)
hist = self.emulator.fit(
x=X_train,
y=y_train,
batch_size=256,
epochs=epochs,
validation_data=(X_val, y_val),
callbacks=em_callbacks,
verbose=verbose,
)
loss = hist.history["loss"]
val_loss = hist.history["val_loss"]
return ae_loss, ae_val_loss, loss, val_loss
def predict(self, params):
"""
Predict a (set of) global signal(s) from astrophysical parameters.
Parameters
----------
params : np.ndarray
The values of the astrophysical parameters. Must be in the order
given by the attrbiute par_labels. To predict a set of global
signals, input a 2d-array where each row correspond to a different
set of parameters.
Returns
-------
pred : np.ndarray
The predicted global signal(s).
"""
transformed_params = pp.par_transform(params, self.par_train)
em_pred = self.emulator.predict(transformed_params)
decoded = self.autoencoder.decoder.predict(em_pred)
pred = pp.unpreproc(decoded, self.signal_train)
if pred.shape[0] == 1:
return pred[0, :]
else:
return pred
def test_error(
self, use_autoencoder=False, relative=True, flow=None, fhigh=None
):
"""
Compute the error of the autoencoder or the autoencoder-based emulator
for each signal in the test set.
Parameters
----------
use_auteoncoder : bool
Compute the errors of the autoencoder (True) or the emulator
(False). Default : False
relative : bool
Whether to compute the error in % relative to the signal amplitude
(True) or in mK (False). Default : True.
flow : float or None
The lower bound of the frequency band to compute the error in.
Default : None.
fhigh : float or None
The upper bound of the frequency bnd to compute the error in.
Default : None.
Returns
-------
err : np.ndarray
The computed errors.
"""
if use_autoencoder:
pred = pp.unpreproc(
self.autoencoder(
pp.preproc(self.signal_test, self.signal_train)
),
self.signal_train,
)
else:
pred = self.predict(self.par_test)
err = error(
self.signal_test,
pred,
relative=relative,
nu_arr=self.frequencies,
flow=flow,
fhigh=fhigh,
)
return err
| 26,244 | 30.132859 | 79 | py |
21cmVAE | 21cmVAE-main/VeryAccurateEmulator/__init__.py | __version__ = "3.1.0"
__author__ = "Christian Hellum Bye"
from pathlib import Path
HERE = __file__[: -len("__init__.py")]
if not Path(HERE + "dataset_21cmVAE.h5").exists():
import requests
print("Downloading dataset.")
r = requests.get(
"https://zenodo.org/record/5084114/files/dataset_21cmVAE.h5?download=1"
)
with open(HERE + "dataset_21cmVAE.h5", "wb") as f:
f.write(r.content)
from VeryAccurateEmulator import emulator
from VeryAccurateEmulator import preprocess
| 508 | 24.45 | 79 | py |
21cmVAE | 21cmVAE-main/VeryAccurateEmulator/preprocess.py | import numpy as np
def preproc(signal: np.ndarray, signal_train: np.ndarray) -> np.ndarray:
"""
Preprocess all the signals in a dataset.
Parameters
----------
signal : np.ndarray
Array of signals to preprocess.
signal_train : np.ndarray
Array of the training set signals.
Returns
-------
proc_signal : np.ndarray
The preprocessed signals.
"""
proc_signal = signal.copy()
proc_signal -= np.mean(signal_train, axis=0) # subtract mean
proc_signal /= np.std(signal_train) # divide by standard deviation
return proc_signal
def unpreproc(signal: np.ndarray, signal_train: np.ndarray) -> np.ndarray:
"""
Inverse of preproc function.
Parameters
----------
signal : np.ndarray
Array of preprocesed signals to unpreprocess.
signal_train : np.ndarray
Array of the training set signals used for preprocessing.
Returns
--------
unproc_signal : np.ndarray
Array of the unpreprocessed signals.
"""
unproc_signal = signal * np.std(signal_train)
unproc_signal += np.mean(signal_train, axis=0)
return unproc_signal
def par_transform(
parameters: np.ndarray, params_train: np.ndarray
) -> np.ndarray:
"""
Preprocess a set of parameters the same way that the training
set parameters are processed:
that is, take log of first three columns and apply a linear map that makes
all the training set parameters be in the range [-1, 1]. Note that this
map will not send other sets of parameters to [-1, 1].
Parameters
----------
parameters : np.ndarray
Array of parameters.
params_train : np.ndarray
The parameters used to train the model.
Returns
-------
newparams : np.ndarray
The processed parameters.
"""
if len(np.shape(parameters)) == 1:
parameters = np.expand_dims(parameters, axis=0)
# first copy the parameters and take log of first three
cols12 = parameters[:, :2].copy() # fstar and Vc
fx = parameters[:, 2].copy() # fx
fx[fx == 0] = 10 ** (-6) # to avoid -inf in cases where fx == 0
newcols12 = np.log10(cols12) # log of fstar and Vc
newfx = np.log10(fx) # log of fx
# initialize arrays with processed parameters:
newparams = np.empty(parameters.shape)
newparams[:, :2] = newcols12 # copy the log of fstar and Vc
newparams[:, 2] = newfx # the log of fx
newparams[:, 3:] = parameters[
:, 3:
].copy() # copy the remaining parameters
# do the same for the training params
cols12_tr = params_train[:, :2].copy() # fstar and Vc
fx_tr = params_train[:, 2].copy() # fx
fx_tr[fx_tr == 0] = 10 ** (-6) # to avoid -inf in cases where fx == 0
newcols12_tr = np.log10(cols12_tr) # log of fstar and Vc
newfx_tr = np.log10(fx_tr) # log of fx
newparams_tr = np.empty(params_train.shape)
newparams_tr[:, :2] = newcols12_tr # copy the log of fstar and Vc
newparams_tr[:, 2] = newfx_tr # the log of fx
newparams_tr[:, 3:] = params_train[:, 3:].copy() # remaining parameters
# get the max and min values of each parameter in the training set
maximum = np.max(newparams_tr, axis=0)
minimum = np.min(newparams_tr, axis=0)
# subtract min, divide by (max-min), multiply by 2 and subtract 1 to get
# parameters in the range [-1, 1] for the case of the training set
newparams -= minimum # subtract min to get the range [0, max-min]
newparams /= maximum - minimum # divide by (max-min) to get [0, 1]
newparams *= 2
newparams -= 1 # multiply by 2, subtract 1 to get [-1, 1]
return newparams
| 3,677 | 32.135135 | 78 | py |
21cmVAE | 21cmVAE-main/tests/test_emulator.py | import h5py
import numpy as np
import tensorflow as tf
from VeryAccurateEmulator import emulator, __path__
import VeryAccurateEmulator.preprocess as pp
FILE = __path__[0] + "/dataset_21cmVAE.h5"
with h5py.File(FILE, "r") as hf:
signal_train = hf["signal_train"][:]
def test_gen_model():
in_dim = 7
hidden_dims = [32, 64, 256]
out_dim = 451
model = emulator._gen_model(in_dim, hidden_dims, out_dim, "relu")
all_dims = hidden_dims + [out_dim]
assert len(model.layers) == len(all_dims)
for i, layer in enumerate(model.layers):
shape = layer.output_shape[-1]
assert shape == all_dims[i]
def test_relative_mse_loss():
loss_fcn = emulator.relative_mse_loss(signal_train)
y_true = tf.convert_to_tensor(pp.preproc(signal_train[:10], signal_train))
y_pred = tf.convert_to_tensor(pp.preproc(signal_train[-10:], signal_train))
mse = tf.keras.metrics.mean_squared_error(y_true, y_pred)
amplitude = tf.convert_to_tensor(
np.max(np.abs(signal_train[:10] / np.std(signal_train)), axis=1)
)
rel_mse = mse / tf.keras.backend.square(amplitude)
assert np.allclose(rel_mse.numpy(), loss_fcn(y_true, y_pred).numpy())
def test_z_nu():
z = 30
nu = emulator.redshift2freq(z)
assert np.isclose(z, emulator.freq2redshift(nu))
def test_error():
z = np.linspace(5, 50, 451)
nu = emulator.redshift2freq(z)
assert np.allclose(
emulator.error(signal_train, signal_train), np.zeros(len(signal_train))
)
# direct emulator class
direm = emulator.DirectEmulator()
direm.load_model()
def test_predict():
# some random parameters:
pars = direm.par_test[0]
pred = direm.predict(pars)
true = direm.signal_test[0]
assert pred.shape == true.shape
# the emulator has a max error of 1.84 %
assert np.sqrt(np.mean((pred - true) ** 2)) / np.max(np.abs(true)) < 0.02
# vectorized call
pars = direm.par_test[:10]
pred_signals = direm.predict(pars)
assert pred_signals[0].shape == pred.shape
assert np.allclose(pred_signals[0], pred, atol=5e-5)
assert pred_signals.shape == (10, true.shape[0])
def test_test_error():
err = direm.test_error()
assert err.shape == (direm.signal_test.shape[0],)
# compare to table 1 in Bye et al. (2021)
assert np.allclose(err.mean(), 0.34, atol=1e-2)
assert np.allclose(np.median(err), 0.29, atol=1e-2)
err_mk = direm.test_error(relative=False)
assert np.allclose(err_mk.mean(), 0.54, atol=1e-2)
assert np.allclose(np.median(err_mk), 0.50, atol=1e-2)
# autoencoder-based emulator class
ae_em = emulator.AutoEncoderEmulator()
ae_em.load_model()
def test_predict_ae():
# some random parameters:
pars = ae_em.par_test[0]
pred = ae_em.predict(pars)
true = ae_em.signal_test[0]
assert pred.shape == true.shape
# error should be less than 5 % in all cases
assert np.sqrt(np.mean((pred - true) ** 2)) / np.max(np.abs(true)) < 0.05
# vectorized call
pars = ae_em.par_test[:10]
pred_signals = ae_em.predict(pars)
assert pred_signals[0].shape == pred.shape
assert np.allclose(pred_signals[0], pred, atol=5e-5)
assert pred_signals.shape == (10, true.shape[0])
def test_test_error():
err = ae_em.test_error()
assert err.shape == (direm.signal_test.shape[0],)
# compare to appendix A in Bye et al. (2021)
assert np.allclose(err.mean(), 0.39, atol=1e-2)
assert np.allclose(np.median(err), 0.35, atol=1e-2)
err_ae = ae_em.test_error(use_autoencoder=True)
assert np.allclose(err_ae.mean(), 0.33, atol=1e-2)
assert np.allclose(np.median(err_ae), 0.29, atol=1e-2)
| 3,644 | 30.973684 | 79 | py |
21cmVAE | 21cmVAE-main/tests/test_preprocess.py | import h5py
import numpy as np
from VeryAccurateEmulator import __path__
import VeryAccurateEmulator.preprocess as pp
FILE = __path__[0] + "/dataset_21cmVAE.h5"
with h5py.File(FILE, "r") as hf:
signal_train = hf["signal_train"][:]
par_train = hf["par_train"][:]
def test_proc():
proc_signal = pp.preproc(signal_train, signal_train)
mean = np.mean(proc_signal, axis=0)
assert np.allclose(mean, np.zeros_like(mean), atol=1e-3)
unproc = pp.unpreproc(proc_signal, signal_train)
assert np.allclose(unproc, signal_train, atol=5e-5)
def test_par_transform():
transformed = pp.par_transform(par_train, par_train)
max_par = transformed.max(axis=0)
min_par = transformed.min(axis=0)
assert np.allclose(max_par, np.ones_like(max_par))
assert np.allclose(min_par, -1 * np.ones_like(min_par))
| 834 | 29.925926 | 60 | py |
THULAC-Python | THULAC-Python-master/setup.py | #coding: utf-8
from setuptools import setup, find_packages
setup(
name = 'thulac',
# packages = ['thulac_test'], # this must be the same as the name above
version = '0.1.1',
description = 'A efficient Chinese text segmentation tool',
author = 'thunlp',
url = 'https://github.com/thunlp/THULAC-Python', # use the URL to the github repo
author_email = 'liuzy@tsinghua.edu.cn',
download_url = 'https://github.com/thunlp/THULAC-Python/archive/master.zip', # I'll explain this in a second
classifiers=[
'Programming Language :: Python :: 2.4',
'Programming Language :: Python :: 2.5',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
# 'Programming Language :: Python :: 3.2',
# 'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
keywords = ['segmentation'], # arbitrary keywords
packages = find_packages(),
package_data={'': ['*.txt', '*.dat', '*.bin', 'model_w']}
) | 1,072 | 38.740741 | 110 | py |
THULAC-Python | THULAC-Python-master/demo.py | #coding:utf-8
import thulac
thu1 = thulac.thulac(seg_only=True, model_path="请查看README下载相关模型放到thulac根目录或在这里写路径") #设置模式为行分词模式
a = thu1.cut("我爱北京天安门")
print(a)
| 161 | 17 | 96 | py |
THULAC-Python | THULAC-Python-master/thulac/__main__.py | import sys
import thulac
seg_only = False
if(len(sys.argv) >= 4 and sys.argv[3] == "-seg_only"):
seg_only = True
lac = thulac.thulac(seg_only=seg_only)
lac.cut_f(sys.argv[1], sys.argv[2]) | 190 | 20.222222 | 54 | py |
THULAC-Python | THULAC-Python-master/thulac/__init__.py | #__coding:utf-8
from .character.CBModel import CBModel
from .character.CBNGramFeature import CBNGramFeature
from .character.CBTaggingDecoder import CBTaggingDecoder
from .manage.Preprocesser import Preprocesser
from .manage.Postprocesser import Postprocesser
from .manage.Filter import Filter
from .manage.TimeWord import TimeWord
from .manage.Punctuation import Punctuation
from .manage.SoExtention import *
from .base.compatibility import decodeGenerator, cInputGenerator, encodeGenerator
from functools import reduce, partial
import itertools
import time
import os
import re
decode = decodeGenerator()
cInput = cInputGenerator()
encode = encodeGenerator()
'''程序入口,提供所有面向用户的接口'''
class thulac:
def __init__(self, user_dict = None, model_path = None, T2S = False, \
seg_only = False, filt = False, max_length = 50000, deli='_', rm_space=False):
'''初始化函数,传入用户设置的参数,并且根据参数初始化不同
模型(调入不同的.dat文件,该文件存储了一个双数组trie树)'''
self.__user_specified_dict_name = user_dict
self.__model_path_char = model_path
self.__separator = deli
self.__useT2S = T2S
self.__seg_only = seg_only
self.__use_filter = filt
self.__maxLength = max_length
self.rmSpace = rm_space
self.__coding = "utf-8"
self.__prefix = self.__setPrefix()
self.__poc_cands = []
self.__cws_tagging_decoder = None
self.__tagging_decoder = None
self.__preprocesserpreprocesser = Preprocesser(rm_space=rm_space)
self.__preprocesserpreprocesser.setT2SMap((self.__prefix+"t2s.dat"))
self.__nsDict = Postprocesser((self.__prefix+"ns.dat"), "ns", False)
self.__idiomDict = Postprocesser((self.__prefix+"idiom.dat"), "i", False)
self.__userDict = None
self.__timeword = TimeWord()
self.__punctuation = Punctuation(self.__prefix+"singlepun.dat")
self.__myfilter = None
self.__so = None
if(self.__user_specified_dict_name):
self.__userDict = Postprocesser(self.__user_specified_dict_name, "uw", True)
if(self.__use_filter):
self.__myfilter = Filter((self.__prefix+"xu.dat"), (self.__prefix+"time.dat"))
if(self.__seg_only):
self.__cws_tagging_decoder = CBTaggingDecoder()
self.__cws_tagging_decoder.init((self.__prefix+"cws_model.bin"), (self.__prefix+"cws_dat.bin"),(self.__prefix+"cws_label.txt"))
self.__cws_tagging_decoder.threshold = 0
self.__cws_tagging_decoder.separator = self.__separator
self.__cws_tagging_decoder.setLabelTrans()
else:
self.__tagging_decoder = CBTaggingDecoder()
self.__tagging_decoder.init((self.__prefix+"model_c_model.bin"),(self.__prefix+"model_c_dat.bin"),(self.__prefix+"model_c_label.txt"))
self.__tagging_decoder.threshold = 10000
self.__tagging_decoder.separator = self.__separator
self.__tagging_decoder.setLabelTrans()
def __setPrefix(self):
'''获取程序运行的路径,以此来确定models的绝对路径以及其他资源文件的路径'''
__prefix = ""
if(self.__model_path_char is not None):
__prefix = self.__model_path_char
if(__prefix[-1] != "/"):
__prefix = __prefix + "/"
else:
__prefix = os.path.dirname(os.path.realpath(__file__))+"/models/"
return __prefix
def __cutWithOutMethod(self, oiraw, cut_method, text = True):
'''分词,先将原始句子split为一个数组,之后遍历每一行,调用对单行分词的函数(有两种)。
text=True会返回分词好的字符串,为False则会返回一个二位数组方便用户做后续处理。
函数中有一些细节处理主要是用于规范输出格式'''
oiraw = oiraw.split('\n')
txt = ""
array = []
if(text):
for line in oiraw:
if(self.__seg_only):
temp_txt = reduce(lambda x, y: x + ' ' + y if y != " " else x, cut_method(line), '') + '\n'
else:
temp_txt = reduce(lambda x, y: x + ' ' + "".join(y), cut_method(line), '') + '\n'
txt += temp_txt[1:]
return txt[:-1]
else:
for line in oiraw:
if(line):
if(self.__seg_only):
array += (reduce(lambda x, y: x + [[y, '']], cut_method(line), []))
else:
array += (reduce(lambda x, y: x + [[y[0], y[2]]], cut_method(line), []))
array += [['\n', '']]
return array[:-1]
def cut(self, oiraw, text = False):
return self.__cutWithOutMethod(oiraw, self.__cutline, text = text)
def fast_cut(self, oiraw, text = False):
return self.__cutWithOutMethod(oiraw, self.__fast_cutline, text = text)
def __cutline(self, oiraw):
'''对单行进行分词,这段函数包含前处理preprogress.py以及一系列后处理,将分词结果返回为一个map'''
oiraw = decode(oiraw)
vec = []
if(len(oiraw) < self.__maxLength):
vec.append(oiraw)
else:
vec = self.__cutRaw(oiraw, self.__maxLength)
ans = []
for oiraw in vec:
if(self.__useT2S):
traw, __poc_cands = self.__preprocesserpreprocesser.clean(oiraw)
raw = self.__preprocesserpreprocesser.T2S(traw)
else:
raw, __poc_cands = self.__preprocesserpreprocesser.clean(oiraw)
# raw = oiraw
if(len(raw) > 0):
if(self.__seg_only):
tmp, tagged = self.__cws_tagging_decoder.segmentTag(raw, __poc_cands)
segged = self.__cws_tagging_decoder.get_seg_result()
if(self.__userDict is not None):
self.__userDict.adjustSeg(segged)
if(self.__use_filter):
self.__myfilter.adjustSeg(segged)
self.__nsDict.adjustSeg(segged)
self.__idiomDict.adjustSeg(segged)
self.__timeword.adjustSeg(segged)
self.__punctuation.adjustSeg(segged)
ans.extend(segged)
# return list(map(lambda x: encode(x), segged))
else:
tmp, tagged = self.__tagging_decoder.segmentTag(raw, __poc_cands)
if(self.__userDict is not None):
self.__userDict.adjustTag(tagged)
if(self.__use_filter):
self.__myfilter.adjustTag(tagged)
self.__nsDict.adjustTag(tagged)
self.__idiomDict.adjustTag(tagged)
self.__timeword.adjustTag(tagged)
self.__punctuation.adjustTag(tagged)
ans.extend(tagged)
if(self.__seg_only):
return map(lambda x: encode(x), ans)
else:
return map(lambda x: (encode(x[0]), encode(x[1]), encode(x[2])), ans)
def foo(self, x):
return x
def __SoInit(self):
'''fast_cut函数需要使用thulac.so,在这里导入.so文件'''
if(not self.__user_specified_dict_name):
self.__user_specified_dict_name = ''
return SoExtention(self.__prefix, self.__user_specified_dict_name, self.__useT2S, self.__seg_only)
def __fast_cutline(self, oiraw):
if(not self.__so):
self.__so = self.__SoInit()
result = self.__so.seg(oiraw).split()
if not self.__seg_only:
result = map(lambda x: re.split(r'(_)', x), result)
return result
def run(self):
'''命令行交互程序'''
while(True):
oiraw = cInput()
if(len(oiraw) < 1):
break
cutted = self.cut(oiraw, text = True)
print(cutted)
def cut_f(self, input_file, output_file):
input_f = open(input_file, 'r')
output_f = open(output_file, 'w')
for line in input_f:
cutted = self.cut(line, text = True)
output_f.write(cutted + "\n")
output_f.close()
input_f.close()
print("successfully cut file " + input_file + "!")
def fast_cut_f(self, input_file, output_file):
input_f = open(input_file, 'r')
output_f = open(output_file, 'w')
for line in input_f:
cutted = self.fast_cut(line, text = True)
output_f.write(cutted + "\n")
output_f.close()
input_f.close()
print("successfully cut file " + input_file + "!")
def __cutRaw(self, oiraw, maxLength):
'''现将句子按句子完结符号切分,如果切分完后一个句子长度超过限定值
,再对该句子进行切分'''
vec = []
m = re.findall(u".*?[。?!;;!?]", oiraw)
num, l, last = 0, 0, 0
for i in range(len(m)):
if(num + len(m[i]) >= maxLength):
vec.append("".join(m[last:i]))
last = i
num = len(m[i])
else:
num += len(m[i])
l += len(m[i])
if(len(oiraw)-l + num >= maxLength):
vec.append("".join(m[last:len(m)]))
vec.append(oiraw[l:])
else:
vec.append(oiraw[l-num:])
return vec
def multiprocessing_cut_f(self, input_file, output_file, core = 0):
from multiprocessing import Pool
if core:
p = Pool(1)
else:
p = Pool()
output_f = open(output_file, 'w')
input_f = open(input_file, 'r')
f = input_f.readlines()
# thu = thulac(seg_only=True)
cutline = partial(_cutline, self)
# print(cutline("我爱北京天安门"))
x = p.map(func_cutline, itertools.izip(itertools.repeat(self), f))
for line in x:
line_text = " ".join(line)
output_f.write(line_text)
output_f.close()
def cutline(self, oiraw):
return self.__cutline(oiraw)
def _cutline(lac, x):
return lac.cutline(x)
def func_cutline(lac_x):
"""Convert `f([1,2])` to `f(1,2)` call."""
return _cutline(*lac_x)
| 9,925 | 37.773438 | 146 | py |
THULAC-Python | THULAC-Python-master/thulac/character/CBTaggingDecoder.py | #coding = utf-8
from .CBModel import CBModel
from .CBNGramFeature import CBNGramFeature
from ..base.Node import Node
from ..base.Dat import Dat
from ..base.WordWithTag import WordWithTag
from ..base.AlphaBeta import AlphaBeta
import time
import array
class CBTaggingDecoder:
def __init__(self):
self.separator = '_'
self.maxLength = 50000
self.len = 0
self.sequence = ""
self.allowedLabelLists = []
for i in range(self.maxLength):
self.allowedLabelLists.append([])
self.pocsToTags = None
self.nGramFeature = None
self.dat = None
self.nodes = [Node() for i in range(self.maxLength)]
self.labelTrans = None
self.labelTransPre = None
self.labelTransPost = None
self.threshold = 0
self.allowCom = [0 for i in range(self.maxLength)]
self.tagSize = 0
self.model = None
self.alphas = None
# self.betas = None
def init(self, modelFile, datFile, labelFile):
self.model = CBModel(modelFile)
self.values = {}
self.result = {}
for i in range(self.maxLength):
pre = (i - 1)
self.nodes[i].predecessors = pre
pre = (i + 1)
self.nodes[i].successors = pre
self.dat = Dat(datFile)
self.nGramFeature = CBNGramFeature(self.dat, self.model)
self.labelInfo = ["" for i in range(10000)]
self.pocTags = []
for i in range(16):
self.pocTags.append([])
labelin = open(labelFile, "r")
line = ""
ind = 0
line = labelin.readline()
while(len(line) > 0):
self.labelInfo[ind] = line
segInd = int(line[0]) - int('0')
for j in range(16):
if(((1 << segInd) & j) != 0):
self.pocTags[j].append(ind)
ind = ind + 1
line = labelin.readline()
labelin.close()
self.pocsToTags = [[] for i in range(16)]
for j in range(1, 16, 1):
self.pocsToTags[j] = [0 for i in range(len(self.pocTags[j]) + 1)]
for k in range(len(self.pocTags[j])):
self.pocsToTags[j][k] = self.pocTags[j][k]
self.pocsToTags[j][len(self.pocTags[j])] = -1
self.labelLookingFor = [[] for i in range(self.model.l_size)]
for i in range(self.model.l_size):
self.labelLookingFor[i] = None
for i in range(self.model.l_size):
if(self.labelInfo[i][0] == '0' or self.labelInfo[i][0] == '3'):
continue
for j in range(i + 1):
if((self.labelInfo[i][1:] == self.labelInfo[j][1:]) and (self.labelInfo[j][0] == '0')):
if(self.labelLookingFor[j] is None):
self.labelLookingFor[j] = [0, 0]
self.labelLookingFor[j][0] = -1
self.labelLookingFor[j][1] = -1
self.tagSize = self.tagSize + 1
self.labelLookingFor[j][int(self.labelInfo[i][0])-1] = i
break
for i in range(self.maxLength):
self.allowedLabelLists[i] = None
self.isGoodChoice = [0 for i in range(self.maxLength * self.model.l_size)]
print("Model loaded succeed")
def dp(self):
if(self.allowedLabelLists[0] is None):
self.allowedLabelLists[0] = self.pocsToTags[9]
if(self.allowedLabelLists[self.len - 1] is None):
self.allowedLabelLists[self.len - 1] = self.pocsToTags[12]
alp = AlphaBeta()
self.result = {}
self.alphas = []
self.bestScore = alp.dbDecode(self.model.l_size, self.model.ll_weights, self.len, self.nodes, self.values, self.alphas, self.result, self.labelTransPre, self.allowedLabelLists)
self.allowedLabelLists[0] = None
self.allowedLabelLists[self.len - 1] = None
def setLabelTrans(self):
lSize = self.model.l_size
preLabels = [[] for i in range(lSize)]
postLabels = [[] for i in range(lSize)]
for i in range(lSize):
for j in range(lSize):
ni = int(self.labelInfo[i][0]) - 0
nj = int(self.labelInfo[j][0]) - 0
iIsEnd = ((ni == 2) or (ni == 3))
jIsBegin = ((nj == 0) or (nj == 3))
sameTag = self.labelInfo[i][1:] == self.labelInfo[j][1:]
if(sameTag):
if((ni == 0 and nj == 1) or \
(ni == 0 and nj == 2) or \
(ni == 1 and nj == 2) or \
(ni == 1 and nj == 1) or \
(ni == 2 and nj == 0) or \
(ni == 2 and nj == 3) or \
(ni == 3 and nj == 3) or \
(ni == 3 and nj == 0)):
preLabels[j].append(i)
postLabels[i].append(j)
else:
if(iIsEnd and jIsBegin):
preLabels[j].append(i)
postLabels[i].append(j)
self.labelTransPre = [[] for i in range(lSize)]
for i in range(lSize):
self.labelTransPre[i] = [0 for k in range(len(preLabels[i])+1)]
for j in range(len(preLabels[i])):
self.labelTransPre[i][j] = preLabels[i][j]
self.labelTransPre[i][len(preLabels[i])] = -1
self.labelTransPost = [[] for i in range(lSize)]
for i in range(lSize):
self.labelTransPost[i] = [0 for k in range(len(postLabels[i])+1)]
for j in range(len(postLabels[i])):
self.labelTransPost[i][j] = postLabels[i][j]
self.labelTransPost[i][len(postLabels[i])] = -1
def putValues(self):
if(self.len == 0):
return
for i in range(self.len):
self.nodes[i].type = 0
self.nodes[0].type += 1
self.nodes[self.len-1].type += 2
tmp = [0 for i in range(self.len * self.model.l_size)]
self.values = array.array("i", tmp)
self.nGramFeature.putValues(self.sequence, self.len, self.values)
self.values = tuple(self.values)
def segmentTag(self, raw, graph):
if(len(raw) == 0):
return 0, []
for i in range(len(raw)):
pocs = graph[i]
if(pocs != 0):
self.allowedLabelLists[i] = self.pocsToTags[pocs]
else:
self.allowedLabelLists[i] = self.pocsToTags[15]
self.sequence = raw
self.len = len(raw)
start = time.clock()
self.putValues()
self.dp()
offset = 0
if(len(self.labelInfo[0]) < 2):
return 1, []
ts = []
for i in range(self.len):
if(i not in self.result):
self.result[i] = 0
if((i == self.len-1) or (self.labelInfo[self.result[i]][0] == '2') or (self.labelInfo[self.result[i]][0] == '3')):
ts.append((self.sequence[offset:i+1], self.separator, self.labelInfo[self.result[i]][1:-1]))
offset = i + 1
return 1, ts
def get_seg_result(self):
segged = []
offset = 0
for i in range(self.len):
if((i == 0) or (self.labelInfo[self.result[i]][0] == '0') or (self.labelInfo[self.result[i]][0] == '3')):
segged.append(self.sequence[offset:i])
offset = i
segged.append(self.sequence[offset:self.len])
return segged[1:]
| 7,614 | 37.852041 | 184 | py |
THULAC-Python | THULAC-Python-master/thulac/character/CBNGramFeature.py | #coding = utf-8
# from ..base import Dat
import time
class CBNGramFeature:
SENTENCE_BOUNDARY = '#'
SEPERATOR = "_"
maxLength = 0
uniBases = []
biBases = []
datSize = []
dat = []
values = {}
def __init__(self, myDat, model):
self.SEPERATOR = ' '
self.datSize = myDat.getDatSize()
self.dat = myDat.getDat()
self.model = model
self.maxLength = 50000
self.uniBases = [0 for i in range(self.maxLength + 2)]
self.biBases = [0 for i in range(self.maxLength + 4)]
def addValues(self, valueOffset, base, dele, tmp):
ind = self.dat[2 * base] + dele
if(ind >= self.datSize or self.dat[2 * ind + 1] != base):
return
# offset = self.dat[2 * ind]
weightOffset = self.model.l_size * self.dat[2 * ind]
if(self.model.l_size == 4):
# pass
tmp[0] += self.model.fl_weights[weightOffset]
tmp[1] += self.model.fl_weights[weightOffset + 1]
tmp[2] += self.model.fl_weights[weightOffset + 2]
tmp[3] += self.model.fl_weights[weightOffset + 3]
else:
for i in range(self.model.l_size):
self.values[valueOffset + i] += self.model.fl_weights[weightOffset + i]
def findBases(self, datSize, ch1, ch2):
result = []
uniBase = 0
biBase = 0
ch1 = ord(ch1)
ch2 = ord(ch2)
if(ch1 > 32 and ch1 < 128):
ch1 += 65248
if(ch2 > 32 and ch2 < 128):
ch2 += 65248
if(ch1 >= datSize or 2 *self.dat[2 * ch1 + 1] != 0):
# uniBase = -1
# biBase = -1
# result = (-1, -1)
return -1, -1
uniBase = self.dat[2 * ch1] + ord(self.SEPERATOR)
ind = self.dat[2 * ch1] + ch2
if(ind >= datSize or self.dat[2 * ind + 1] != ch1):
return uniBase, -1
biBase = self.dat[2 * ind] + ord(self.SEPERATOR)
return uniBase, biBase
def putValues(self, sequence, mylen, values):
if(mylen >= self.maxLength):
print("larger than max")
return 1
self.values = values
self.uniBases[0], self.biBases[0] = self.findBases(self.datSize, self.SENTENCE_BOUNDARY, self.SENTENCE_BOUNDARY)
self.uniBases[0], self.biBases[1] = self.findBases(self.datSize, self.SENTENCE_BOUNDARY, sequence[0])
for i in range(mylen - 1):
self.uniBases[i + 1], self.biBases[i + 2] = self.findBases(self.datSize, sequence[i], sequence[i+1])
self.uniBases[mylen], self.biBases[mylen + 1] = self.findBases(self.datSize, sequence[mylen - 1], self.SENTENCE_BOUNDARY)
self.uniBases[mylen+1], self.biBases[mylen+2] = self.findBases(self.datSize, self.SENTENCE_BOUNDARY, self.SENTENCE_BOUNDARY)
for i in range(mylen):
tmp = [0, 0, 0, 0]
valueOffset = i * self.model.l_size
if((self.uniBases[i + 1]) != -1):
self.addValues(valueOffset, self.uniBases[i + 1], 49, tmp)
if((self.uniBases[i]) != -1):
self.addValues(valueOffset, self.uniBases[i], 50, tmp)
if((self.uniBases[i + 2]) != -1):
self.addValues(valueOffset, self.uniBases[i + 2], 51, tmp)
if(self.biBases[i + 1] != -1):
self.addValues(valueOffset, self.biBases[i + 1], 49, tmp)
if((self.biBases[i + 2]) != -1):
self.addValues(valueOffset, self.biBases[i + 2], 50, tmp)
if((self.biBases[i]) != -1):
self.addValues(valueOffset, self.biBases[i], 51, tmp)
if((self.biBases[i + 3]) != -1):
self.addValues(valueOffset, self.biBases[i + 3], 52, tmp)
self.values[valueOffset] += tmp[0]
self.values[valueOffset + 1] += tmp[1]
self.values[valueOffset + 2] += tmp[2]
self.values[valueOffset + 3] += tmp[3]
return 0
| 3,995 | 37.057143 | 132 | py |
THULAC-Python | THULAC-Python-master/thulac/character/CBModel.py | import struct
import binascii
import codecs
class CBModel:
DEC = 1000
l_size = 0
f_size = 0
ll_weights = []
fl_weights = []
ave_ll_weights = []
ave_fl_weights = []
def reset_ave_weights(self):
self.ave_ll_weights = [0.0 for i in range(l * l)]
self.ave_fl_weights = [0.0 for i in range(f * l)]
def update_ll_weights(self, i, j, delta, steps):
ind = i * self.l_size + j
self.ll_weights[ind] += delta
self.ave_ll_weights[ind] += steps * delta
def update_fl_weights(self, i, j, delta, steps):
ind = i * self.l_size + j
self.fl_weights[ind] += delta
self.ave_fl_weights[ind] += steps * delta
def average(self, step):
for i in range(self.l_size * self.f_size):
self.fl_weights[i] = int((float(self.fl_weights[i]) - self.ave_fl_weights[i] / float(step)) * self.DEC + 0.5)
for i in range(self.l_size * self.l_size):
self.ll_weights[i] = int((float(self.ll_weights[i]) - self.ave_ll_weights[i] / float(step)) * self.DEC + 0.5)
def byteToInt(self, s):
ans = ""+s[6]+s[7]+s[4]+s[5]+s[2]+s[3]+s[0]+s[1]
ans = int(ans, 16)
if(s[6] >= '8'):
return -((1 << 32) - ans)
return ans
def __init__(self, filename):
inputfile = open(filename, "rb")
temp = inputfile.read(4)
temp = codecs.decode(binascii.hexlify(temp))
self.l_size = self.byteToInt(temp)
temp = inputfile.read(4)
temp = codecs.decode(binascii.hexlify(temp))
self.f_size = self.byteToInt(temp)
temp = inputfile.read(4 * self.l_size * self.l_size)
self.ll_weights = struct.unpack("<"+str(self.l_size * self.l_size)+"i", temp)
self.ll_weights = tuple(self.ll_weights)
temp = inputfile.read(4 * self.f_size * self.l_size)
self.fl_weights = struct.unpack("<"+str(self.f_size * self.l_size)+"i", temp)
inputfile.close()
if __name__ == '__main__':
mod = CBModel("cws_model.bin")
print(len(mod.fl_weights))
| 2,094 | 30.742424 | 121 | py |
THULAC-Python | THULAC-Python-master/thulac/character/__init__.py | 0 | 0 | 0 | py | |
THULAC-Python | THULAC-Python-master/thulac/base/compatibility.py | #coding: utf-8
import sys
from ctypes import c_char, c_char_p, cast, POINTER, c_wchar_p
'''本模块用于兼容python2和python3,所有函数都会返回适用于对应版本的处理函数'''
isPython2 = sys.version_info[0] == 2
def decodeGenerator():
'''兼容2的decode函数'''
if(isPython2):
return lambda s: s.decode('utf-8')
return lambda s: s
def encodeGenerator():
'''兼容2的encode函数'''
if(isPython2):
return lambda s: s.encode('utf-8')
return lambda s: s
def cInputGenerator():
'''兼容2的raw_input和3的input函数'''
if(isPython2):
return lambda : raw_input().strip()
else:
return lambda : input().strip()
def chrGenerator():
'''兼容2的unichr和3的chr函数'''
if(isPython2):
return lambda s: unichr(s)
else:
return lambda s: chr(s)
def fixC_char_p():
'''针对python3中ctypes中c_char_p字符串参数需要手动转码的函数'''
if(isPython2):
return lambda s: s
return lambda s: s.encode('utf-8')
| 911 | 23 | 61 | py |
THULAC-Python | THULAC-Python-master/thulac/base/AlphaBeta.py | import time
class AlphaBeta:
value = 0
nodeId = 0
labelId = 0
def __init__(self):
self.value = 0
self.nodeId = -2
self.labelId = 0
def dbDecode(self, l_size, llWeights, nodeCount, nodes, values, alphas, result, preLabels, allowedLabelLists):
nodeId = 0
pNodeId = []
pPreLabel = []
pAllowedLabel = []
k = 0
j = 0
tmp = AlphaBeta()
best = (0, -1, 0)
preAlpha = AlphaBeta()
score = 0
index = 0
index2 = 0
index3 = 0
for i in range(nodeCount):
alphas.append({})
pAllowedLabel = allowedLabelLists[i]
for j in pAllowedLabel:
if(j == -1):
break
tmp = (0, -2, 0)
nodeId = nodes[i].predecessors
pPreLabel = preLabels[j]
if(nodeId >= 0):
for k in pPreLabel:
if(k == -1):
break
if(k not in alphas[nodeId]):
continue
preAlpha = alphas[nodeId][k]
if(preAlpha[1] == -2):
continue
score = preAlpha[0] + llWeights[k*l_size + j]
if((tmp[1] < 0) or (score > tmp[0])):
tmp = (score, nodeId, k)
if((nodes[i].type == 1) or (nodes[i].type == 3)):
tmp = (tmp[0] + values[i*l_size + j], -1, tmp[2])
else:
tmp = (tmp[0] + values[i*l_size + j], tmp[1], tmp[2])
if(nodes[i].type >= 2):
if((best[1] == -1) or best[0] < tmp[0]):
best = (tmp[0], i, j)
alphas[i][j] = tmp
tmp = best
while(tmp[1] >= 0):
result[tmp[1]] = tmp[2]
if(tmp[2] in alphas[tmp[1]]):
tmp = alphas[tmp[1]][tmp[2]]
else:
break
return best[0]
| 2,117 | 28.830986 | 114 | py |
THULAC-Python | THULAC-Python-master/thulac/base/Node.py | class Node:
type = 0
predecessors = []
successors = []
| 67 | 12.6 | 21 | py |
THULAC-Python | THULAC-Python-master/thulac/base/Dat.py | #coding: utf-8
import struct
import os
import functools
import sys
class Dat:
def __init__(self, filename=None, datSize=None, oldDat=None):
if(filename):
try:
inputfile = open(filename, "rb")
except:
print("open file %s failed" % filename)
sys.exit()
self.datSize = int(os.path.getsize(filename) / 8)
s = inputfile.read(8 * self.datSize)
tmp = "<"+str(self.datSize*2)+"i"
self.dat = struct.unpack(tmp, s)
self.dat = tuple(self.dat)
inputfile.close()
else:
self.dat = oldDat
self.dat = tuple(self.dat)
self.datSize = datSize
def printDat(self, filename):
f = open(filename, "w")
for i in range(self.datSize):
f.write(""+self.dat[2 * i]+" "+self.dat[2 * i + 1]+"\n")
f.close()
def getIndex(self, base, character):
ind = self.dat[2 * base] + character
if((ind >= self.datSize) or self.dat[2 * ind + 1] != base):
return -1
return ind
def search(self, sentence, bs, es):
bs = []
es = []
empty = True
for offset in range(len(sentence)):
preBase = 0
preInd = 0
ind = 0
for i in range(offset, len(sentence)):
ind = preBase + sentence[i]
if(ind < 0 or ind >= self.datSize or self.dat[2 * ind + 1] != preInd):
break
preInd = ind
preBase = self.dat[2 * ind]
ind = preBase
if(not (ind < 0 or ind >= self.datSize or self.dat[2 * ind + 1] != preInd)):
bs.append(offset)
es.append(i + 1)
if(empty):
empty = False
return not empty
def match(self, word):
ind = 0
base = 0
for i in range(len(word)):
ind = self.dat[2 * ind] + ord(word[i])
if((ind >= self.datSize) or (self.dat[2 * ind + 1] != base)):
return -1
base = ind
ind = self.dat[2 * base]
if((ind < self.datSize) and (self.dat[2 * ind + 1] == base)):
return ind
return -1
def update(self, word, value):
base = self.match(word)
if(base >= 0):
self.dat[2 * base] = value
def getInfo(self, prefix):
ind = 0
base = 0
for i in range(len(prefix)):
ind = self.dat[2 * ind] + ord(prefix[i])
if((ind >= self.datSize) or self.dat[2 * ind + 1] != base):
return i
base = ind
return -base
def getDatSize(self):
return self.datSize
def getDat(self):
return self.dat
def compareWords(firstLex, secondLex):
minSize = min(len(firstLex[0]), len(secondLex[0]))
for i in range(minSize):
if(firstLex[0][i] > secondLex[0][i]):
return 1
if(firstLex[0][i] < secondLex[0][i]):
return -1
if(len(firstLex[0]) > len(secondLex[0])):
return 1
if(len(firstLex[0]) < len(secondLex[0])):
return -1
return 0
class DATMaker(Dat):
def __init__(self):
self.head = 0
self.tail = 0
self.datSize = 1
self.dat = [1, -1]
def use(self, ind):
if(self.dat[2 * ind + 1] >= 0):
print("cell reused!")
if(self.dat[2 * ind] == 1):
self.head = self.dat[2 * ind + 1]
else:
self.dat[2 * (-self.dat[2 * ind]) +1] = self.dat[2 * ind +1]
if(self.dat[2 * ind + 1] == -self.datSize):
self.tail = self.dat[2 * ind]
else:
self.dat[2 * (-self.dat[2 * ind + 1])] = self.dat[2 * ind]
self.dat[2 * ind + 1] = ind
def extends(self):
oldSize = self.datSize
self.datSize *= 2
# self.dat = [self.dat[i] if i < 2*oldSize else 0 for i in range(2*self.datSize)]
for i in range(oldSize):
self.dat.append(-(oldSize + i - 1))
self.dat.append(-(oldSize + i + 1))
self.dat[2 * oldSize] = self.tail
if(-self.tail >= 0):
self.dat[2 * (-self.tail) + 1] = -oldSize;
self.tail = -(oldSize * 2 - 1)
def shrink(self):
last = self.datSize - 1
while(self.dat[2 * last + 1] < 0):
last -= 1
self.datSize = last + 1
self.dat = [self.dat[i] for i in range(2 * self.datSize)]
def alloc(self, offsets):
size = len(offsets)
base = -self.head
while(1):
if(base == self.datSize):
self.extends()
if(size):
while(2 * (base + ord(offsets[size - 1])) >= self.datSize):
self.extends()
flag = True
if(self.dat[2 * base + 1] >= 0):
flag = False
else:
for i in range(size):
if(self.dat[2 * (base + ord(offsets[i])) + 1] >= 0 or self.dat[2 * (base + ord(offsets[i])) + 1] >=0):
flag = False
break
if(flag):
self.use(base)
for i in range(size):
self.use(base + ord(offsets[i]))
return base
if(self.dat[2 * base + 1] == -self.datSize):
self.extends()
base = -self.dat[2 * base + 1]
def genChildren(self, lexicon, start, prefix, children):
del children[:]
l = len(prefix)
for ind in range(start, len(lexicon)):
word = lexicon[ind][0]
if(len(word) < l):
return
for i in range(l):
if(word[i] != prefix[i]):
return
if(len(word) > l):
a = word[l]
if(not children or word[l] != children[-1]):
children.append(word[l])
def assign(self, check, offsets, isWord=False):
base = self.alloc(offsets)
self.dat[2 * base] = 0
if(isWord):
self.dat[2 * base + 1] = check
else:
self.dat[2 * base + 1] = base
for i in range(len(offsets)):
self.dat[2 * (base + ord(offsets[i]))] = 0
self.dat[2 * (base + ord(offsets[i])) + 1] = check
self.dat[2 * check] = base
return base
def makeDat(self, lexicon, noPrefix=0):
# print(type(lexicon))
if(sys.version_info[0] == 2):
lexicon = sorted(lexicon, cmp=compareWords)
else:
lexicon = sorted(lexicon, key=functools.cmp_to_key(compareWords))
# for i in lexicon:
# print i[0].encode('utf-8')
size = len(lexicon)
children = []
prefix = ""
self.genChildren(lexicon, 0, prefix, children)
base = self.assign(0, children, True)
self.dat[0] = base
for i in range(len(lexicon)):
word = lexicon[i][0]
off = Dat.getInfo(self, word)
if(off <= 0):
off = len(word)
for offset in range(off, len(word)+1):
prefix = ""
for j in range(offset):
prefix += word[j]
pBase = -Dat.getInfo(self, prefix)
self.genChildren(lexicon, i, prefix, children)
base = self.assign(pBase,children,offset == len(word))
off = -Dat.getInfo(self, word)
self.dat[2 * self.dat[2 * off]] = lexicon[i][1]
self.shrink()
if __name__ == '__main__':
dat = DATMaker()
lexicon = []
f = open("../../cs.txt", "r")
for i, line in enumerate(f):
line = line.split()
lexicon.append([line[0].decode("utf-8"), i])
f.close()
dat.makeDat(lexicon)
# dat.shrink
print(len(dat.dat)/2)
print(dat.dat[:500])
| 7,965 | 30.239216 | 122 | py |
THULAC-Python | THULAC-Python-master/thulac/base/__init__.py | 0 | 0 | 0 | py | |
THULAC-Python | THULAC-Python-master/thulac/base/WordWithTag.py | class WordWithTag:
word = ""
tag = ""
separator = ''
def __init__(self, separator):
self.separator = separator
| 137 | 14.333333 | 34 | py |
THULAC-Python | THULAC-Python-master/thulac/manage/verbword.py | from ..base.Dat import Dat
class VerbWord():
def __init__(self, filename1, filename2):
self.__vmDat = Dat(filename=filename1)
self.__vdDat = Dat(filename=filename2)
self.__tagV = 'v'
def adjustTag(self, sentence):
if(not self.__vmDat or not self.__vdDat):
return
for i in range(len(sentence)-1):
if(sentence[i].tag == self.__tagV and sentence[i+1].tag == self.__tagV):
if(self.__vmDat.match(sentence[i].word) != -1):
sentence[i].tag = 'vm'
elif(self.__vdDat.match(sentence[i+1].word) != -1):
sentence[i+1].tag = 'vd'
| 557 | 26.9 | 75 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.