repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/NTU.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
def SubSetSampling_func(inputs, reduce=2):
    """Sub-sample the training list, keeping every `reduce`-th sample per class.

    Args:
        inputs: list of (path, num_frames, label) tuples.
        reduce: keep one sample out of every `reduce` within each class
            (default 2, i.e. halve the set; 1 keeps everything).

    Returns:
        The reduced list of (path, num_frames, label) tuples, grouped by label
        in order of first appearance (dicts preserve insertion order).
    """
    from collections import defaultdict

    print('Total training examples', len(inputs))
    # Group (path, num_frames) pairs by class label so the stride is applied
    # per class rather than over the whole (possibly class-ordered) list.
    sample_dict = defaultdict(list)
    for p, n, l in inputs:
        sample_dict[l].append((p, n))
    # Keep every `reduce`-th entry of each class, then flatten back to tuples.
    inputs = [(p, n, l) for l, v in sample_dict.items() for p, n in v[::reduce]]
    print('Total training examples after sampling', len(inputs))
    return inputs
class NTUData(Datasets):
    """NTU RGB+D dataset wrapper.

    Resolves frame directories for the RGB ('ImagesResize') and depth
    ('nturgb+d_depth_masked') modalities and delegates clip loading to the
    `Datasets` base class (`image_propose`, `get_sl`, `self.inputs`, `self.typ`
    are all provided by the base, which is not visible in this file).
    """
    def __init__(self, args, ground_truth, modality, phase='train'):
        super(NTUData, self).__init__(args, ground_truth, modality, phase)
        # sub-set sampling
        # self.inputs = SubSetSampling_func(self.inputs)
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        # self.inputs[index] is (relative_path, num_frames, label); get_sl picks
        # the frame indices to sample from the clip.
        sl = self.get_sl(self.inputs[index][1])
        if self.typ == 'rgb':
            self.data_path = os.path.join(self.dataset_root, 'ImagesResize', self.inputs[index][0])
        if self.typ == 'depth':
            # [:-4] strips a 4-char suffix from the listed name — presumably a
            # file extension so the entry maps to a frame directory; TODO confirm.
            self.data_path = os.path.join(self.dataset_root, 'nturgb+d_depth_masked', self.inputs[index][0][:-4])
        self.clip, skgmaparr = self.image_propose(self.data_path, sl)
        if self.args.Network == 'FusionNet' or self.args.model_ema:
            # Fusion/EMA mode additionally loads the depth clip with the SAME
            # sampled frame indices so both modalities stay aligned.
            assert self.typ == 'rgb'
            self.data_path = os.path.join(self.dataset_root, 'nturgb+d_depth_masked', self.inputs[index][0][:-4])
            self.clip1, skgmaparr1 = self.image_propose(self.data_path, sl)
            # permute: (T, H, W, C) -> (T, C, H, W) — assumes image_propose
            # returns channels-last clips; verify against the base class.
            return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), \
                   self.inputs[index][2], self.data_path
        return self.clip.permute(0, 3, 1, 2), skgmaparr.permute(0, 3, 1, 2), self.inputs[index][2], self.inputs[index][0]
    def get_path(self, imgs_path, a):
        # Build a frame filename from a 0-based index; on-disk frames appear to
        # be 1-based, hence a + 1.
        if self.typ == 'rgb':
            return os.path.join(imgs_path, "%06d.jpg" % int(a + 1))
        elif self.typ == 'depth':
            return os.path.join(imgs_path, "MDepth-%08d.png" % int(a + 1))
    def __len__(self):
        # One entry per annotated sample.
        return len(self.inputs)
| 2,462 | 35.761194 | 121 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/UCF101.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .base import Datasets
from torchvision import transforms, set_image_backend
import random, os
from PIL import Image
import numpy as np
def SubSetSampling_func(inputs, reduce=2):
    """Reduce the sample list, keeping one entry of every `reduce` per class."""
    print('Total training examples', len(inputs))
    # Bucket (path, num_frames) pairs by their class label.
    by_label = {}
    for path, n_frames, label in inputs:
        by_label.setdefault(label, []).append((path, n_frames))
    # Apply the per-class stride, then flatten back to 3-tuples, label-grouped.
    by_label = {label: pairs[::reduce] for label, pairs in by_label.items()}
    inputs = [(path, n_frames, label)
              for label, pairs in by_label.items()
              for path, n_frames in pairs]
    print('Total training examples after sampling', len(inputs))
    return inputs
class UCFData(Datasets):
    """UCF101 dataset wrapper.

    Clip loading, frame sampling and the `self.inputs` list all come from the
    `Datasets` base class (not visible in this file).
    """
    def __init__(self, args, ground_truth, modality, phase='train'):
        super(UCFData, self).__init__(args, ground_truth, modality, phase)
        # sub-set sampling
        # self.inputs = SubSetSampling_func(self.inputs)
    def __getitem__(self, index):
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is class_index of the target class.
        """
        # self.inputs[index] is (relative_path, num_frames, label).
        sl = self.get_sl(self.inputs[index][1])
        self.data_path = os.path.join(self.dataset_root, self.inputs[index][0])
        self.clip, skgmaparr = self.image_propose(self.data_path, sl)
        if self.args.Network == 'FusionNet' or self.args.model_ema:
            assert self.typ == 'rgb'
            # NOTE(review): 'nturgb+d_depth_masked' is the NTU depth directory —
            # this branch looks copied from NTU.py and is unlikely to exist for
            # UCF101; confirm before using FusionNet/model_ema with this dataset.
            self.data_path = os.path.join(self.dataset_root, 'nturgb+d_depth_masked', self.inputs[index][0][:-4])
            self.clip1, skgmaparr1 = self.image_propose(self.data_path, sl)
            return (self.clip.permute(0, 3, 1, 2), self.clip1.permute(0, 3, 1, 2)), (skgmaparr, skgmaparr1), \
                   self.inputs[index][2], self.data_path
        # permute: (T, H, W, C) -> (T, C, H, W) — assumes channels-last clips
        # from image_propose; verify against the base class.
        return self.clip.permute(0, 3, 1, 2), skgmaparr.permute(0, 3, 1, 2), self.inputs[index][2], self.inputs[index][0]
    def get_path(self, imgs_path, a):
        # Frame filename from index `a`; unlike NTU.py there is no +1 offset —
        # presumably UCF101 frames are 0-based on disk; TODO confirm.
        return os.path.join(imgs_path, "%06d.jpg" % int(a))
    def __len__(self):
        return len(self.inputs)
| 2,117 | 34.898305 | 121 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/datasets/build.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from .distributed_sampler import DistributedSampler
from .IsoGD import IsoGDData
from .NvGesture import NvData
from .THU_READ import THUREAD
from .Jester import JesterData
from .NTU import NTUData
from .UCF101 import UCFData
from .base import Datasets
import logging
from torch.utils.data.sampler import WeightedRandomSampler
def build_dataset(args, phase):
    """Build a DataLoader (and its sampler) for the configured dataset/modality.

    Args:
        args: parsed config; uses `type` (M/K/F), `dataset`, `splits`,
            `batch_size`, `num_workers`, `dist`, `local_rank`. For non-train
            phases `args.test_batch_size` is set as a side effect.
        phase: split name ('train', 'valid', ...); selects `<splits>/<phase>.txt`.

    Returns:
        (DataLoader, sampler) where sampler is a DistributedSampler when
        args.dist is set, otherwise None.
    """
    modality = dict(
        M='rgb',
        K='depth',
        F='Flow'
    )
    assert args.type in modality, 'Error in modality! The currently supported modalities include: M (RGB), K (Depth) and F (Flow)'
    Datasets_func = dict(
        basic=Datasets,
        NvGesture=NvData,
        IsoGD=IsoGDData,
        THUREAD=THUREAD,
        Jester=JesterData,
        NTU=NTUData,
        UCF101=UCFData,  # fix: UCFData was imported but missing from the mapping
    )
    assert args.dataset in Datasets_func, 'Error in dataset Function!'
    if args.local_rank == 0:
        logging.info('Dataset:{}, Modality:{}'.format(args.dataset, modality[args.type]))
    splits = args.splits + '/{}.txt'.format(phase)
    dataset = Datasets_func[args.dataset](args, splits, modality[args.type], phase=phase)
    print(dataset)
    if args.dist:
        data_sampler = DistributedSampler(dataset)
    else:
        data_sampler = None
    if phase == 'train':
        # Shuffle only when no distributed sampler is used (the sampler
        # performs its own per-epoch shuffling).
        return torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, num_workers=args.num_workers,
                                           shuffle=(data_sampler is None),
                                           sampler=data_sampler, pin_memory=True, drop_last=False), data_sampler
    else:
        # Evaluation uses a larger batch since no optimizer state is kept.
        args.test_batch_size = int(1.5 * args.batch_size)
        return torch.utils.data.DataLoader(dataset, batch_size=args.test_batch_size, num_workers=args.num_workers,
                                           shuffle=False,
                                           sampler=data_sampler, pin_memory=True, drop_last=False), data_sampler
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/DSN.py | '''
This file is modified from:
https://github.com/deepmind/kinetics-i3d/i3d.py
'''
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
import os, math
import sys
from .DTN import DTNNet
from .FRP import FRP_Module
from .utils import *
import os, math
import sys
sys.path.append('../../')
from collections import OrderedDict
from utils import load_pretrained_checkpoint
import logging
class DSNNet(nn.Module):
    """Dynamic Static Network (I3D-style backbone + DTN temporal head).

    The 3-D CNN trunk (adapted from kinetics-i3d) extracts per-frame spatial
    features with 1xKxK kernels (no temporal convolution in the stem), which
    are then pooled, projected to 512-d and fed to the `DTNNet` transformer
    head for classification.
    """
    # Ordered endpoint names of the trunk; `forward` iterates these so the
    # execution order follows this tuple, not module registration order.
    VALID_ENDPOINTS = (
        'Conv3d_1a_7x7',
        'MaxPool3d_2a_3x3',
        'Conv3d_2b_1x1',
        'Conv3d_2c_3x3',
        'MaxPool3d_3a_3x3',
        'Mixed_3b',
        'Mixed_3c',
        'MaxPool3d_4a_3x3',
        'Mixed_4b',
        'Mixed_4c',
        'MaxPool3d_5a_2x2',
        'Mixed_5b',
        'Mixed_5c'
    )
    def __init__(self, args, num_classes=400, spatial_squeeze=True, name='inception_i3d', in_channels=3, dropout_keep_prob=0.5,
                 pretrained: str = False,
                 dropout_spatial_prob: float=0.0,
                 frames_drop_rate: float=0.0):
        """Build the trunk, the DTN head and (optionally) the FRP module.

        Args:
            args: global config; uses `frp`, `w`, and is forwarded to DTNNet.
            num_classes: classifier output size.
            spatial_squeeze: kept for I3D API compatibility (stored only).
            name: prefix used for the I3D endpoint names.
            in_channels: input channels (3 for RGB / replicated depth).
            dropout_keep_prob: dropout rate on the pooled features
                (NOTE(review): passed to nn.Dropout, so it is a drop
                probability despite the 'keep_prob' name).
            pretrained: checkpoint path; falsy disables loading.
            dropout_spatial_prob: accepted but unused in this block.
            frames_drop_rate: Dropout2d rate applied over frame features.
        """
        super(DSNNet, self).__init__()
        self._num_classes = num_classes
        self._spatial_squeeze = spatial_squeeze
        self.logits = None
        self.args = args
        self.end_points = {}
        '''
        Low Level Features Extraction
        '''
        end_point = 'Conv3d_1a_7x7'
        # 1x7x7 kernels: purely spatial convolution, stride 2 in H/W only.
        self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[1, 7, 7],
                                            stride=(1, 2, 2), padding=(0, 3, 3), name=name + end_point)
        end_point = 'MaxPool3d_2a_3x3'
        self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
                                                          padding=0)
        end_point = 'Conv3d_2b_1x1'
        self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0,
                                            name=name + end_point)
        end_point = 'Conv3d_2c_3x3'
        self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[1, 3, 3],
                                            padding=(0, 1, 1),
                                            name=name + end_point)
        end_point = 'MaxPool3d_3a_3x3'
        self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
                                                          padding=0)
        '''
        Spatial Multi-scale Features Learning
        '''
        # Inception blocks; the channel lists are the per-branch widths of the
        # original I3D configuration.
        end_point = 'Mixed_3b'
        self.end_points[end_point] = SpatialInceptionModule(192, [64, 96, 128, 16, 32, 32], name + end_point)
        end_point = 'Mixed_3c'
        self.end_points[end_point] = SpatialInceptionModule(256, [128, 128, 192, 32, 96, 64], name + end_point)
        end_point = 'MaxPool3d_4a_3x3'
        self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
                                                          padding=0)
        end_point = 'Mixed_4b'
        self.end_points[end_point] = SpatialInceptionModule(128 + 192 + 96 + 64, [192, 96, 208, 16, 48, 64], name + end_point)
        end_point = 'Mixed_4c'
        self.end_points[end_point] = SpatialInceptionModule(192 + 208 + 48 + 64, [160, 112, 224, 24, 64, 64], name + end_point)
        end_point = 'MaxPool3d_5a_2x2'
        self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 2, 2], stride=(1, 2, 2),
                                                          padding=0)
        end_point = 'Mixed_5b'
        self.end_points[end_point] = SpatialInceptionModule(160 + 224 + 64 + 64, [256, 160, 320, 32, 128, 128],
                                                            name + end_point)
        end_point = 'Mixed_5c'
        self.end_points[end_point] = SpatialInceptionModule(256 + 320 + 128 + 128, [384, 192, 384, 48, 128, 128],
                                                            name + end_point)
        # Project the 1024-ch trunk output down to the 512-d tokens the DTN head expects.
        self.LinearMap = nn.Sequential(
            nn.LayerNorm(1024),
            nn.Linear(1024, 512),
        )
        # Average over H/W only, keeping the temporal axis intact.
        self.avg_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
        self.dropout = nn.Dropout(dropout_keep_prob)
        self.build()
        self.dtn = DTNNet(args, num_classes=self._num_classes)
        self.rrange = Rearrange('b c t h w -> b t c h w')
        self.frames_droupout = torch.nn.Dropout2d(p=frames_drop_rate, inplace=False)
        if args.frp:
            self.frp_module = FRP_Module(w=args.w, inplanes=64)
        if pretrained:
            load_pretrained_checkpoint(self, pretrained)
    def build(self):
        # Register the dict-held endpoints as submodules so their parameters
        # are tracked / saved / moved with the model.
        for k in self.end_points.keys():
            self.add_module(k, self.end_points[k])
    def forward(self, x, garr):
        """Run the trunk + DTN head.

        Args:
            x: input clip batch.
            garr: auxiliary map consumed by the FRP module (only used when
                args.frp is set).

        Returns:
            (logits, distillation_loss, visualization_tuple) — the last entry
            bundles intermediate tensors for visualization/debugging.
        """
        inp = x
        for end_point in self.VALID_ENDPOINTS:
            if end_point in self.end_points:
                # At Mixed_3b/4b/5b the FRP output is added residually when enabled.
                if end_point in ['Mixed_3b']:
                    x = self._modules[end_point](x)
                    if self.args.frp:
                        x = self.frp_module(x, garr) + x
                elif end_point in ['Mixed_4b']:
                    x = self._modules[end_point](x)
                    if self.args.frp:
                        x = self.frp_module(x, garr) + x
                    f = x  # kept for the cnn_vison visualization below
                elif end_point in ['Mixed_5b']:
                    x = self._modules[end_point](x)
                    if self.args.frp:
                        x = self.frp_module(x, garr) + x
                else:
                    x = self._modules[end_point](x)
        feat = x
        # (B, C, T, 1, 1) -> (B, T, C): one 512-d token per retained time step.
        x = self.avg_pool(x).view(x.size(0), x.size(1), -1).permute(0, 2, 1)
        x = self.LinearMap(x)
        x = self.frames_droupout(x)
        cnn_vison = self.rrange(f.sum(dim=1, keepdim=True))
        logits, distillation_loss, (att_map, cosin_similar, MHAS, visweight) = self.dtn(x)
        # return logits, distillation_loss, (cnn_vison[0], att_map, cosin_similar, visweight, MHAS, (feat, inp[0, :]))
        return logits, distillation_loss, (cnn_vison[0], None, cosin_similar, visweight, (feat.data, inp[0, :]))
| 6,208 | 37.092025 | 127 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/FRP.py | '''
This file is modified from:
https://github.com/zhoubenjia/RAAR3DNet/blob/master/Network_Train/lib/model/RAAR3DNet.py
'''
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
import numpy as np
import cv2
from torchvision.utils import save_image, make_grid
def tensor_split(t):
    """Slice a (B, C, T, H, W) tensor into a list of T tensors of (B, C, H, W)."""
    return [frame.squeeze(2) for frame in torch.split(t, 1, dim=2)]
def tensor_merge(arr):
    """Stack a list of (B, C, H, W) frames back into a (B, C, T, H, W) tensor."""
    stacked = torch.cat([frame.unsqueeze(1) for frame in arr], dim=1)  # (B, T, C, H, W)
    return stacked.permute(0, 2, 1, 3, 4)
class FRP_Module(nn.Module):
    """Feature-level Rank Pooling module (adapted from RAAR3DNet).

    Two call modes, selected by whether `datt` is passed to `forward`:
      * datt is None: compute a sliding-window rank-pooled "dynamic image"
        sequence from the input feature volume.
      * datt given: use `datt` as a heatmap to attend the input features and
        blend original/attended features with a learned 2-tap 1-D conv.

    NOTE(review): tensors are created with `.cuda()` directly, so this module
    only runs on GPU; consider deriving the device from the input instead.
    """
    def __init__(self, w, inplanes):
        # w: rank-pooling window length (w < 1 turns the module into identity);
        # inplanes: channel count of the incoming features (used by the
        # commented-out BatchNorm variant below).
        super(FRP_Module, self).__init__()
        self._w = w
        self.rpconv1d = nn.Conv1d(2, 1, 1, bias=False)  # Rank Pooling Conv1d, Kernel Size 2x1x1
        # Initialized to pass through the first input (weight [1, 0]); the mix
        # ratio is then learned during training.
        self.rpconv1d.weight.data = torch.FloatTensor([[[1.0], [0.0]]])
        # self.bnrp = nn.BatchNorm3d(inplanes) # BatchNorm Rank Pooling
        # self.relu = nn.ReLU(inplace=True)
        self.hapooling = nn.MaxPool2d(kernel_size=2)
    def forward(self, x, datt=None):
        inp = x
        if self._w < 1:
            # Window disabled: module is a no-op.
            return x
        def run_layer_on_arr(arr, l):
            return [l(x) for x in arr]
        def oneconv(a, b):
            # Blend tensors a and b element-wise via the learned 1x1 Conv1d:
            # flatten both, stack as 2 channels, convolve, restore the shape.
            s = a.size()
            c = torch.cat([a.contiguous().view(s[0], -1, 1), b.contiguous().view(s[0], -1, 1)], dim=2)
            c = self.rpconv1d(c.permute(0, 2, 1)).permute(0, 2, 1)
            return c.view(s)
        if datt is not None:
            tarr = tensor_split(x)
            garr = tensor_split(datt)
            while tarr[0].size()[3] < garr[0].size()[3]:  # keep feature map and heatmap the same size
                garr = run_layer_on_arr(garr, self.hapooling)
            # Residual-style attention: feature * (heatmap + 1).
            attarr = [a * (b + torch.ones(a.size()).cuda()) for a, b in zip(tarr, garr)]
            datt = [oneconv(a, b) for a, b in zip(tarr, attarr)]
            return tensor_merge(datt)
        def tensor_arr_rp(arr):
            l = len(arr)
            def tensor_rankpooling(video_arr):
                # Approximate rank pooling: weighted sum of the window's frames
                # with linear weights 2i - N - 1, then GELU + min-max
                # normalization, collapsed to a single-channel map.
                def get_w(N):
                    return [float(i) * 2 - N - 1 for i in range(1, N + 1)]
                # re = torch.zeros(video_arr[0].size(0), 1, video_arr[0].size(2), video_arr[0].size(3)).cuda()
                re = torch.zeros(video_arr[0].size()).cuda()
                for a, b in zip(video_arr, get_w(len(video_arr))):
                    # a = transforms.Grayscale(1)(a)
                    re += a * b
                re = F.gelu(re)
                re -= torch.min(re)
                # Guard against an all-zero map (division by zero).
                re = re / torch.max(re) if torch.max(re) != 0 else re / (torch.max(re) + 0.00001)
                return transforms.Grayscale(1)(re)
            # Sliding window over time; the trailing windows are shorter than
            # self._w (arr[i:i+w] is truncated near the end).
            return [tensor_rankpooling(arr[i:i + self._w]) for i in range(l)]
        arrrp = tensor_arr_rp(tensor_split(x))
        b, c, t, h, w = tensor_merge(arrrp).shape
        # Prepend w-1 zero frames so the output is aligned with the input time axis.
        mask = torch.zeros(b, c, self._w-1, h, w, device=tensor_merge(arrrp).device)
        garrs = torch.cat((mask, tensor_merge(arrrp)), dim=2)
        return garrs
if __name__ == '__main__':
    # Smoke test for the module defined in this file. The previous code
    # instantiated an undefined `SATT_Module`, which raised a NameError;
    # FRP_Module is the class this file actually provides.
    model = FRP_Module(w=4, inplanes=64).cuda()
    inp = torch.randn(2, 3, 64, 224, 224).cuda()
    out = model(inp)
    print(out.shape)
| 3,236 | 36.206897 | 110 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/fusion_Net.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from torch.autograd import Variable
from collections import OrderedDict
import numpy as np
import os
import sys
from collections import OrderedDict
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
sys.path.append(['../../', '../'])
from utils import load_pretrained_checkpoint, load_checkpoint, SoftTargetCrossEntropy, concat_all_gather, uniform_sampling
import logging
# from .DSN_Fusion import DSNNet
from .DSN_v2 import DSNNetV2
from .DTN_v2 import DTNNet as DTNNetV2
from .DTN_v2 import Transformer, clsToken
from .trans_module import *
class Encoder(nn.Module):
    """Compress two concatenated feature vectors through a 1x1-conv bottleneck.

    Takes two (B, C) vectors, concatenates them to (B, 2C) and squeezes the
    channel count C_in -> C_in/2 -> C_in/4 -> C_out via 1x1 convolutions.
    """
    def __init__(self, C_in, C_out, dilation=2):
        super(Encoder, self).__init__()
        self.enconv = nn.Sequential(
            nn.Conv2d(C_in, C_in, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(C_in),
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_in // 2, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(C_in // 2),
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in // 2, C_in // 4, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(C_in // 4),
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in // 4, C_out, kernel_size=1, stride=1, padding=0, bias=False),
        )
    def forward(self, x1, x2):
        """Fuse two (B, C) vectors into a (B, C_out, 1, 1) code."""
        batch = x1.shape[0]
        joint = torch.cat((x1, x2), dim=1)
        # View the joint vector as a 1x1 spatial map so the conv stack applies.
        return self.enconv(joint.view(batch, -1, 1, 1))
class Decoder(nn.Module):
    """Expand a compressed (B, C_in, 1, 1) code back toward C_out // 2 channels
    via two 1x1 conv + BN + ReLU stages (spatial size is preserved)."""
    def __init__(self, C_in, C_out, dilation=2):
        super(Decoder, self).__init__()
        self.deconv = nn.Sequential(
            nn.Conv2d(C_in, C_out // 4, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out // 4),
            nn.ReLU(),
            nn.Conv2d(C_out // 4, C_out // 2, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out // 2),
            nn.ReLU(),
        )
    def forward(self, x):
        # Plain feed-forward expansion; nothing else to do here.
        return self.deconv(x)
class FusionModule(nn.Module):
    """Encoder/decoder bottleneck fusion of two modality feature vectors.

    Encodes two (B, channel_in/2) vectors into a (B, channel_out, 1, 1) code,
    decodes the code back for a reconstruction-style signal, and projects the
    code to class logits with a 1x1 conv.
    """
    def __init__(self, channel_in=1024, channel_out=256, num_classes=60):
        super(FusionModule, self).__init__()
        self.encoder = Encoder(channel_in, channel_out)
        self.decoder = Decoder(channel_out, channel_in)
        # 1x1 conv used as a linear classifier over the encoded code.
        self.efc = nn.Conv2d(channel_out, num_classes, kernel_size=1, padding=0, bias=False)
    def forward(self, r, d):
        """Returns (class_logits, decoded_features) for RGB/depth vectors r, d."""
        en_x = self.encoder(r, d)  # [4, 256, 1, 1]
        de_x = self.decoder(en_x)
        en_x = self.efc(en_x)
        # NOTE(review): .squeeze() also drops the batch dim when B == 1,
        # yielding a 1-D logits tensor; confirm callers never use batch size 1.
        return en_x.squeeze(), de_x
class DTN(DTNNetV2):
    """DTN variant that, in addition to the fused logits and per-branch
    outputs, returns the mean of the per-branch [CLS] features (used as the
    temporal feature by the fusion network)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def forward(self, x):
        # x: (B, N, C) sequence of per-frame 512-d tokens from the backbone.
        B, N, C = x.shape
        # Local-Global features capturing
        outputs, tem_feat = [], []
        # Temperature schedule indexed by the current epoch (set on args).
        temp = self.temp_schedule[self._args.epoch]
        for cls_token, (TCNN, MaxPool, TransBlock, mlp) in zip(self.cls_tokens, self.multi_scale_transformers):
            # Sub-sample N frames down to this branch's temporal resolution
            # (random jitter only during training).
            sl =uniform_sampling(x.size(1), cls_token.frame_rate, random=self.training)
            sub_x = x[:, sl, :]
            # (B, n, C) -> (B, C, n, 1, 1) for the temporal inception conv.
            sub_x = sub_x.permute(0, 2, 1).view(B, C, -1, 1, 1)
            sub_x = MaxPool(TCNN(sub_x))
            sub_x = sub_x.permute(0, 2, 1, 3, 4).view(B, -1, C)
            sub_x = cls_token(sub_x)
            sub_x = TransBlock(sub_x)
            # Keep only the [CLS] token as the branch representation.
            sub_x = sub_x[:, 0, :]
            tem_feat.append(sub_x.unsqueeze(-1))
            out = mlp(sub_x)
            outputs.append(out / temp)
        # Multi-branch fusion
        if self.branch_merge == 'sum':
            x = torch.zeros_like(out)
            for out in outputs:
                x += out
        elif self.branch_merge == 'pool':
            x = torch.cat([out.unsqueeze(-1) for out in outputs], dim=-1)
            x = self.max_pool(x).squeeze()
        # Returns fused logits, per-branch logits, and the mean [CLS] feature.
        return x, outputs, torch.cat(tem_feat, dim=-1).mean(-1)
class DSN(DSNNetV2):
    """DSN variant for the fusion network: replaces the base DTN head with the
    local `DTN` subclass and additionally exposes spatial features, temporal
    features and per-layer class embeddings for cross-modal fusion."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dtn = DTN(self.args, num_classes=self._num_classes)
    def forward(self, x, endpoint=None):
        # if endpoint=='spatial':
        x = self.stem(x)
        temp_out = []
        # SMS layers may return (features, temporal_weights); collect the latter.
        for i, sms_layer in enumerate(self.SMS_layers):
            x = sms_layer(x)
            if isinstance(x, tuple):
                x, temp_w = x
                temp_out.append(temp_w)
        self.feat = x  # cached for visualization (see get_visualization in callers)
        # (B, C, T, 1, 1) -> (B, T, C) token sequence, projected to 512-d.
        x = self.avg_pool(x).view(x.size(0), x.size(1), -1).permute(0, 2, 1)
        x = self.LinearMap(x)
        x = self.frames_droupout(x)
        spati_feat = x
        self.visweight = torch.sigmoid(x[0])
        # Per-branch, per-layer [CLS] embeddings from each branch's transformer.
        target_out = []
        for j in range(len(self.dtn.multi_scale_transformers)):
            target_out.append(self.dtn.multi_scale_transformers[j][2].get_classEmbd())
        # return x, (temp_out, target_out)
        x, (xs, xm, xl), tem_feat = self.dtn(x)
        return (x, xs, xm, xl), spati_feat, tem_feat, (temp_out, target_out)
class AttentionNet(nn.Module):
    """Cross-attention block: queries/values from x_r attend to keys from x_d.

    Optionally uses k-NN attention (keep only the top-k scores per query
    before softmax). Residual connections and a feed-forward tail make it a
    full transformer-style layer.
    """
    def __init__(self, dim=512, heads=8, dim_head=64, mlp_dim=768, dropout=0.1, knn_attention=True, topk=0.7):
        super(AttentionNet, self).__init__()
        self.knn_attention = knn_attention
        self.topk = topk  # fraction of key positions kept per query
        self.heads = heads
        self.scale = dim_head ** -0.5  # 1/sqrt(d) attention scaling
        inner_dim = dim_head * heads
        self.q = nn.Linear(dim, inner_dim, bias=False)
        self.k = nn.Linear(dim, inner_dim, bias=False)
        self.v = nn.Linear(dim, inner_dim, bias=False)
        self.norm = nn.LayerNorm(dim)
        self.ffn = PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout))
        # self.map = nn.Linear(inner_dim, dim, bias=True)
    def forward(self, x_r, x_d):
        # x_r supplies queries AND values; x_d supplies keys.
        b, n, c, h = *x_r.shape, self.heads
        q, k, v = self.q(x_r), self.k(x_d), self.v(x_r)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), [q, k, v])
        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        if self.knn_attention:
            # Keep only the top-k attention scores per query; the rest are set
            # to -inf so they vanish after softmax.
            mask = torch.zeros(b, self.heads, n, n, device=x_r.device, requires_grad=False)
            index = torch.topk(dots, k=int(dots.size(-1)*self.topk), dim=-1, largest=True)[1]
            mask.scatter_(-1, index, 1.)
            dots = torch.where(mask > 0, dots, torch.full_like(dots, float('-inf')))
        attn = dots.softmax(dim=-1)
        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        # NOTE(review): residual adds assume inner_dim == dim (heads*dim_head
        # equals the model width); holds for the defaults (8*64 == 512).
        out = self.norm(out) + x_r
        out = self.ffn(out) + out
        return out
class EnhanceModule(nn.Module):
    """Mutual gating of two modality features.

    Concatenates the RGB and depth features, derives a sigmoid gate for each
    modality from the joint vector, and rescales each input by its gate.
    """
    def __init__(self, dim=512):
        super(EnhanceModule, self).__init__()
        # Gate generator for the RGB stream (joint 2*dim -> dim, sigmoid in [0, 1]).
        self.mlp_rgb = nn.Sequential(
            nn.Linear(dim*2, dim),
            nn.ReLU(),
            nn.Linear(dim, dim),
            nn.Sigmoid()
        )
        # Gate generator for the depth stream.
        self.mlp_depth = nn.Sequential(
            nn.Linear(dim*2, dim),
            nn.ReLU(),
            nn.Linear(dim, dim),
            nn.Sigmoid()
        )
        self.norm = nn.LayerNorm(dim*2)
    def forward(self, xr, xd):
        """Returns the gated (xr, xd) pair; shapes are unchanged."""
        joint_feature = self.norm(torch.cat((xr, xd), dim=-1))
        score_grb = self.mlp_rgb(joint_feature)
        score_depth = self.mlp_depth(joint_feature)
        xr = xr * score_grb
        xd = xd * score_depth
        return xr, xd
class ComplementSpatial(nn.Module):
    """Spatial cross-complement stack: `depths` rounds of mutual gating
    (EnhanceModule) followed by bidirectional cross-attention between the RGB
    and depth token sequences."""
    def __init__(self, depths=2, dim=512):
        super(ComplementSpatial, self).__init__()
        self.att_nets = nn.ModuleList([])
        for _ in range(depths):
            self.att_nets.append(nn.ModuleList([
                EnhanceModule(dim),
                AttentionNet(dim),   # RGB attends to depth keys
                AttentionNet(dim)    # depth attends to RGB keys
            ]))
        self.norm = nn.LayerNorm(dim*2)
    def forward(self, xr, xd):
        b, n, c = xr.shape
        # Jointly normalize both streams, then split back.
        xr, xd = torch.split(self.norm(torch.cat((xr, xd), dim=-1)), [c, c], dim=-1)
        for EM, ANM, ANK in self.att_nets:
            xr, xd = EM(xr, xd)
            # xr, xd = ANM(xr, xd), ANK(xd, xr)
            # Both directions are computed before either stream is updated.
            cm = ANM(xr, xd)
            ck = ANK(xd, xr)
            xr, xd = cm, ck
        return xr, xd
class ComplementTemporal(nn.Module):
    """Temporal cross-complement stack: like ComplementSpatial but without the
    per-round EnhanceModule gating — only bidirectional cross-attention."""
    def __init__(self, depths=2, dim=512):
        super(ComplementTemporal, self).__init__()
        self.att_nets = nn.ModuleList([])
        for _ in range(depths):
            self.att_nets.append(nn.ModuleList([
                # EnhanceModule(dim),
                AttentionNet(dim),   # RGB attends to depth keys
                AttentionNet(dim)    # depth attends to RGB keys
            ]))
        self.norm = nn.LayerNorm(dim*2)
    def forward(self, xr, xd):
        b, n, c = xr.shape
        # Jointly normalize both streams, then split back.
        xr, xd = torch.split(self.norm(torch.cat((xr, xd), dim=-1)), [c, c], dim=-1)
        for ANM, ANK in self.att_nets:
            # xr, xd = ANM(xr, xd), ANK(xd, xr)
            # xr, xd = EM(xr, xd)
            # Both directions are computed before either stream is updated.
            cm = ANM(xr, xd)
            ck = ANK(xd, xr)
            xr, xd = cm, ck
        return xr, xd
class SFNNet(nn.Module):
    """Score-level fusion: learn a per-class mixing of the RGB and depth
    softmax distributions with a single Linear(2 -> num_classes) layer.

    NOTE: the diagonal extraction below requires num_classes to equal the
    logits' class dimension.
    """
    def __init__(self, args, num_classes, pretrained, spatial_interact=False, temporal_interact=False):
        super(SFNNet, self).__init__()
        self.linear = nn.Linear(2, num_classes)
    def forward(self, logitr, logitd):
        """Fuse (B, C) RGB and depth logits into a single (B, C) score tensor."""
        batch, n_cls = logitr.shape
        # Pair the two modality distributions along a trailing axis: (B, C, 2).
        paired = torch.stack((torch.softmax(logitr, dim=-1),
                              torch.softmax(logitd, dim=-1)), dim=-1)
        fused = self.linear(paired)  # (B, C, num_classes)
        # Mask everything but the diagonal of the (C, C) map so each class
        # keeps only its own fused score, then collapse to (B, C).
        fused = fused * torch.eye(n_cls, n_cls, device=logitr.device, requires_grad=False)
        return fused.sum(-1)
class CrossFusionNet(nn.Module):
    """Cross-modal fusion head over pre-extracted RGB/depth features.

    Pipeline: spatial cross-complement (SCC) over the two token sequences,
    mutual gating of the two clip-level temporal features, then a temporal
    cross-complement (TCC) transformer whose position-0 tokens feed two
    per-modality classifiers.
    """
    def __init__(self, args, num_classes, pretrained, spatial_interact=False, temporal_interact=False):
        super(CrossFusionNet, self).__init__()
        # Loss helpers kept on the module (used by the training loop, not here).
        self._MES = torch.nn.MSELoss()
        self._BCE = torch.nn.BCELoss()
        self._CE = SoftTargetCrossEntropy()
        self.spatial_interact = spatial_interact
        self.temporal_interact = temporal_interact
        self.args = args
        self.frame_rate = args.sample_duration  #//2 if args.sample_duration > 32 else args.sample_duration
        # Caches filled during forward for the visualization getters below.
        self.visweight = None
        self.feat = None
        self.pca_data = None
        self.target_data = None
        self.SCC_Module = ComplementSpatial(depths=args.scc_depth)
        self.temp_enhance_module = EnhanceModule(dim=512)
        self.TimesFormer = ComplementTemporal(depths=args.tcc_depth)
        # self.timesform1 = Transformer(dim=512, depth=2, heads=8, dim_head=64, mlp_dim=768,
        #                                 dropout=0.1)
        # self.cls_token1 = clsToken(self.frame_rate+1, 512)
        # Learnable position embeddings: frame_rate tokens + 1 leading
        # temporal-feature slot, one table per modality.
        self.pos_embedding_M = nn.Parameter(torch.randn(1, self.frame_rate + 1, 512))
        # self.timesform2 = Transformer(dim=512, depth=2, heads=8, dim_head=64, mlp_dim=768,
        #                                 dropout=0.1)
        # self.cls_token2 = clsToken(self.frame_rate+1, 512)
        self.pos_embedding_K = nn.Parameter(torch.randn(1, self.frame_rate + 1, 512))
        self.classifier1 = nn.Linear(512, num_classes)  # RGB head
        self.classifier2 = nn.Linear(512, num_classes)  # depth head
        self.max_pool = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1), padding=0)
        self.norm1 = nn.LayerNorm(512)
        self.norm2 = nn.LayerNorm(512)
        if pretrained:
            load_pretrained_checkpoint(self, pretrained)
            logging.info("Load Pre-trained model state_dict Done !")
    def forward(self, hidden_feature):
        """hidden_feature: (spatial_M, spatial_K, temporal_M, temporal_K) from FeatureCapter.

        Returns ((out_M, out_K), (None, stacked_cls_features)).
        """
        spatial_M, spatial_K, temporal_M, temporal_K = hidden_feature
        comple_features_M, comple_features_K = self.SCC_Module(spatial_M, spatial_K)
        b, n, c = comple_features_M.shape
        # if self.frame_rate > 32:
        #     comple_features_M = self.max_pool(comple_features_M.view(b, c, n, 1, 1)).view(b, n//2, c)
        #     comple_features_K = self.max_pool(comple_features_K.view(b, c, n, 1, 1)).view(b, n//2, c)
        temporal_enhance_M, temporal_enhance_K = self.temp_enhance_module(temporal_M, temporal_K)
        # temporal_enhance_M, temporal_enhance_K = temporal_M, temporal_K
        # Prepend the enhanced clip-level feature as a leading token, add the
        # modality's position embedding.
        temporal_feature_M = self.norm1(torch.cat((temporal_enhance_M.unsqueeze(1), comple_features_M), dim=1))
        temporal_feature_M += self.pos_embedding_M
        # temporal_feature_M = self.cls_token1(temporal_feature_M)
        # temporal_feature_M = self.timesform1(temporal_feature_M)
        temporal_feature_K = self.norm2(torch.cat((temporal_enhance_K.unsqueeze(1), comple_features_K), dim=1))
        temporal_feature_K += self.pos_embedding_K
        # temporal_feature_K = self.cls_token2(temporal_feature_K)
        # temporal_feature_K = self.timesform2(temporal_feature_K)
        temporal_feature_M, temporal_feature_K = self.TimesFormer(temporal_feature_M, temporal_feature_K)
        # Classify from each stream's leading (clip-level) token.
        out_M = self.classifier1(temporal_feature_M[:, 0])
        out_K = self.classifier2(temporal_feature_K[:, 0])
        # Gather L2-normalized features across ranks for cluster visualization;
        # labels 1..4 tag the four feature groups.
        normal_func = lambda x: concat_all_gather(F.normalize(x, p = 2, dim=-1))
        b, _ = normal_func(temporal_M).shape
        self.pca_data = torch.cat((normal_func(temporal_M),normal_func(temporal_K), normal_func(temporal_feature_M[:, 0]), normal_func(temporal_feature_K[:, 0])))
        self.target_data = torch.cat((torch.ones(b), torch.ones(b)+1, torch.ones(b)+2, torch.ones(b)+3))
        return (out_M,out_K), (None, torch.cat((temporal_feature_M[:, 0].unsqueeze(-1), temporal_feature_K[:, 0].unsqueeze(-1)), dim=-1))
    def get_cluster_visualization(self):
        # (features, group labels) captured during the last forward pass.
        return self.pca_data, self.target_data
    def get_visualization(self):
        # Both values are only populated if set externally; forward leaves them None.
        return self.feat, self.visweight
class FeatureCapter(nn.Module):
    """Runs the two frozen-architecture modality backbones (RGB + depth DSN)
    and exposes their logits and intermediate features for the fusion head."""
    def __init__(self, args, num_classes=249, pretrained=None):
        super(FeatureCapter, self).__init__()
        self.args = args
        # Both per-modality checkpoints must be provided.
        assert args.rgb_checkpoint and args.depth_checkpoint
        self.Modalit_rgb = DSN(args, num_classes=num_classes)
        self.Modalit_depth = DSN(args, num_classes=num_classes)
        rgb_checkpoint = args.rgb_checkpoint[args.FusionNet]
        self.strat_epoch_r, best_acc = load_checkpoint(self.Modalit_rgb, rgb_checkpoint)
        print(f'Best acc RGB: {best_acc}')
        depth_checkpoint = args.depth_checkpoint[args.FusionNet]
        self.strat_epoch_d, best_acc = load_checkpoint(self.Modalit_depth, depth_checkpoint)
        print(f'Best acc depth: {best_acc}')
    def forward(self, rgb, depth):
        # Pin args.epoch to each backbone's checkpoint epoch so its DTN
        # temperature schedule is evaluated at the epoch it was trained to.
        self.args.epoch = self.strat_epoch_r - 1
        (logit_M, M_xs, M_xm, M_xl), spatial_M, temporal_M, temp_out_M = self.Modalit_rgb(rgb, endpoint='spatial')
        self.args.epoch = self.strat_epoch_d - 1
        (logit_K, K_xs, K_xm, K_xl), spatial_K, temporal_K, temp_out_K = self.Modalit_depth(depth, endpoint='spatial')
        return (logit_M, M_xs, M_xm, M_xl), (logit_K, K_xs, K_xm, K_xl), (spatial_M, spatial_K, temporal_M, temporal_K)
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/DTN_v2.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from torch.autograd import Variable
from torch import nn, einsum
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, helpers, DropPath
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import numpy as np
import random, math
from .utils import *
from .trans_module import *
from utils import uniform_sampling
import matplotlib.pyplot as plt # For graphics
import seaborn as sns
import cv2
np.random.seed(123)
random.seed(123)
class Transformer(nn.Module):
    """Pre-norm transformer encoder stack.

    Each layer is PreNorm(Attention) + PreNorm(FeedForward) with residual
    connections. The token at position 0 (the [CLS] token) is cached per layer
    for retrieval via `get_classEmbd`.
    """
    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0., apply_transform=False, knn_attention=0.7):
        super().__init__()
        self.layers = nn.ModuleList([])
        # One slot per layer for the post-layer [CLS] embedding.
        self.cls_embed = [None for _ in range(depth)]
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout,
                                       apply_transform=apply_transform, knn_attention=knn_attention)),
                PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout))
            ]))
    def forward(self, x):
        for ii, (attn, ff) in enumerate(self.layers):
            x = attn(x) + x
            x = ff(x) + x
            # Cache this layer's [CLS] token (position 0) for later inspection.
            self.cls_embed[ii] = x[:, 0]
        return x
    def get_classEmbd(self):
        # Per-layer [CLS] embeddings from the most recent forward pass.
        return self.cls_embed
class clsToken(nn.Module):
    """Prepends a learnable [CLS] token to a sequence and adds learnable
    position embeddings for frame_rate + 1 positions."""
    def __init__(self, frame_rate, inp_dim):
        super().__init__()
        self.frame_rate = frame_rate  # read externally to drive frame sampling
        num_patches = frame_rate
        self.cls_token = nn.Parameter(torch.randn(1, 1, inp_dim))
        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, inp_dim))
    def forward(self, x):
        # x: (B, N, C) -> (B, N + 1, C) with the [CLS] token at position 0.
        B, N, C = x.shape
        cls_token = repeat(self.cls_token, '() n d -> b n d', b=B)
        x = torch.cat((cls_token, x), dim=1)
        x += self.pos_embedding[:, :(N + 1)]
        return x
class DTNNet(nn.Module):
    """Dynamic Temporal Network: multi-branch temporal transformer head.

    Each branch sub-samples the input token sequence at a different temporal
    resolution (frame_rate, 2*frame_rate, ...), applies a temporal inception
    conv + transformer, and classifies from its [CLS] token. Branch logits are
    temperature-scaled and merged by sum or max-pool.
    """
    def __init__(self, args, num_classes=249, inp_dim=512, dim_head=64, hidden_dim=768,
                 heads=8, pool='cls', dropout=0.1, emb_dropout=0.1, mlp_dropout=0.0, branch_merge='pool',
                 init: bool = False,
                 warmup_temp_epochs: int = 30,
                 branchs=3,
                 dynamic_tms=True):
        """Args of note:
            args: uses `sample_duration`, `intar_fatcer`, `N` (transformer
                depth), `knn_attention`, `temp` (warmup/final temperature) and
                `epochs`; `args.epoch` is read at forward time.
            branch_merge: 'sum' adds branch logits; 'pool' max-pools them.
            dynamic_tms: scale each branch's temporal kernel with its
                resolution (sqrt of frame_rate) instead of a fixed 3.
        """
        super().__init__()
        self._args = args
        print('Temporal Resolution:' )
        # Base temporal resolution; each branch i uses (i+1) * this value.
        frame_rate = args.sample_duration // args.intar_fatcer
        # names = self.__dict__
        self.cls_tokens = nn.ModuleList([])
        dynamic_kernel = []
        for i in range(branchs):
            # names['cls_token_' + str(i)] = nn.Parameter(torch.randn(1, 1, frame_rate))
            self.cls_tokens.append(clsToken(frame_rate, inp_dim))
            print(frame_rate)
            dynamic_kernel.append(int(frame_rate**0.5))
            frame_rate += args.sample_duration // args.intar_fatcer
        '''
        constract multi-branch structures
        '''
        trans_depth = args.N
        # Per branch: [temporal inception conv, temporal max-pool, transformer, classifier MLP].
        self.multi_scale_transformers = nn.ModuleList([])
        for ii in range(branchs):
            self.multi_scale_transformers.append(
                nn.ModuleList([
                    TemporalInceptionModule(inp_dim, [160,112,224,24,64,64], kernel_size=dynamic_kernel[ii] if dynamic_tms else 3),
                    MaxPool3dSamePadding(kernel_size=[3, 1, 1], stride=(1, 1, 1), padding=0),
                    Transformer(inp_dim, trans_depth, heads, dim_head, mlp_dim=hidden_dim, dropout=emb_dropout, knn_attention=args.knn_attention),
                    nn.Sequential(
                        nn.LayerNorm(inp_dim),
                        nn.Dropout(mlp_dropout),
                        nn.Linear(inp_dim, num_classes))
                ]))
        # num_patches = args.sample_duration
        # self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, inp_dim))
        self.avg_pool = nn.AdaptiveAvgPool3d(1)
        self.max_pool = nn.AdaptiveMaxPool1d(1)
        self.branch_merge = branch_merge
        # Linear temperature warmup from warmup_temp to temp, then constant.
        warmup_temp, temp = map(float, args.temp)
        self.temp_schedule = np.concatenate((
            np.linspace(warmup_temp,
                        temp, warmup_temp_epochs),
            np.ones(args.epochs - warmup_temp_epochs) * temp
        ))
        # self.show_res = Rearrange('b t (c p1 p2) -> b t c p1 p2', p1=int(small_dim ** 0.5), p2=int(small_dim ** 0.5))
        if init:
            self.init_weights()
    def TC_forward(self):
        # NOTE(review): self.tc_feat is never assigned in this class; calling
        # this raises AttributeError unless a subclass/external code sets it.
        return self.tc_feat
    # @torch.no_grad()
    def init_weights(self):
        # Xavier-uniform for Linear weights, small-normal for biases.
        def _init(m):
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(
                    m.weight)  # _trunc_normal(m.weight, std=0.02)  # from .initialization import _trunc_normal
                if hasattr(m, 'bias') and m.bias is not None:
                    nn.init.normal_(m.bias, std=1e-6)  # nn.init.constant(m.bias, 0)
        self.apply(_init)
    def forward(self, x):  # x size: [2, 64, 512]
        B, N, C = x.shape
        # Add position embedding
        # x += self.pos_embedding
        # Local-Global features capturing
        outputs = []
        # Temperature for this epoch (sharpens/softens the branch logits).
        temp = self.temp_schedule[self._args.epoch]
        for cls_token, (TCNN, MaxPool, TransBlock, mlp) in zip(self.cls_tokens, self.multi_scale_transformers):
            # cls_token = self.__dict__['cls_token_{}'.format(i)]
            # Sub-sample to this branch's temporal resolution (jittered in training).
            sl = uniform_sampling(x.size(1), cls_token.frame_rate, random=self.training)
            sub_x = x[:, sl, :]
            # (B, n, C) -> (B, C, n, 1, 1) for the temporal inception conv.
            sub_x = sub_x.permute(0, 2, 1).view(B, C, -1, 1, 1)
            sub_x = MaxPool(TCNN(sub_x))
            sub_x = sub_x.permute(0, 2, 1, 3, 4).view(B, -1, C)
            sub_x = cls_token(sub_x)
            sub_x = TransBlock(sub_x)
            # Branch representation = the [CLS] token.
            sub_x = sub_x[:, 0, :]
            out = mlp(sub_x)
            outputs.append(out / temp)
        # Multi-branch fusion
        if self.branch_merge == 'sum':
            x = torch.zeros_like(out)
            for out in outputs:
                x += out
        elif self.branch_merge == 'pool':
            # Element-wise max over the branch axis.
            x = torch.cat([out.unsqueeze(-1) for out in outputs], dim=-1)
            x = self.max_pool(x).squeeze()
        return x, outputs
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/utils.py | '''
This file is modified from:
https://github.com/deepmind/kinetics-i3d/blob/master/i3d.py
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import os
import sys
class MaxPool3dSamePadding(nn.MaxPool3d):
    """3-D max pooling with TensorFlow-style 'SAME' padding.

    Pads (t, h, w) just enough that each output extent is
    ceil(input_extent / stride); the pad is split evenly with the extra
    element (if any) on the trailing side.
    """
    def compute_pad(self, dim, s):
        # Total padding needed along dimension `dim` (0=t, 1=h, 2=w) for input extent `s`.
        remainder = s % self.stride[dim]
        if remainder == 0:
            return max(self.kernel_size[dim] - self.stride[dim], 0)
        return max(self.kernel_size[dim] - remainder, 0)
    def forward(self, x, is_pad=True):
        batch, channel, t, h, w = x.size()
        # F.pad expects pads in reverse dimension order: (w_f, w_b, h_f, h_b, t_f, t_b).
        pad = []
        for dim, extent in ((2, w), (1, h), (0, t)):
            total = self.compute_pad(dim, extent)
            front = total // 2
            pad.extend([front, total - front])
        if is_pad:
            x = F.pad(x, pad)
        return super(MaxPool3dSamePadding, self).forward(x)
class Unit3D(nn.Module):
    """Basic I3D unit: Conv3d with 'SAME' padding + optional BatchNorm + activation.

    Padding is computed dynamically per input size (TensorFlow semantics) and
    applied with F.pad, so the Conv3d itself is constructed with padding=0.
    """
    def __init__(self, in_channels,
                 output_channels,
                 kernel_shape=(1, 1, 1),
                 stride=(1, 1, 1),
                 padding=0,
                 activation_fn=F.relu,
                 use_batch_norm=True,
                 use_bias=False,
                 name='unit_3d'):
        """Initializes Unit3D module."""
        super(Unit3D, self).__init__()
        self._output_channels = output_channels
        self._kernel_shape = kernel_shape
        self._stride = stride
        self._use_batch_norm = use_batch_norm
        self._activation_fn = activation_fn
        self._use_bias = use_bias
        self.name = name
        # NOTE(review): the `padding` argument is stored but not forwarded to
        # Conv3d — 'SAME' padding in forward() takes over regardless.
        self.padding = padding
        self.conv3d = nn.Conv3d(in_channels=in_channels,
                                out_channels=self._output_channels,
                                kernel_size=self._kernel_shape,
                                stride=self._stride,
                                padding=0,
                                bias=self._use_bias)
        if self._use_batch_norm:
            self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01)
    def compute_pad(self, dim, s):
        # Total 'SAME' padding along dimension `dim` (0=t, 1=h, 2=w) so the
        # output extent is ceil(s / stride).
        if s % self._stride[dim] == 0:
            return max(self._kernel_shape[dim] - self._stride[dim], 0)
        else:
            return max(self._kernel_shape[dim] - (s % self._stride[dim]), 0)
    def forward(self, x):
        (batch, channel, t, h, w) = x.size()
        pad_t = self.compute_pad(0, t)
        pad_h = self.compute_pad(1, h)
        pad_w = self.compute_pad(2, w)
        # Split each total pad front/back; the extra element goes on the back.
        pad_t_f = pad_t // 2
        pad_t_b = pad_t - pad_t_f
        pad_h_f = pad_h // 2
        pad_h_b = pad_h - pad_h_f
        pad_w_f = pad_w // 2
        pad_w_b = pad_w - pad_w_f
        # F.pad takes pads in reverse dimension order: w, h, t.
        pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
        x = F.pad(x, pad)
        x = self.conv3d(x)
        if self._use_batch_norm:
            x = self.bn(x)
        if self._activation_fn is not None:
            x = self._activation_fn(x)
        return x
class TemporalInceptionModule(nn.Module):
    """Inception block whose convolutions span only the temporal axis (k, 1, 1)."""

    def __init__(self, in_channels, out_channels, name='temporal', kernel_size=3):
        super(TemporalInceptionModule, self).__init__()
        # out_channels layout: [b0, b1_reduce, b1, b2_reduce, b2, b3]
        c0, c1_red, c1, c2_red, c2, c3 = out_channels
        # Branch 0: pointwise projection.
        self.b0 = Unit3D(in_channels=in_channels, output_channels=c0,
                         kernel_shape=[1, 1, 1], padding=0,
                         name=name+'/Branch_0/Conv3d_0a_1x1')
        # Branch 1: 1x1x1 reduction then a temporal conv.
        self.b1a = Unit3D(in_channels=in_channels, output_channels=c1_red,
                          kernel_shape=[1, 1, 1], padding=0,
                          name=name+'/Branch_1/Conv3d_0a_1x1')
        self.b1b = Unit3D(in_channels=c1_red, output_channels=c1,
                          kernel_shape=[kernel_size, 1, 1],
                          name=name+'/Branch_1/Conv3d_0b_3x3')
        # Branch 2: same shape as branch 1 with its own widths.
        self.b2a = Unit3D(in_channels=in_channels, output_channels=c2_red,
                          kernel_shape=[1, 1, 1], padding=0,
                          name=name+'/Branch_2/Conv3d_0a_1x1')
        self.b2b = Unit3D(in_channels=c2_red, output_channels=c2,
                          kernel_shape=[kernel_size, 1, 1],
                          name=name+'/Branch_2/Conv3d_0b_3x3')
        # Branch 3: temporal max-pool then a pointwise projection.
        self.b3a = MaxPool3dSamePadding(kernel_size=[kernel_size, 1, 1],
                                        stride=(1, 1, 1), padding=0)
        self.b3b = Unit3D(in_channels=in_channels, output_channels=c3,
                          kernel_shape=[1, 1, 1], padding=0,
                          name=name+'/Branch_3/Conv3d_0b_1x1')
        self.name = name

    def forward(self, x):
        branches = (
            self.b0(x),
            self.b1b(self.b1a(x)),
            self.b2b(self.b2a(x)),
            self.b3b(self.b3a(x)),
        )
        return torch.cat(branches, dim=1)
class SpatialInceptionModule(nn.Module):
    # Inception block whose convolutions span only the spatial axes (1, 3, 3);
    # every kernel has temporal extent 1 except the branch-3 max-pool (3, 3, 3).
    # out_channels layout: [b0, b1_reduce, b1, b2_reduce, b2, b3].
    def __init__(self, in_channels, out_channels, name):
        super(SpatialInceptionModule, self).__init__()
        # Branch 0: pointwise projection.
        self.b0 = Unit3D(in_channels=in_channels, output_channels=out_channels[0], kernel_shape=[1, 1, 1], padding=0,
                         name=name + '/Branch_0/Conv3d_0a_1x1')
        # Branch 1: 1x1x1 reduction then a 1x3x3 spatial conv.
        self.b1a = Unit3D(in_channels=in_channels, output_channels=out_channels[1], kernel_shape=[1, 1, 1], padding=0,
                          name=name + '/Branch_1/Conv3d_0a_1x1')
        self.b1b = Unit3D(in_channels=out_channels[1], output_channels=out_channels[2], kernel_shape=[1, 3, 3],
                          name=name + '/Branch_1/Conv3d_0b_3x3')
        # Branch 2: same shape as branch 1 with its own widths.
        self.b2a = Unit3D(in_channels=in_channels, output_channels=out_channels[3], kernel_shape=[1, 1, 1], padding=0,
                          name=name + '/Branch_2/Conv3d_0a_1x1')
        self.b2b = Unit3D(in_channels=out_channels[3], output_channels=out_channels[4], kernel_shape=[1, 3, 3],
                          name=name + '/Branch_2/Conv3d_0b_3x3')
        # Branch 3: 3x3x3 max-pool then a pointwise projection.
        self.b3a = MaxPool3dSamePadding(kernel_size=[3, 3, 3],
                                        stride=(1, 1, 1), padding=0)
        self.b3b = Unit3D(in_channels=in_channels, output_channels=out_channels[5], kernel_shape=[1, 1, 1], padding=0,
                          name=name + '/Branch_3/Conv3d_0b_1x1')
        self.name = name

    def forward(self, x):
        # Run the four branches independently and concatenate on the channel axis.
        b0 = self.b0(x)
        b1 = self.b1b(self.b1a(x))
        b2 = self.b2b(self.b2a(x))
        b3 = self.b3b(self.b3a(x))
        return torch.cat([b0, b1, b2, b3], dim=1) | 6,415 | 40.662338 | 121 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/DSN_v2.py | '''
This file is modified from:
https://github.com/deepmind/kinetics-i3d/i3d.py
'''
import torch
from torch import nn, einsum
from einops.layers.torch import Rearrange
import torch.nn.functional as F
from torch.autograd import Variable
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import numpy as np
import cv2
import os, math
import sys
from .DTN import DTNNet
from .DTN_v2 import DTNNet as DTNNetV2
from .FRP import FRP_Module
from .utils import *
import os, math
import sys
sys.path.append('../../')
from collections import OrderedDict
from utils import load_pretrained_checkpoint
import logging
class RCMModule(nn.Module):
    # Recoupling module (DSN_v2 variant): temporal self-attention over per-frame
    # channel descriptors, followed by a sigmoid re-weighting ("distill") branch.
    def __init__(self, args, dim_head=16):
        super(RCMModule, self).__init__()
        # NOTE(review): mutates the shared `args` namespace for all later readers.
        args.recoupling = False
        self.args = args
        self._distill = True  # hard-coded on; forward() relies on it (see NOTE there)
        self.heads = args.sample_duration
        self.inp_dim = args.sample_duration
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.avg_pool3d = nn.AdaptiveAvgPool3d(1)

        # Self Attention Layers
        self.q = nn.Linear(self.inp_dim, dim_head * self.heads, bias=False)
        self.k = nn.Linear(self.inp_dim, dim_head * self.heads, bias=False)
        self.scale = dim_head ** -0.5

        # Distill MLP
        if self._distill:
            self.TM_project = nn.Sequential(
                nn.Linear(self.inp_dim, self.inp_dim*2, bias=False),
                nn.GELU(),
                nn.Linear(self.inp_dim*2, self.inp_dim, bias=False),
                nn.LayerNorm(self.inp_dim),
            )
        # NOTE(review): `temp_out` is computed but never used.
        temp_out = args.sample_duration//2 if args.sample_duration == 64 else args.sample_duration
        self.linear = nn.Linear(self.inp_dim, 512)

    def forward(self, x):
        """x: (b, c, t, h, w). Returns (re-weighted features, projected temporal embedding)."""
        b, c, t, h, w = x.shape
        residual = x
        # Per-frame channel descriptors via spatial GAP: (b, c, t).
        x = rearrange(x, 'b c t h w -> (b t) c h w')
        x = self.avg_pool(x)
        x = rearrange(x, '(b t) c h w -> b c (t h w)', t=t)
        # x = self.norm(x)
        q, k = self.q(x), self.k(x)
        v = rearrange(residual, 'b c t h w -> b t c (h w)')
        q, k = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.heads), [q, k])
        # Scaled dot-product attention over the channel tokens; heads index time.
        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        attn = dots.softmax(dim=-1)
        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        # attn = attn.mean(-2, keepdim=True).transpose(2,3)
        # out = v * attn.expand_as(v)
        out = rearrange(out, 'b t c (h w) -> b c t h w', h=h, w=w)
        # out += residual
        if self._distill:
            # Per-frame embedding -> MLP -> sigmoid gate over the temporal axis.
            # NOTE(review): .squeeze() drops the batch dim when b == 1 — verify callers.
            temporal_embedding = self.avg_pool3d(out.permute(0, 2, 1, 3, 4)).squeeze()
            temporal_project = self.TM_project(temporal_embedding)
            temporal_weight = torch.sigmoid(temporal_project)[:, None, :, None, None]
            out = out * temporal_weight.expand_as(out)
            temporal_weight = temporal_weight.squeeze()
        out += residual
        # NOTE(review): `temporal_project` is undefined if self._distill were False.
        return out, self.linear(temporal_project)
class SMSBlock(nn.Module):
    """Two stacked spatial inception modules with an optional max-pool tail.

    `channel_list` is (in_channels, hidden_channels, out_channels); the second
    module's input width is the concatenated output of the first module's
    four branches.
    """

    def __init__(self, channel_list, kernel_size=None, stride=None, padding=0, name='i3d'):
        super(SMSBlock, self).__init__()
        in_channels, hidden_channels, out_channels = channel_list
        self.end_points = {}
        # First inception stage.
        self.end_points['Mixed1'] = SpatialInceptionModule(in_channels, hidden_channels, name + 'Mixed1')
        # Concatenated width of Mixed1's branches feeds the second stage.
        branch_width = hidden_channels[0] + hidden_channels[2] + hidden_channels[4] + hidden_channels[5]
        self.end_points['Mixed2'] = SpatialInceptionModule(branch_width, out_channels, name + 'Mixed2')
        if kernel_size is not None:
            self.end_points['MaxPool3d_3x3'] = MaxPool3dSamePadding(kernel_size=kernel_size, stride=stride,
                                                                    padding=padding)
        self.build()

    def build(self):
        # Register the staged modules under their end-point names.
        for key, module in self.end_points.items():
            self.add_module(key, module)

    def forward(self, x):
        # Apply the stages in insertion order.
        for key in self.end_points:
            x = self._modules[key](x)
        return x
class Channel_Pooling(nn.Module):
    """Halves the channel axis of a (b, c, t, h, w) tensor by pairwise max."""

    def __init__(self):
        super(Channel_Pooling, self).__init__()
        # Pooling over the first spatial dim after the c/t swap == pooling over channels.
        self.max_pool = nn.MaxPool3d(kernel_size=(2,1,1), stride=(2, 1, 1), padding=0)

    def forward(self, x):
        # x size: torch.Size([16, 512, 16, 7, 7])
        pooled = self.max_pool(x.transpose(1, 2))
        return pooled.transpose(1, 2)
class DSNNetV2(nn.Module):
    # Spatial backbone (stem + SMS blocks + channel pooling + RCM) feeding the
    # temporal network DTNNetV2. Returns the DTN logits plus the pairs of
    # (RCM temporal projections, DTN class embeddings) used for distillation.
    def __init__(self, args, num_classes=400, spatial_squeeze=True, name='inception_i3d', in_channels=3,
                 pretrained: bool = False,
                 sms_depth: int = 3,
                 dropout_spatial_prob: float=0.0,
                 frames_drop_rate: float=0.0):
        super(DSNNetV2, self).__init__()
        self._num_classes = num_classes
        self._spatial_squeeze = spatial_squeeze
        self.args = args

        # Low-level stem: spatial-only convs/pools (temporal extent 1 throughout).
        self.stem = nn.Sequential(
            Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[1, 7, 7],
                   stride=(1, 2, 2), padding=(0, 3, 3), name=name + 'Conv3d_1a_7x7'),
            MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0),
            Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0,
                   name=name + 'Conv3d_2b_1x1'),
            Unit3D(in_channels=64, output_channels=192, kernel_shape=[1, 3, 3],
                   padding=(0, 1, 1), name=name + 'Conv3d_2c_3x3'),
            MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0)
        )
        '''
        Spatial Multi-scale Features Learning
        '''
        sms_block = [
            # input_dim, hidden_dim, output_dim
            [192, [64, 96, 128, 16, 32, 32], [128, 128, 192, 32, 96, 64]],
            [128 + 192 + 96 + 64, [192, 96, 208, 16, 48, 64], [160, 112, 224, 24, 64, 64]],
            [160 + 224 + 64 + 64, [256, 160, 320, 32, 128, 128], [384, 192, 384, 48, 128, 128]],
        ]
        assert len(sms_block) == sms_depth
        self.SMS_layers = nn.ModuleList([])
        for i in range(sms_depth):
            # Each stage downsamples spatially a little less than the previous one.
            if i == 0:
                self.SMS_layers.append(
                    SMSBlock(sms_block[i], kernel_size=[1,3,3], stride=(1,2,2), padding=0)
                )
            elif i==1:
                self.SMS_layers.append(
                    SMSBlock(sms_block[i], kernel_size=[1,2,2], stride=(1,2,2), padding=0)
                )
            elif i==2:
                self.SMS_layers.append(SMSBlock(sms_block[i], kernel_size=[1,1,1], stride=(1,1,1), padding=0))
        # NOTE(review): trailing comma makes this line a throwaway tuple — harmless.
        self.SMS_layers.append(Channel_Pooling()),
        self.SMS_layers.append(RCMModule(args))

        self.LinearMap = nn.Sequential(
            nn.Dropout(dropout_spatial_prob),
            nn.LayerNorm(512),
            nn.Linear(512, 512),
        )
        self.avg_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
        self.dtn = DTNNetV2(args, num_classes=self._num_classes)
        self.rrange = Rearrange('b c t h w -> b t c h w')
        # Dropout2d over (t, c) planes randomly drops whole frames.
        self.frames_droupout = torch.nn.Dropout2d(p=frames_drop_rate, inplace=False)

        # Feature visualization
        self.feat = None
        self.visweight = None

    def get_visualization(self):
        # Returns the cached mid-level feature map and last sigmoid weights.
        return self.feat, self.visweight

    def build(self):
        # NOTE(review): dead leftover from a dict-based design — nn.ModuleList
        # has no .keys(), so calling this would raise. Nothing calls it here.
        for k in self.SMS_layers.keys():
            self.add_module(k, self.SMS_layers[k])

    def forward(self, x, garr=None):
        # NOTE(review): `inp`, `cnn_vison` and `garr` are currently unused.
        inp = x
        x = self.stem(x)
        temp_out = []
        for i, sms_layer in enumerate(self.SMS_layers):
            x = sms_layer(x)
            # RCMModule returns (features, temporal projection); collect the latter.
            if isinstance(x, tuple):
                x, temp_w = x
                temp_out.append(temp_w)
            if i == 1:
                f = x
        self.feat = x.data
        # (b, c, t, h, w) -> (b, t, c) frame embeddings for the temporal network.
        x = self.avg_pool(x).view(x.size(0), x.size(1), -1).permute(0, 2, 1)
        x = self.LinearMap(x)
        x = self.frames_droupout(x)
        cnn_vison = self.rrange(f.sum(dim=1, keepdim=True))
        self.visweight = torch.sigmoid(x[0])
        # logits, _, (att_map, cosin_similar, MHAS, visweight) = self.dtn(x)
        x, (xs, xm, xl) = self.dtn(x)

        target_out = []
        for j in range(len(self.dtn.multi_scale_transformers)):
            # Class embeddings from each DTN encoder (distillation targets).
            target_out.append(self.dtn.multi_scale_transformers[j][2].get_classEmbd())
        return (x, xs, xm, xl), (temp_out, target_out)
| 8,548 | 36.827434 | 137 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/trans_module.py | '''
This file is modified from:
https://github.com/rishikksh20/CrossViT-pytorch/blob/master/crossvit.py
'''
import torch
from torch import nn, einsum
import torch.nn.functional as F
import math
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
class Residual(nn.Module):
    """Adds a skip connection around the wrapped callable: out = fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        out = self.fn(x, **kwargs)
        return out + x
class PreNorm(nn.Module):
    """Applies LayerNorm to the input before delegating to the wrapped callable."""

    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)
# class FeedForward(nn.Module):
# def __init__(self, dim, hidden_dim, dropout=0.):
# super().__init__()
# self.net = nn.Sequential(
# nn.Linear(dim, hidden_dim),
# nn.GELU(),
# nn.Dropout(dropout),
# nn.Linear(hidden_dim, dim),
# nn.Dropout(dropout)
# )
# def forward(self, x):
# return self.net(x)
class FeedForward(nn.Module):
    """Position-wise two-layer MLP: Linear -> GELU -> Dropout -> Linear -> Dropout."""

    def __init__(self, dim, hidden_dim, dropout=0.):
        super().__init__()
        self.fc1 = nn.Linear(dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # (B, S, D) -> (B, S, D_ff) -> (B, S, D)
        hidden = self.dropout(F.gelu(self.fc1(x)))
        return self.dropout(self.fc2(hidden))
class Attention(nn.Module):
    # Multi-head self-attention with optional k-NN sparsification (only the
    # top-k keys per query attend) and optional learned re-attention transform.
    # The last attention map is cached in self.scores for visualization.
    def __init__(self, dim, heads=8, dim_head=64, dropout=0., apply_transform=False, transform_scale=True, knn_attention=0.7):
        super().__init__()
        inner_dim = dim_head * heads
        # Skip the output projection when a single head already matches `dim`.
        project_out = not (heads == 1 and dim_head == dim)

        self.heads = heads
        self.scale = dim_head ** -0.5
        self.apply_transform = apply_transform
        # knn_attention doubles as an on/off flag (0 disables) and the keep ratio.
        self.knn_attention = bool(knn_attention)
        self.topk = knn_attention
        if apply_transform:
            self.reatten_matrix = torch.nn.Conv2d(heads, heads, 1, 1)
            self.var_norm = torch.nn.BatchNorm2d(heads)
            self.reatten_scale = self.scale if transform_scale else 1.0

        self.to_qkv = nn.Linear(dim, inner_dim * 3, bias=False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        ) if project_out else nn.Identity()
        self.scores = None  # cached attention map from the last forward pass

    def forward(self, x):
        b, n, _, h = *x.shape, self.heads
        qkv = self.to_qkv(x).chunk(3, dim=-1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), qkv)

        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        if self.knn_attention:
            # Keep only the top (topk * n) keys per query; mask the rest to -inf
            # so they get zero probability after softmax.
            mask = torch.zeros(b, self.heads, n, n, device=x.device, requires_grad=False)
            index = torch.topk(dots, k=int(dots.size(-1)*self.topk), dim=-1, largest=True)[1]
            mask.scatter_(-1, index, 1.)
            dots = torch.where(mask > 0, dots, torch.full_like(dots, float('-inf')))
        attn = dots.softmax(dim=-1)
        if self.apply_transform:
            attn = self.var_norm(self.reatten_matrix(attn)) * self.reatten_scale
        self.scores = attn
        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out
| 3,442 | 30.87963 | 126 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/DTN.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
import torch
from torch.autograd import Variable
from torch import nn, einsum
import torch.nn.functional as F
from timm.models.layers import trunc_normal_, helpers, DropPath
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
import numpy as np
import random, math
from .utils import *
from .trans_module import *
np.random.seed(123)
random.seed(123)
class Transformer(nn.Module):
    """Stack of `depth` pre-norm (attention, feed-forward) pairs with residuals."""

    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0., apply_transform=False, knn_attention=0.7):
        super().__init__()
        self.layers = nn.ModuleList([
            nn.ModuleList([
                PreNorm(dim, Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout,
                                       apply_transform=apply_transform, knn_attention=knn_attention)),
                PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout)),
            ])
            for _ in range(depth)
        ])

    def forward(self, x):
        for attn, ff in self.layers:
            x = x + attn(x)  # residual around attention
            x = x + ff(x)    # residual around feed-forward
        return x
class MultiScaleTransformerEncoder(nn.Module):
    """One encoder stage holding three parallel branches (small/media/large).

    Each branch optionally applies local temporal mixing (inception + max-pool
    over the patch tokens, class token preserved) and then a Transformer.
    The summed class tokens of the three branches are cached in
    `self.class_embedding` after every forward pass.
    """

    def __init__(self, args, small_dim=1024, small_depth=4, small_heads=8, small_dim_head=64, hidden_dim_small=768,
                 media_dim=1024, media_depth=4, media_heads=8, media_dim_head=64, hidden_dim_media=768,
                 large_dim=1024, large_depth=4, large_heads=8, large_dim_head=64, hidden_dim_large=768,
                 dropout=0.):
        super().__init__()
        # Global (Transformer) encoders, one per temporal resolution.
        self.transformer_enc_small = Transformer(
            small_dim, small_depth, small_heads, small_dim_head,
            mlp_dim=hidden_dim_small, dropout=dropout, knn_attention=args.knn_attention)
        self.transformer_enc_media = Transformer(
            media_dim, media_depth, media_heads, media_dim_head,
            mlp_dim=hidden_dim_media, dropout=dropout, knn_attention=args.knn_attention)
        self.transformer_enc_large = Transformer(
            large_dim, large_depth, large_heads, large_dim_head,
            mlp_dim=hidden_dim_large, dropout=dropout, knn_attention=args.knn_attention)

        # Local temporal mixers, one per branch.
        self.Mixed_small = TemporalInceptionModule(512, [160, 112, 224, 24, 64, 64], 'Mixed_small')
        self.Mixed_media = TemporalInceptionModule(512, [160, 112, 224, 24, 64, 64], 'Mixed_media')
        self.Mixed_large = TemporalInceptionModule(512, [160, 112, 224, 24, 64, 64], 'Mixed_large')
        self.MaxPool = MaxPool3dSamePadding(kernel_size=[3, 1, 1], stride=(1, 1, 1), padding=0)
        self.class_embedding = None

    def _local_mixing(self, mixer, tokens):
        """Apply `mixer` to the patch tokens of (B, 1+n, C), keeping the class token."""
        cls_tok = tokens[:, 0]
        # (B, n, C) -> (B, C, n, 1, 1) so the temporal inception sees tokens as depth.
        feats = mixer(tokens[:, 1:, :].permute(0, 2, 1).view(tokens.size(0), tokens.size(-1), -1, 1, 1))
        feats = self.MaxPool(feats)
        feats = feats.view(feats.size(0), feats.size(1), -1).permute(0, 2, 1)
        return torch.cat((cls_tok.unsqueeze(1), feats), dim=1)

    def forward(self, xs, xm, xl, Local_flag=False):
        # Local Modeling (only requested on the first encoder of the stack)
        if Local_flag:
            xs = self._local_mixing(self.Mixed_small, xs)
            xm = self._local_mixing(self.Mixed_media, xm)
            xl = self._local_mixing(self.Mixed_large, xl)
        # Global Modeling
        xs = self.transformer_enc_small(xs)
        xm = self.transformer_enc_media(xm)
        xl = self.transformer_enc_large(xl)
        self.class_embedding = xs[:, 0] + xm[:, 0] + xl[:, 0]
        return xs, xm, xl
class RCMModule(nn.Module):
    # Recoupling module (DTN variant): builds a sigmoid gate along the feature
    # (X) direction via self-attention and along the sequence (Y) direction via
    # a projection MLP, then re-weights the input with their combination.
    def __init__(self, args, dim_head=64, method='New', merge='GAP'):
        super(RCMModule, self).__init__()
        self.merge = merge              # how per-head attention maps are reduced
        self.heads = args.SEHeads
        self.inp_dim = args.sample_duration
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.avg_pool3d = nn.AdaptiveAvgPool3d((None, 1, None))

        # Self Attention Layers
        self.q = nn.Linear(self.inp_dim, dim_head * self.heads, bias=False)
        self.k = nn.Linear(self.inp_dim, dim_head * self.heads, bias=False)
        self.scale = dim_head ** -0.5
        self.method = method
        if method == 'Ori':
            self.norm = nn.LayerNorm(128)
            self.project = nn.Sequential(
                nn.Linear(self.inp_dim, 512, bias=False),
                nn.GELU(),
                nn.Linear(512, 512, bias=False),
                nn.LayerNorm(512)
            )
        elif method == 'New':
            # Dataset-specific bottleneck width for the sequence-direction MLP.
            if args.dataset == 'THU':
                hidden_dim = 128
            else:
                hidden_dim = 256
            self.project = nn.Sequential(
                nn.Linear(self.inp_dim, hidden_dim, bias=False),
                nn.GELU(),
                nn.Linear(hidden_dim, self.inp_dim, bias=False),
                nn.LayerNorm(self.inp_dim),
            )
            self.linear = nn.Linear(self.inp_dim, 512)
            # init.kaiming_uniform_(self.linear, a=math.sqrt(5))
        if self.heads > 1:
            self.mergefc = nn.Sequential(
                nn.Dropout(0.4),
                nn.Linear(512 * self.heads, 512, bias=False),
                nn.LayerNorm(512)
            )

    def forward(self, x):
        """x: (b, c, t). Returns (re-weighted x, sequence weights/projection, vis weights)."""
        b, c, t = x.shape
        inp = x.clone()
        # Sequence (Y) direction
        xd_weight = self.project(self.avg_pool(inp.permute(0, 2, 1)).view(b, -1))
        xd_weight = torch.sigmoid(xd_weight).view(b, -1, 1)

        # Feature (X) direction
        q, k = self.q(x), self.k(x)
        # NOTE: lambda parameter `t` shadows the sequence length above.
        q, k = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.heads), [q, k])
        dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
        if self.merge == 'mean':
            dots = dots.mean(dim=2)
        elif self.merge == 'GAP':
            # NOTE(review): .squeeze() also drops the batch dim when b == 1 — verify callers.
            dots = self.avg_pool3d(dots).squeeze()
            if self.heads > 1:
                dots = dots.view(b, -1)
                dots = self.mergefc(dots)
            else:
                dots = dots.squeeze()
        y = torch.sigmoid(dots).view(b, c, 1)

        if self.method == 'Ori':
            # Additive combination of the two gates.
            out = x * (y.expand_as(x) + xd_weight.expand_as(x))
            visweight = xd_weight # for visualization
            return out, xd_weight, visweight
        elif self.method == 'New':
            # Outer product of the two gates gives a full (seq, feature) weight map.
            weight = einsum('b i d, b j d -> b i j', xd_weight, y)
            out = x * weight.permute(0, 2, 1)
            visweight = weight # for visualization
            return out, self.linear(xd_weight.squeeze()), visweight
class DTNNet(nn.Module):
    # Decoupled Temporal Network: three parallel token streams sampled at low /
    # media / high temporal resolutions, processed by a stack of
    # MultiScaleTransformerEncoder stages and fused into one prediction.
    # Also returns a recoupling distillation loss and visualization artifacts.
    def __init__(self, args, num_classes=249, small_dim=512, media_dim=512, large_dim=512,
                 small_depth=1, media_depth=1, large_depth=1,
                 heads=8, pool='cls', dropout=0.1, emb_dropout=0.0, branch_merge='pool',
                 init: bool = False,
                 warmup_temp_epochs: int = 30):
        super().__init__()
        # Branch lengths: T/k, 2T/k, 3T/k frames (k = args.intar_fatcer).
        self.low_frames = args.sample_duration // args.intar_fatcer
        self.media_frames = self.low_frames + args.sample_duration // args.intar_fatcer
        self.high_frames = self.media_frames + args.sample_duration // args.intar_fatcer
        print('Temporal Resolution:', self.low_frames, self.media_frames, self.high_frames)
        self.branch_merge = branch_merge
        self._args = args
        warmup_temp, temp = map(float, args.temp)
        multi_scale_enc_depth = args.N

        num_patches_small = self.low_frames
        num_patches_media = self.media_frames
        num_patches_large = self.high_frames

        # Learnable position embeddings and class tokens, one set per branch.
        self.pos_embedding_small = nn.Parameter(torch.randn(1, num_patches_small + 1, small_dim))
        self.cls_token_small = nn.Parameter(torch.randn(1, 1, small_dim))
        self.dropout_small = nn.Dropout(emb_dropout)
        # trunc_normal_(self.pos_embedding_small, std=.02)
        # trunc_normal_(self.cls_token_small, std=.02)

        self.pos_embedding_media = nn.Parameter(torch.randn(1, num_patches_media + 1, media_dim))
        self.cls_token_media = nn.Parameter(torch.randn(1, 1, media_dim))
        self.dropout_media = nn.Dropout(emb_dropout)
        # trunc_normal_(self.pos_embedding_media, std=.02)
        # trunc_normal_(self.cls_token_media, std=.02)

        self.pos_embedding_large = nn.Parameter(torch.randn(1, num_patches_large + 1, large_dim))
        self.cls_token_large = nn.Parameter(torch.randn(1, 1, large_dim))
        self.dropout_large = nn.Dropout(emb_dropout)
        # trunc_normal_(self.pos_embedding_large, std=.02)
        # trunc_normal_(self.cls_token_large, std=.02)

        self.multi_scale_transformers = nn.ModuleList([])
        for _ in range(multi_scale_enc_depth):
            self.multi_scale_transformers.append(
                MultiScaleTransformerEncoder(args, small_dim=small_dim, small_depth=small_depth,
                                             small_heads=heads,
                                             media_dim=media_dim, media_depth=media_depth,
                                             media_heads=heads,
                                             large_dim=large_dim, large_depth=large_depth,
                                             large_heads=heads,
                                             dropout=dropout))

        self.pool = pool  # 'cls' -> use class token; 'mean' -> average tokens
        # self.to_latent = nn.Identity()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.max_pool = nn.AdaptiveMaxPool1d(1)
        if self._args.recoupling:
            self.rcm = RCMModule(args)
        # Classification heads are only needed when DTN itself produces logits.
        if args.Network != 'FusionNet':
            self.mlp_head_small = nn.Sequential(
                nn.LayerNorm(small_dim),
                nn.Dropout(self._args.drop),
                nn.Linear(small_dim, num_classes),
            )

            self.mlp_head_media = nn.Sequential(
                nn.LayerNorm(media_dim),
                nn.Dropout(self._args.drop),
                nn.Linear(media_dim, num_classes),
            )

            self.mlp_head_large = nn.Sequential(
                nn.LayerNorm(large_dim),
                nn.Dropout(self._args.drop),
                nn.Linear(large_dim, num_classes),
            )
        self.show_res = Rearrange('b t (c p1 p2) -> b t c p1 p2', p1=int(small_dim ** 0.5), p2=int(small_dim ** 0.5))
        # Temperature schedule: linear warmup, then constant.
        self.temp_schedule = np.concatenate((
            np.linspace(warmup_temp,
                        temp, warmup_temp_epochs),
            np.ones(args.epochs - warmup_temp_epochs) * temp
        ))
        if init:
            self.init_weights()
        self.trans_feature = None
        if self._args.temporal_consist:
            # Temporal-consistency head: tokens -> 2D maps -> transposed-conv decoder.
            self.TCMLP = nn.Sequential(
                nn.ReLU(),
                nn.Linear(small_dim, 1024),
                nn.Dropout(0.1)
            )
            self.temporal_reduce = nn.Conv2d(in_channels=num_patches_small + num_patches_media + num_patches_large,
                                             out_channels=num_patches_media,
                                             kernel_size=1,
                                             stride=1,
                                             padding=0,
                                             bias=False)
            self.t_conv_group = nn.Sequential(
                nn.ConvTranspose2d( in_channels=num_patches_media, out_channels=num_patches_media//2, stride=2, kernel_size=3, padding=1, output_padding=1,
                                    dilation=1, padding_mode="zeros", bias=False ),
                nn.BatchNorm2d(num_patches_media//2),
                nn.ReLU(),
                nn.ConvTranspose2d( in_channels=num_patches_media//2, out_channels=3, stride=2, kernel_size=3, padding=1, output_padding=1,
                                    dilation=1, padding_mode="zeros", bias=False ),
                nn.BatchNorm2d(3),
                nn.ReLU(),
            )
        else:
            self.tc_feat = None

    def get_trans_feature(self):
        # Patch tokens of the media branch from the last forward pass.
        return self.trans_feature

    def TC_forward(self):
        # Decoded temporal-consistency feature from the last forward pass.
        return self.tc_feat

    # @torch.no_grad()
    def init_weights(self):
        def _init(m):
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(
                    m.weight)  # _trunc_normal(m.weight, std=0.02)  # from .initialization import _trunc_normal
                if hasattr(m, 'bias') and m.bias is not None:
                    nn.init.normal_(m.bias, std=1e-6)  # nn.init.constant(m.bias, 0)

        self.apply(_init)

    # ----------------------------------
    # frames simple function
    # ----------------------------------
    def f(self, n, sn):
        # Sample `sn` indices from range(n): one random index per uniform segment.
        # NOTE: uses the module-level `random` state, so sampling is stochastic
        # in both train and eval.
        SL = lambda n, sn: [(lambda n, arr: n if arr == [] else random.choice(arr))(n * i / sn,
                                                                                   range(int(n * i / sn),
                                                                                         max(int(n * i / sn) + 1,
                                                                                             int(n * (
                                                                                                     i + 1) / sn))))
                            for i in range(sn)]
        return SL(n, sn)

    def forward(self, img): # img size: [2, 64, 1024]
        """img: (B, T, C) frame embeddings. Returns ((fused, xs, xm, xl), distill loss, vis)."""
        # ----------------------------------
        # Recoupling:
        # ----------------------------------
        if self._args.recoupling:
            img, spatial_weights, visweight = self.rcm(img.permute(0, 2, 1))
            img = img.permute(0, 2, 1)
        else:
            visweight = img

        # ----------------------------------
        # Small branch: sub-sample, prepend class token, add position embedding.
        sl_low = self.f(img.size(1), self.low_frames)
        xs = img[:, sl_low, :]
        b, n, _ = xs.shape
        cls_token_small = repeat(self.cls_token_small, '() n d -> b n d', b=b)
        xs = torch.cat((cls_token_small, xs), dim=1)
        xs += self.pos_embedding_small[:, :(n + 1)]
        xs = self.dropout_small(xs)
        # ----------------------------------
        sl_media = self.f(img.size(1), self.media_frames)
        xm = img[:, sl_media, :]
        b, n, _ = xm.shape
        cls_token_media = repeat(self.cls_token_media, '() n d -> b n d', b=b)
        xm = torch.cat((cls_token_media, xm), dim=1)
        xm += self.pos_embedding_media[:, :(n + 1)]
        xm = self.dropout_media(xm)
        # ----------------------------------
        sl_high = self.f(img.size(1), self.high_frames)
        xl = img[:, sl_high, :]
        b, n, _ = xl.shape
        cls_token_large = repeat(self.cls_token_large, '() n d -> b n d', b=b)
        xl = torch.cat((cls_token_large, xl), dim=1)
        xl += self.pos_embedding_large[:, :(n + 1)]
        xl = self.dropout_large(xl)
        # ----------------------------------
        # Temporal Multi-scale features learning
        # ----------------------------------
        # Local mixing happens only in the first encoder of the stack.
        Local_flag = True
        for multi_scale_transformer in self.multi_scale_transformers:
            xs, xm, xl = multi_scale_transformer(xs, xm, xl, Local_flag)
            Local_flag = False
        self.trans_feature = xm[:, 1:]
        if self._args.temporal_consist:
            tc_feat = self.TCMLP(torch.cat((xs[:, 1:], xm[:, 1:], xl[:, 1:]), dim=1)) #[b, s+m+l, 1024]
            tc_feat = rearrange(tc_feat, 'b n (h w) -> b n h w', h=int(tc_feat.size(-1) ** 0.5))
            tc_feat = self.temporal_reduce(tc_feat)
            self.tc_feat = self.t_conv_group(tc_feat)

        # Reduce each branch to one embedding (class token or token mean).
        xs = xs.mean(dim=1) if self.pool == 'mean' else xs[:, 0]
        xm = xm.mean(dim=1) if self.pool == 'mean' else xm[:, 0]
        xl = xl.mean(dim=1) if self.pool == 'mean' else xl[:, 0]

        if self._args.recoupling:
            # KL distillation between RCM sequence weights and the (detached)
            # average branch embedding, both temperature-softened.
            T = self._args.temper
            distillation_loss = F.kl_div(F.log_softmax(spatial_weights.squeeze() / T, dim=-1),
                                         F.softmax(((xs + xm + xl) / 3.).detach() / T, dim=-1),
                                         reduction='sum')
        else:
            # NOTE(review): hard-codes .cuda() — breaks CPU-only runs.
            distillation_loss = torch.tensor(0.0).cuda()

        if self._args.Network != 'FusionNet':
            if self._args.sharpness:
                temp = self.temp_schedule[self._args.epoch]
                xs = self.mlp_head_small(xs) / temp
                xm = self.mlp_head_media(xm) / temp
                xl = self.mlp_head_large(xl) / temp
            else:
                xs = self.mlp_head_small(xs)
                xm = self.mlp_head_media(xm)
                xl = self.mlp_head_large(xl)

        if self.branch_merge == 'sum':
            x = xs + xm + xl
        elif self.branch_merge == 'pool':
            # NOTE(review): squeeze() also drops the batch dim when B == 1 — verify callers.
            x = self.max_pool(torch.cat((xs.unsqueeze(2), xm.unsqueeze(2), xl.unsqueeze(2)), dim=-1)).squeeze()

        # ---------------------------------
        # Get score from multi-branch Trans for visualization
        # ---------------------------------
        scores_small = self.multi_scale_transformers[2].transformer_enc_small.layers[-1][0].fn.scores
        scores_media = self.multi_scale_transformers[2].transformer_enc_media.layers[-1][0].fn.scores
        scores_large = self.multi_scale_transformers[2].transformer_enc_large.layers[-1][0].fn.scores
        # resize attn
        attn_media = scores_media.detach().clone()
        attn_media.resize_(*scores_small.size())
        attn_large = scores_large.detach().clone()
        attn_large.resize_(*scores_small.size())
        att_small = scores_small.detach().clone()
        scores = torch.cat((att_small, attn_media, attn_large), dim=1) # [2, 24, 17, 17]
        # Pairwise cosine similarity between all per-head attention maps.
        att_map = torch.zeros(scores.size(0), scores.size(1), scores.size(1), dtype=torch.float)
        for b in range(scores.size(0)):
            for i, s1 in enumerate(scores[b]):
                for j, s2 in enumerate(scores[b]):
                    cosin_simil = torch.cosine_similarity(s1.view(1, -1), s2.view(1, -1))
                    att_map[b][i][j] = cosin_simil
        # --------------------------------
        # Measure cosine similarity of xs and xl
        # --------------------------------
        cosin_similar_xs_xm = torch.cosine_similarity(xs[0], xm[0], dim=-1)
        cosin_similar_xs_xl = torch.cosine_similarity(xs[0], xl[0], dim=-1)
        cosin_similar_xm_xl = torch.cosine_similarity(xm[0], xl[0], dim=-1)
        cosin_similar_sum = cosin_similar_xs_xm + cosin_similar_xs_xl + cosin_similar_xm_xl

        return (x, xs, xm, xl), distillation_loss, (att_map, cosin_similar_sum.cpu(),
                                                    (scores_small[0], scores_media[0], scores_large[0]), visweight[0]) | 18,908 | 42.87239 | 159 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/DSN_Fusion.py | '''
This file is modified from:
https://github.com/deepmind/kinetics-i3d/i3d.py
'''
import torch
import torch.nn as nn
from einops.layers.torch import Rearrange
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
import os, math
import sys
from .DTN import DTNNet
from .FRP import FRP_Module
from .utils import *
import os, math
import sys
sys.path.append('../../')
from collections import OrderedDict
from utils import load_pretrained_checkpoint
import logging
class DSNNet(nn.Module):
VALID_ENDPOINTS = (
'Conv3d_1a_7x7',
'MaxPool3d_2a_3x3',
'Conv3d_2b_1x1',
'Conv3d_2c_3x3',
'MaxPool3d_3a_3x3',
'Mixed_3b',
'Mixed_3c',
'MaxPool3d_4a_3x3',
'Mixed_4b',
'Mixed_4c',
'MaxPool3d_5a_2x2',
'Mixed_5b',
'Mixed_5c'
)
def __init__(self, args, num_classes=400, spatial_squeeze=True, name='inception_i3d', in_channels=3, dropout_keep_prob=0.5,
pretrained: str = False):
super(DSNNet, self).__init__()
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self.logits = None
self.args = args
self.end_points = {}
'''
Low Level Features Extraction
'''
end_point = 'Conv3d_1a_7x7'
self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[1, 7, 7],
stride=(1, 2, 2), padding=(0, 3, 3), name=name + end_point)
end_point = 'MaxPool3d_2a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
end_point = 'Conv3d_2b_1x1'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0,
name=name + end_point)
end_point = 'Conv3d_2c_3x3'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[1, 3, 3],
padding=(0, 1, 1),
name=name + end_point)
end_point = 'MaxPool3d_3a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
'''
Spatial Multi-scale Features Learning
'''
end_point = 'Mixed_3b'
self.end_points[end_point] = SpatialInceptionModule(192, [64, 96, 128, 16, 32, 32], name + end_point)
end_point = 'Mixed_3c'
self.end_points[end_point] = SpatialInceptionModule(256, [128, 128, 192, 32, 96, 64], name + end_point)
end_point = 'MaxPool3d_4a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
end_point = 'Mixed_4b'
self.end_points[end_point] = SpatialInceptionModule(128 + 192 + 96 + 64, [192, 96, 208, 16, 48, 64], name + end_point)
end_point = 'Mixed_4c'
self.end_points[end_point] = SpatialInceptionModule(192 + 208 + 48 + 64, [160, 112, 224, 24, 64, 64], name + end_point)
end_point = 'MaxPool3d_5a_2x2'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 2, 2], stride=(1, 2, 2),
padding=0)
end_point = 'Mixed_5b'
self.end_points[end_point] = SpatialInceptionModule(160 + 224 + 64 + 64, [256, 160, 320, 32, 128, 128],
name + end_point)
end_point = 'Mixed_5c'
self.end_points[end_point] = SpatialInceptionModule(256 + 320 + 128 + 128, [384, 192, 384, 48, 128, 128],
name + end_point)
self.LinearMap = nn.Sequential(
nn.LayerNorm(1024),
nn.Linear(1024, 512),
)
self.avg_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
self.dropout = nn.Dropout(dropout_keep_prob)
self.build()
self.dtn = DTNNet(args, num_classes=self._num_classes)
self.rrange = Rearrange('b c t h w -> b t c h w')
if args.frp:
self.frp_module = FRP_Module(w=args.w, inplanes=64)
if pretrained:
load_pretrained_checkpoint(self, pretrained)
def build(self):
for k in self.end_points.keys():
self.add_module(k, self.end_points[k])
    def forward(self, x=None, garr=None, endpoint=None):
        """Dispatch between the spatial Inception trunk and the DTN head.

        Args:
            x: input features. For ``endpoint == 'spatial'`` a 5-D conv
                feature map consumed by the end-point modules; otherwise the
                sequence fed straight to ``self.dtn``.
            garr: auxiliary map consumed by ``self.frp_module`` when
                ``self.args.frp`` is enabled.
            endpoint: ``'spatial'`` runs the backbone and returns pooled,
                linearly mapped per-frame embeddings of shape (B, T, 512);
                any other value runs the temporal classifier and returns
                ``(logits, distillation_loss, aux_tuple)``.
        """
        if endpoint == 'spatial':
            # Run the end-points in their canonical order; after each of the
            # three Mixed_*b multi-scale stages, optionally blend in the FRP
            # module's output as a residual.
            for end_point in self.VALID_ENDPOINTS:
                if end_point in self.end_points:
                    if end_point in ['Mixed_3b']:
                        x = self._modules[end_point](x)
                        if self.args.frp:
                            x = self.frp_module(x, garr) + x
                    elif end_point in ['Mixed_4b']:
                        x = self._modules[end_point](x)
                        if self.args.frp:
                            x = self.frp_module(x, garr) + x
                        # NOTE(review): `f` is captured but never used or
                        # returned in this block — looks like leftover
                        # debugging/feature-tap code; confirm before removal.
                        f = x
                    elif end_point in ['Mixed_5b']:
                        x = self._modules[end_point](x)
                        if self.args.frp:
                            x = self.frp_module(x, garr) + x
                    else:
                        x = self._modules[end_point](x)
            # Collapse the spatial dims, keep the temporal axis as the token
            # axis: (B, C, T, 1, 1) -> (B, T, C) -> linear map to 512-d.
            x = self.avg_pool(x).view(x.size(0), x.size(1), -1).permute(0, 2, 1)
            x = self.LinearMap(x)
            return x
        else:
            # Temporal path: delegate entirely to the DTN network.
            logits, distillation_loss, (att_map, cosin_similar, MHAS, visweight) = self.dtn(x)
            return logits, distillation_loss, (att_map, cosin_similar, MHAS, visweight)
| 5,925 | 36.27044 | 127 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/models.py | """
This file is modified from:
https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/deit.py
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.nn as nn
from functools import partial
from einops import rearrange, repeat
import torch.nn.functional as nnf
from torchvision.utils import save_image, make_grid
import numpy as np
import cv2
import random
random.seed(123)
from timm.models.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_, helpers, DropPath
from timm.models.resnet import Bottleneck, ResNet
from timm.models.resnet import _cfg as _cfg_resnet
from timm.models.helpers import build_model_with_cfg
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
'deit_base_distilled_patch16_384',
]
def TokensCutOff(x, tua = 0.4):
    """Randomly suppress whole patch tokens (token-level cutoff).

    The first two entries of the sequence (CLS and distillation tokens) are
    always kept intact; each remaining token is kept when its uniform random
    sample exceeds ``tua`` and otherwise scaled by 1e-8 (soft zero).

    Args:
        x: (B, N+2, C) token sequence, tokens 0/1 being CLS and DIS.
        tua: suppression threshold; larger values drop more tokens.

    Returns:
        Tensor of the same shape and device as ``x``.
    """
    CLS, DIS = x[:, 0, :].unsqueeze(1), x[:, 1, :].unsqueeze(1)
    tokens = x[:, 2:, :]
    B, N, C = tokens.shape
    # Allocate the masks on the input's device instead of a hard-coded
    # `.cuda()` call, which crashed on CPU-only runs.
    keep = torch.ones(B, N, device=x.device)
    prob = torch.rand(B, N, device=x.device)
    # Dropped tokens are scaled by 1e-8 rather than hard zeros.
    mask = torch.where(prob > tua, keep, torch.full_like(keep, 1e-8))
    TokenMask = mask.view(B, N, 1).expand_as(tokens)
    x = tokens * TokenMask
    x = torch.cat((CLS, DIS, x), dim=1)
    return x
def FeatureCutOff(x, tua = 0.4):
    """Randomly suppress whole feature channels (feature-level cutoff).

    Mirror of :func:`TokensCutOff`, but the Bernoulli mask is drawn per
    channel and shared across all patch tokens. CLS and distillation tokens
    are left untouched; suppressed channels are scaled by 1e-8.

    Args:
        x: (B, N+2, C) token sequence, tokens 0/1 being CLS and DIS.
        tua: suppression threshold; larger values drop more channels.

    Returns:
        Tensor of the same shape and device as ``x``.
    """
    CLS, DIS = x[:, 0, :].unsqueeze(1), x[:, 1, :].unsqueeze(1)
    tokens = x[:, 2:, :]
    B, N, C = tokens.shape
    # Allocate on x.device instead of the original hard-coded `.cuda()`,
    # which crashed on CPU-only runs.
    keep = torch.ones(B, C, device=x.device)
    prob = torch.rand(B, C, device=x.device)
    mask = torch.where(prob > tua, keep, torch.full_like(keep, 1e-8))
    TokenMask = mask.view(B, 1, C).expand_as(tokens)
    x = tokens * TokenMask
    x = torch.cat((CLS, DIS, x), dim=1)
    return x
def shuffle_unit(features, shift, group, begin=0, return_idex=False):
    """Deterministic shift-then-regroup shuffle of the token axis.

    First rotates the tokens by ``begin - 1 + shift`` positions, then splits
    the rotated sequence into ``group`` contiguous chunks and interleaves
    them (chunk-transpose). ``labels`` tracks where each output token came
    from in the original ordering.

    Args:
        features: (B, N, C) token tensor; N must be divisible by ``group``.
        shift: rotation amount (combined with ``begin`` as ``begin-1+shift``).
        group: number of chunks interleaved by the patch shuffle.
        begin: start offset of the region being rotated.
        return_idex: when True, also return the per-token origin indices.
    """
    bsz = features.size(0)
    dim = features.size(-1)
    labels = torch.arange(0, features.size(-2), 1, device=features.device).expand(bsz, -1)
    # Shift operation: rotate tokens (and their origin labels) in lockstep.
    pivot = begin - 1 + shift
    shifted = torch.cat([features[:, pivot:], features[:, begin:pivot]], dim=1)
    labels = torch.cat([labels[:, pivot:], labels[:, begin:pivot]], dim=1)
    # Patch shuffle operation: chunk into `group` blocks and interleave.
    out = shifted.view(bsz, group, -1, dim)
    out = torch.transpose(out, 1, 2).contiguous()
    out = out.view(bsz, -1, dim)
    labels = labels.view(bsz, group, -1, 1)
    labels = torch.transpose(labels, 1, 2).contiguous()
    labels = labels.view(bsz, -1)
    if return_idex:
        return out, labels
    return out
def random_shuffle_unit(features, return_idex=False, batch_premutation=False, sort_label=None):
    """Randomly permute the token axis of ``features`` in one of three modes.

    Modes (checked in this order):
      * ``sort_label`` given: for each batch item, pick one pre-defined
        permutation from ``sort_label`` (a mapping whose keys appear to be
        index tensors — ``torch.cat`` is applied to them — and whose values
        are permutation labels; TODO confirm against callers).
      * ``batch_premutation``: an independent random permutation per batch
        item, applied on the flattened (B*N, C) view.
      * default: one random permutation shared by the whole batch.

    Returns ``x`` alone, or ``(x, labels, index)`` when ``return_idex``.
    NOTE(review): when ``sort_label`` is truthy but ``return_idex`` is False,
    control falls through into the later branches and the per-sample
    permutation result is discarded — confirm this is intentional.
    """
    if sort_label:
        B, N, C = features.shape
        labels = []
        perms_idx = []
        for b in range(B):
            # Choose one of the pre-built permutations for this sample and
            # offset its indices into the flattened batch layout.
            perm_idx = random.choice(list(sort_label.keys()))
            label = sort_label[perm_idx]
            perms_idx.append(perm_idx + b * N)
            labels.append(label)
        perms_idx = torch.cat(perms_idx)
        x = features.contiguous().view(-1, C)
        x = x[perms_idx, :]
        x = x.view(B, N, C)
        if return_idex:
            return x, torch.tensor(labels, device=features.device), perms_idx
    if batch_premutation:
        B, N, C = features.shape
        labels = torch.arange(0, N, 1, device=features.device)
        # labels = (labels - labels.min())/(labels.max() - labels.min()) + 1e-8
        labels = labels.expand(B, -1)
        # perturbation = torch.rand([B, N], device=features.device) - torch.rand([B, N], device=features.device)
        # labels = labels + perturbation
        # Independent permutation per batch item, offset into the flat view.
        index = torch.cat([torch.randperm(N) + b * N for b in range(B)], dim=0)
        x = features.contiguous().view(-1, C)
        x = x[index, :]
        x = x.view(B, N, C)
        labels = labels.contiguous().view(-1)[index].view(B, -1)
    else:
        batchsize = features.size(0)
        dim = features.size(-1)
        num_patch = features.size(-2)
        labels = torch.arange(0, features.size(-2), 1, device=features.device)
        # labels = (labels - labels.min())/(labels.max() - labels.min()) + 1e-8
        labels = labels.expand(batchsize, -1)
        # perturbation = torch.rand([B, N]) - torch.rand([B, N])
        # labels = labels + perturbation
        # Single permutation shared across the batch.
        index = torch.randperm(features.size(-2))
        labels = labels[:, index]
        x = features[:, index, :]
    if return_idex:
        return x, labels, index
    return x
class Attention(nn.Module):
    """Multi-head self-attention that caches its last attention map.

    Standard ViT attention: a fused QKV projection, scaled dot-product
    attention with optional dropout, and an output projection. The softmaxed
    attention map of the most recent forward pass is kept on ``self.attn``
    and exposed via :meth:`get_attn` for visualization.
    """
    def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5  # 1/sqrt(d_head)
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.attn = None  # last attention map, see get_attn()
    def forward(self, x):
        # (Removed unused local `xori` kept by the original implementation.)
        B, N, C = x.shape
        # (B, N, 3C) -> (3, B, heads, N, d_head)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        self.attn = attn  # cache for later inspection
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
    def get_attn(self):
        """Return the (B, heads, N, N) attention map of the last forward."""
        return self.attn
class Mlp(nn.Module):
    """Position-wise feed-forward block used by ViT/MLP-Mixer style models.

    Two linear projections with an activation in between and an independent
    dropout after each projection. ``drop`` may be a single probability
    (applied to both dropouts) or a pair ``(p_hidden, p_out)``.
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        # Expand `drop` to a per-layer pair (scalar -> same value twice).
        if isinstance(drop, (tuple, list)):
            drop_hidden, drop_out = drop
        else:
            drop_hidden = drop_out = drop
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop_hidden)
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop2 = nn.Dropout(drop_out)
    def forward(self, x):
        out = self.fc1(x)
        out = self.act(out)
        out = self.drop1(out)
        out = self.fc2(out)
        return self.drop2(out)
class Block(nn.Module):
    """Pre-norm transformer encoder block: MHSA then MLP, each residual.

    ``args`` is accepted only for signature compatibility with the model
    builder and is not used inside the block. Stochastic depth (DropPath)
    wraps both residual branches; with ``drop_path == 0`` it degenerates to
    an identity.
    """
    def __init__(self, args, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
        # NOTE: drop path for stochastic depth; identity when disabled.
        self.drop_path = nn.Identity() if drop_path <= 0. else DropPath(drop_path)
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))
class DistilledVisionTransformer(VisionTransformer):
    """DeiT-style ViT: adds a distillation token and a second classifier head.

    At this point in the module ``VisionTransformer`` still refers to the
    timm implementation (the local subclass of the same name is defined
    further below). Training-time token shuffling and token/feature cutoff
    are controlled by flags carried on the project ``args`` namespace.
    """
    def __init__(self, *args, **kwargs):
        # The experiment namespace arrives via kwargs; timm's constructor
        # does not accept it, so it is removed before delegating.
        self._args = kwargs['args']
        del kwargs['args']
        super().__init__(*args, **kwargs)
        # Learnable distillation token, placed next to the CLS token.
        self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
        num_patches = self.patch_embed.num_patches
        # num_patches + 2 positions: one for CLS, one for the dist token.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
        self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()
        # dpr = [x.item() for x in torch.linspace(0, kwargs['drop_path_rate'], kwargs['depth'])]  # stochastic depth decay rule
        # self.blocks = nn.Sequential(*[
        #     Block(
        #         dim=kwargs['embed_dim'], num_heads=kwargs['num_heads'], mlp_ratio=kwargs['mlp_ratio'], qkv_bias=kwargs['qkv_bias'], drop=kwargs['drop_rate'],
        #         attn_drop=kwargs['attn_drop_rate'], drop_path=dpr[i], norm_layer=kwargs['norm_layer'], act_layer=kwargs['act_layer'])
        #     for i in range(kwargs['depth'])])
        trunc_normal_(self.dist_token, std=.02)
        trunc_normal_(self.pos_embed, std=.02)
        self.head_dist.apply(self._init_weights)
        # Training-time regularization switches from the CLI/config args.
        self.shuffle = self._args.shuffle
        self.Token_cutoff = self._args.Token_cutoff
        self.tua_token = self._args.tua_token
        self.Feature_cutoff = self._args.Feature_cutoff
        self.tua_feature = self._args.tua_feature
    def forward_features(self, x):
        # taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
        # with slight modifications to add the dist_token
        B = x.shape[0]
        x = self.patch_embed(x)
        cls_tokens = self.cls_token.expand(B, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)
        if self.shuffle and self.training:
            # Shuffle only the patch tokens; CLS/DIS stay at positions 0/1.
            CLS, DIS = x[:, 0, :].unsqueeze(1), x[:, 1, :].unsqueeze(1)
            x = shuffle_unit(x[:, 2:, :], shift=8, group=2)
            x = torch.cat((CLS, DIS, x), dim=1)
        for blk in self.blocks:
            x = blk(x)
        # Train-only regularizers applied after the transformer stack.
        if self.Token_cutoff and self.training:
            x = TokensCutOff(x, self.tua_token)
        if self.Feature_cutoff and self.training:
            x = FeatureCutOff(x, self.tua_feature)
        x = self.norm(x)
        # Return the CLS embedding and the distillation embedding.
        return x[:, 0], x[:, 1]
    def forward(self, x):
        x, x_dist = self.forward_features(x)
        x = self.head(x)
        x_dist = self.head_dist(x_dist)
        if self.training:
            # Two logits streams during training (hard/soft distillation).
            return x, x_dist
        else:
            # during inference, return the average of both classifier predictions
            return (x + x_dist) / 2
class Video2Image(nn.Module):
    """Squeeze a video clip into one 3-channel image and expand it back.

    Each RGB channel of the clip is collapsed over its temporal axis by a
    dedicated 1x1 ``Conv2d`` (``channelN``: T -> 1), producing a compressed
    (B, 3, H, W) image. A mirrored set of 1x1 convolutions
    (``channelN_reverse``: 1 -> T) re-inflates that image back to the
    (B, 3, T, H, W) layout. The last compressed image is cached for
    inspection via :meth:`get_compressed_img`.
    """
    def __init__(self, inp_channel=16):
        super(Video2Image, self).__init__()
        # Forward (clip -> image) per-channel temporal squeezers.
        self.channel1 = nn.Conv2d(inp_channel, 1, kernel_size=1, stride=1, padding=0, bias=False)
        self.channel2 = nn.Conv2d(inp_channel, 1, kernel_size=1, stride=1, padding=0, bias=False)
        self.channel3 = nn.Conv2d(inp_channel, 1, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(3)
        self.relu = nn.ReLU(inplace=False)
        # Reverse (image -> clip) per-channel temporal expanders.
        self.channel1_reverse = nn.Conv2d(1, inp_channel, kernel_size=1, stride=1, padding=0, bias=False)
        self.channel2_reverse = nn.Conv2d(1, inp_channel, kernel_size=1, stride=1, padding=0, bias=False)
        self.channel3_reverse = nn.Conv2d(1, inp_channel, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn_reverse = nn.BatchNorm3d(3)
        self.relu_reverse = nn.ReLU(inplace=False)
        self.compressed = None  # cache of the last compressed image
    def get_compressed_img(self):
        """Return the compressed image produced by the last forward pass."""
        return self.compressed
    def forward(self, x):
        """(B, 3, T, H, W) clip -> ((B, 3, H, W) image, (B, 3, T, H, W) clip)."""
        squeezers = (self.channel1, self.channel2, self.channel3)
        planes = [conv(x[:, c]) for c, conv in enumerate(squeezers)]
        img = self.relu(self.bn(torch.cat(planes, dim=1)))
        self.compressed = img
        expanders = (self.channel1_reverse, self.channel2_reverse, self.channel3_reverse)
        volumes = [conv(img[:, c].unsqueeze(1)).unsqueeze(1) for c, conv in enumerate(expanders)]
        clip = self.relu_reverse(self.bn_reverse(torch.cat(volumes, dim=1)))
        return img, clip
class VisionTransformer(VisionTransformer):
    """Project ViT that first compresses a video clip to one image.

    NOTE: this class deliberately shadows the timm ``VisionTransformer``
    imported above; its transformer blocks are rebuilt from the local
    ``Block`` so that the project ``args`` can be threaded through.
    CAUTION: the constructor indexes ``kwargs['drop_path_rate']``,
    ``kwargs['depth']`` etc., so callers must pass those as keywords
    (the factory functions below do).
    """
    def __init__(self, *args, **kwargs):
        # Experiment namespace rides in via kwargs; strip it before
        # delegating to timm's constructor.
        self._args = kwargs['args']
        del kwargs['args']
        super().__init__(*args, **kwargs)
        dpr = [x.item() for x in torch.linspace(0, kwargs['drop_path_rate'], kwargs['depth'])]  # stochastic depth decay rule
        # Replace timm's blocks with the local Block (args-aware variant).
        self.blocks = nn.Sequential(*[
            Block(
                self._args, dim=kwargs['embed_dim'], num_heads=kwargs['num_heads'], mlp_ratio=kwargs['mlp_ratio'], qkv_bias=kwargs['qkv_bias'], drop=kwargs['drop_rate'],
                drop_path=dpr[i], norm_layer=kwargs['norm_layer'])
            for i in range(kwargs['depth'])])
        num_patches = self.patch_embed.num_patches
        # num_patches + 1 positions: single CLS token, no dist token here.
        self.pos_embed = nn.Parameter(torch.zeros(1, num_patches+1, self.embed_dim))
        trunc_normal_(self.pos_embed, std=.02)
        # Temporal squeezer; inp_channel equals the clip length.
        self.video2Img = Video2Image(self._args.sample_duration)
    def get_cls_token(self):
        # CLS embedding of the last forward pass (set in forward()).
        return self.CLSToken
    def get_patch_token(self):
        # Patch embeddings of the last forward pass (set in forward_features()).
        return self.PatchToken
    def forward_features(self, x):
        x = self.patch_embed(x)
        cls_token = self.cls_token.expand(x.shape[0], -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_token, x), dim=1)
        x = self.pos_drop(x + self.pos_embed)
        for blk in self.blocks:
            x = blk(x)
        x = self.norm(x)
        # Cache patch tokens for visualization/aux losses.
        self.PatchToken = x[:, 1:]
        return self.pre_logits(x[:, 0])
    def forward(self, x):
        # x.size: torch.Size([16, 3, 16, 224, 224])
        # Compress the clip to one image; also get the re-expanded clip
        # (used by the caller, e.g. for a reconstruction objective).
        x, x_reverse = self.video2Img(x)
        x = self.forward_features(x)
        self.CLSToken = x
        x = self.head(x)
        return x, x_reverse
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
    """DeiT-Tiny/16 @ 224 (embed 192, depth 12, 3 heads); extra kwargs are
    forwarded to the local ``VisionTransformer`` subclass."""
    model = VisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
    """DeiT-Small/16 @ 224 (embed 384, depth 12, 6 heads); extra kwargs are
    forwarded to the local ``VisionTransformer`` subclass."""
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
    """DeiT-Base/16 @ 224 (embed 768, depth 12, 12 heads); extra kwargs are
    forwarded to the local ``VisionTransformer`` subclass."""
    model = VisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
    """Distilled DeiT-Tiny/16 @ 224 (adds a distillation token/head)."""
    model = DistilledVisionTransformer(
        patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs):
    """Distilled DeiT-Small/16 @ 224 (adds a distillation token/head)."""
    model = DistilledVisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
    """Distilled DeiT-Base/16 @ 224 (adds a distillation token/head)."""
    model = DistilledVisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
        qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs):
    """DeiT-Base/16 at 384x384 input resolution."""
    model = VisionTransformer(
        img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12,
        mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
    """Distilled DeiT-Base/16 at 384x384 input resolution."""
    model = DistilledVisionTransformer(
        img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12,
        mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        state = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
            map_location="cpu", check_hash=True)
        model.load_state_dict(state["model"])
    return model
def _create_resnet(variant, pretrained=False, **kwargs):
    """Instantiate a timm ResNet variant.

    The project-specific ``args`` namespace that the model builder injects is
    stripped before forwarding, since timm's ``ResNet`` does not accept it.
    ``pop`` with a default (instead of the original ``del``) tolerates callers
    that never passed ``args`` rather than raising ``KeyError``.
    """
    kwargs.pop('args', None)
    return build_model_with_cfg(ResNet, variant, pretrained, default_cfg=_cfg_resnet(), **kwargs)
@register_model
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model (Bottleneck blocks, layers 3-4-6-3)."""
    return _create_resnet('resnet50', pretrained,
                          block=Bottleneck, layers=[3, 4, 6, 3], **kwargs)
@register_model
def resnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model (Bottleneck blocks, layers 3-4-23-3)."""
    return _create_resnet('resnet101', pretrained,
                          block=Bottleneck, layers=[3, 4, 23, 3], **kwargs)
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/__init__.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
from .build import *
from .model_ema import *
from .fusion_Net import * | 135 | 18.428571 | 54 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/model_ema.py | """
This file is modified from:
https://github.com/rwightman/pytorch-image-models/blob/main/timm/utils/model_ema.py
Exponential Moving Average (EMA) of model updates
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
_logger = logging.getLogger(__name__)
class ModelEma(nn.Module):
    """ Model Exponential Moving Average (DEPRECATED)
    Keep a moving average of everything in the model state_dict (parameters and buffers).
    This version is deprecated, it does not work with scripted models. Will be removed eventually.
    This is intended to allow functionality like
    https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
    A smoothed version of the weights is necessary for some training schemes to perform well.
    To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
    disable validation of the EMA weights. Validation will have to be done manually in a separate
    process, or after the training stops converging.
    This class is sensitive where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
    """
    def __init__(self, model, decay=0.9999, device='', resume=''):
        """
        Args:
            model: the live model whose weights will be averaged.
            decay: EMA decay factor; new = decay*old + (1-decay)*live.
            device: optional device for the EMA copy ('' keeps the model's).
            resume: optional checkpoint path to seed the EMA weights from.
        """
        super(ModelEma, self).__init__()
        # Accumulate the moving average in a frozen deep copy of the model.
        self.ema = deepcopy(model)
        self.ema.eval()
        self.decay = decay
        self.device = device  # perform ema on different device from model if set
        if device:
            self.ema.to(device=device)
        self.ema_has_module = hasattr(self.ema, 'module')
        if resume:
            try:
                # Preferred path: checkpoint stores EMA weights under
                # 'model_ema' with an 'ema.' prefix that must be stripped.
                checkpoint = torch.load(resume, map_location='cpu')
                new_state_dict = OrderedDict()
                for k, v in checkpoint['model_ema'].items():
                    new_state_dict[k[4:]] = v  # drop the 'ema.' prefix
                self.ema.load_state_dict(new_state_dict)
            except Exception:
                # Fallback: seed the EMA from the raw 'model' weights.
                # (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                checkpoint = torch.load(resume, map_location='cpu')
                new_state_dict = OrderedDict()
                for k, v in checkpoint['model'].items():
                    new_state_dict[k] = v
                self.ema.load_state_dict(new_state_dict)
            print("Loaded state_dict_ema")
        # The EMA copy is never optimized directly.
        for p in self.ema.parameters():
            p.requires_grad_(False)
    def _load_checkpoint(self, checkpoint_path):
        """Load EMA weights saved under 'state_dict_ema', fixing a possible
        DataParallel 'module.' prefix mismatch; warn if the key is absent."""
        checkpoint = torch.load(checkpoint_path, map_location='cpu')
        assert isinstance(checkpoint, dict)
        if 'state_dict_ema' in checkpoint:
            new_state_dict = OrderedDict()
            for k, v in checkpoint['state_dict_ema'].items():
                # ema model may have been wrapped by DataParallel, and need module prefix
                if self.ema_has_module:
                    name = 'module.' + k if not k.startswith('module') else k
                else:
                    name = k
                new_state_dict[name] = v
            self.ema.load_state_dict(new_state_dict)
            _logger.info("Loaded state_dict_ema")
        else:
            _logger.warning("Failed to find state_dict_ema, starting from loaded model weights")
    def update(self, model):
        """Blend the live model's state into the EMA copy in place."""
        # correct a mismatch in state dict keys
        needs_module = hasattr(model, 'module') and not self.ema_has_module
        with torch.no_grad():
            msd = model.state_dict()
            for k, ema_v in self.ema.state_dict().items():
                if needs_module:
                    k = 'module.' + k
                model_v = msd[k].detach()
                if self.device:
                    model_v = model_v.to(device=self.device)
                ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
    def forward(self, x):
        """Run inference with the averaged weights."""
        return self.ema(x)
class ModelEmaV2(nn.Module):
    """ Model Exponential Moving Average V2
    Maintains an exponentially decayed copy of every entry in a model's
    state_dict (parameters and buffers). Unlike V1, entries are matched by
    iteration order rather than by name, which keeps it torchscript-friendly.
    Set device='cpu' to keep the EMA copy off the GPU at the cost of extra
    host<->device copies during updates. As with V1, construct this after the
    model has been placed on its device but before any distributed wrappers.
    """
    def __init__(self, model, decay=0.9999, device=None, resume=None):
        # NOTE: `resume` is accepted for signature compatibility only; it is
        # not consumed here.
        super(ModelEmaV2, self).__init__()
        self.module = deepcopy(model)  # frozen shadow copy holding the average
        self.module.eval()
        self.decay = decay
        self.device = device  # perform ema on a different device when set
        if self.device is not None:
            self.module.to(device=device)
    def _update(self, model, update_fn):
        # Walk both state dicts in parallel (order-based matching, no names)
        # and overwrite each shadow tensor with update_fn(shadow, live).
        with torch.no_grad():
            pairs = zip(self.module.state_dict().values(), model.state_dict().values())
            for shadow, live in pairs:
                if self.device is not None:
                    live = live.to(device=self.device)
                shadow.copy_(update_fn(shadow, live))
    def update(self, model):
        """Blend the live model into the average: decay*old + (1-decay)*new."""
        self._update(model, update_fn=lambda shadow, live: self.decay * shadow + (1. - self.decay) * live)
    def set(self, model):
        """Overwrite the EMA copy with the live model's current state."""
        self._update(model, update_fn=lambda shadow, live: live)
    def forward(self, x):
        """Run inference with the averaged weights."""
        return self.module(x)
| 6,797 | 42.858065 | 102 | py |
MotionRGBD-PAMI | MotionRGBD-PAMI-main/lib/model/build.py | '''
Copyright (C) 2010-2021 Alibaba Group Holding Limited.
'''
from .DSN import DSNNet
from .DSN_v2 import DSNNetV2
from .fusion_Net import CrossFusionNet, SFNNet
from .models import *
from timm.models import create_model
import logging
def build_model(args):
    """Instantiate the network selected by ``args``.

    Resolves the class count for ``args.dataset`` (overridable through
    ``args.num_classes``), writes the final count back onto ``args``, logs it
    on rank 0, and constructs the requested network.
    """
    class_counts = {
        'IsoGD': 249,
        'NvGesture': 25,
        'Jester': 27,
        'THUREAD': 40,
        'NTU': 60,
        'UCF101': 101,
    }
    if args.num_classes is not None:
        # Explicit CLI override takes precedence over the dataset default.
        class_counts[args.dataset] = args.num_classes
    networks = {
        'DSN': DSNNet,
        'DSNV2': DSNNetV2,
        'FusionNet': CrossFusionNet,
    }
    assert args.dataset in class_counts, 'Error in load dataset !'
    assert args.Network in networks, 'Error in Network function !'
    args.num_classes = class_counts[args.dataset]
    if args.local_rank == 0:
        logging.info('Model:{}, Total Categories:{}'.format(args.Network, args.num_classes))
    return networks[args.Network](args, num_classes=args.num_classes, pretrained=args.pretrained)
| 1,019 | 25.153846 | 98 | py |
paper-rule-adherence-dev | paper-rule-adherence-dev/adult_syn_00.py | import os
import pandas as pd
import numpy as np
import mostly_engine.core
## SPLIT data
# Source: UCI Adult census. Drop the 'Married-AF-spouse' marital-status level
# (presumably because it is too rare to synthesize reliably — TODO confirm),
# then keep only the columns referenced by the consistency rules below.
df = pd.read_csv('data/adult_original.csv.gz')
df = df.loc[df['marital-status'] != 'Married-AF-spouse', :]
cols = ['age', 'education', 'education-num', 'marital-status', 'relationship', 'sex', 'income']
df = df[cols]
# 2k-record training sample; everything else becomes the holdout set.
trn = df.sample(n=2_000)
hol = df.drop(trn.index, axis=0)
trn.to_csv('adult_original_2k.csv.gz', index=False)
hol.to_csv('adult_original_holdout.csv.gz', index=False)
## ENCODE data
# Every retained column is declared categorical to the synthesis engine.
mostly_engine.core.split(
    'adult_original_2k.csv.gz',
    tgt_encoding_types = {c: "categorical" for c in cols},
)
mostly_engine.core.analyze()
mostly_engine.core.encode()
## PERSIST rules
# The rule tables appear to enumerate INVALID value combinations (rule2 below
# keeps exactly the pairs absent from the canonical mapping).
# Rule 1: spousal relationship labels paired with non-married statuses,
# plus the contradictory ('Unmarried', 'Married-civ-spouse') pair.
rule1 = pd.merge(
    pd.Series(['Husband', 'Wife'], name='relationship'),
    pd.Series(['Never-married', 'Divorced', 'Married-spouse-absent', 'Widowed', 'Separated'], name='marital-status'),
    how='cross',
)
rule1 = pd.concat([rule1, pd.DataFrame({'relationship': ['Unmarried'], 'marital-status': ['Married-civ-spouse']})], axis=0)
# Rule 2: all (education, education-num) pairs NOT in the canonical mapping.
df = pd.DataFrame({
    'education-num': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
    'education': ['Preschool', '1st-4th', '5th-6th', '7th-8th', '9th', '10th', '11th', '12th', 'HS-grad', 'Some-college', 'Assoc-voc', 'Assoc-acdm', 'Bachelors', 'Masters', 'Prof-school', 'Doctorate']
})
rule2 = pd.merge(
    pd.merge(
        df['education'].drop_duplicates(),
        df['education-num'].drop_duplicates(),
        how='cross',
    ),
    df.assign(valid=True),
    how='left',
)
# Keep the cross-product rows that did NOT match the canonical mapping.
rule2 = rule2.loc[rule2.valid.isna(),['education', 'education-num']]
# Rule 3: ages 17-24 combined with Prof-school/Doctorate degrees.
rule3 = pd.merge(
    pd.Series([17, 18, 19, 20, 21, 22, 23, 24], name='age'),
    pd.Series(['Prof-school', 'Doctorate'], name='education'),
    how='cross',
)
# Rule 4: sex/relationship contradictions (Female-Husband, Male-Wife).
rule4 = pd.DataFrame({
    'sex': ['Female', 'Male'],
    'relationship': ['Husband', 'Wife'],
})
rule1.to_csv('rules/rule1.csv', index=False)
rule2.to_csv('rules/rule2.csv', index=False)
rule3.to_csv('rules/rule3.csv', index=False)
rule4.to_csv('rules/rule4.csv', index=False)
| 2,036 | 28.521739 | 200 | py |
paper-rule-adherence-dev | paper-rule-adherence-dev/adult_syn_01.py | import os
import pandas as pd
import mostly_engine.core
## TRAIN without RULES
# Baseline run: fit the generator on the previously encoded 2k training
# split with no consistency rules attached.
mostly_engine.core.train()
| 108 | 12.625 | 26 | py |
paper-rule-adherence-dev | paper-rule-adherence-dev/adult_syn_02.py | import os
import pandas as pd
import mostly_engine.core
## GENERATE without RULES
# Sample 100k synthetic records from the rule-free model; the engine writes
# parquet shards which are concatenated into a single CSV.
syn_data_path = mostly_engine.core.generate(generation_size=100_000)
syn = pd.concat([pd.read_parquet(fn) for fn in syn_data_path.glob("*.parquet")])
syn.to_csv('adult_synthetic.csv.gz', index=False)
## GENERATE with RULES
# Same model, but the four persisted consistency-rule tables are enforced
# at generation time.
rule1 = pd.read_csv('rules/rule1.csv')
rule2 = pd.read_csv('rules/rule2.csv')
rule3 = pd.read_csv('rules/rule3.csv')
rule4 = pd.read_csv('rules/rule4.csv')
syn_data_path = mostly_engine.core.generate(generation_size=100_000, rules=[rule1, rule2, rule3, rule4])
syn = pd.concat([pd.read_parquet(fn) for fn in syn_data_path.glob("*.parquet")])
syn.to_csv('adult_synthetic_gen.csv.gz', index=False)
| 708 | 29.826087 | 104 | py |
paper-rule-adherence-dev | paper-rule-adherence-dev/adult_syn_03.py | import os
import pandas as pd
import mostly_engine.core
## TRAIN with RULES
# Rule-aware training run: the four persisted consistency-rule tables are
# passed into the trainer with a rule-adherence loss weight of 5.0.
rule1 = pd.read_csv('rules/rule1.csv')
rule2 = pd.read_csv('rules/rule2.csv')
rule3 = pd.read_csv('rules/rule3.csv')
rule4 = pd.read_csv('rules/rule4.csv')
mostly_engine.core.train(rules=[rule1, rule2, rule3, rule4], rule_weight=5.0)
| 313 | 23.153846 | 77 | py |
paper-rule-adherence-dev | paper-rule-adherence-dev/adult_syn_04.py | import os
import pandas as pd
import mostly_engine.core
## GENERATE without RULES
# Sample 100k records from the rule-trained model without enforcing rules
# at generation time ('_trn' suffix = rules used during training only).
syn_data_path = mostly_engine.core.generate(generation_size=100_000)
syn = pd.concat([pd.read_parquet(fn) for fn in syn_data_path.glob("*.parquet")])
syn.to_csv('adult_synthetic_trn.csv.gz', index=False)
## GENERATE with RULES
# Rules applied both during training and at generation time.
rule1 = pd.read_csv('rules/rule1.csv')
rule2 = pd.read_csv('rules/rule2.csv')
rule3 = pd.read_csv('rules/rule3.csv')
rule4 = pd.read_csv('rules/rule4.csv')
syn_data_path = mostly_engine.core.generate(generation_size=100_000, rules=[rule1, rule2, rule3, rule4])
syn = pd.concat([pd.read_parquet(fn) for fn in syn_data_path.glob("*.parquet")])
syn.to_csv('adult_synthetic_trn_gen.csv.gz', index=False)
| 716 | 30.173913 | 104 | py |
tinyxml2 | tinyxml2-master/setversion.py | #!/usr/bin/env python3
# Python program to set the version.
##############################################
import re
import sys
import optparse
def fileProcess( name, lineFunction ):
    """Rewrite file ``name`` in place, mapping every line through ``lineFunction``.

    ``lineFunction`` receives each line (newline included) and returns the
    replacement text. If the file cannot be opened, or the transformed output
    is empty, the file is left untouched.

    Fixes over the original: files are managed with ``with`` (the old version
    leaked the handle on error paths), and the dead ``filestream.closed``
    check is replaced by real open-failure handling — ``open`` raises, it
    never returns a closed stream.
    """
    try:
        filestream = open( name, 'r' )
    except OSError:
        print( "file " + name + " not open." )
        return
    print( "--- Processing " + name + " ---------" )
    with filestream:
        output = "".join(lineFunction(line) for line in filestream)
    if not output: return # basic error checking: refuse to truncate the file
    print( "Writing file " + name )
    with open( name, "w" ) as filestream:
        filestream.write( output )
def echoInput( line ):
    """Identity line filter: returns every line unchanged."""
    return line
# Command-line interface: the script expects exactly three positional
# arguments — the major, minor, and patch/build version numbers.
# parser.error() prints usage and exits when the count is wrong.
parser = optparse.OptionParser( "usage: %prog major minor build" )
(options, args) = parser.parse_args()
if len(args) != 3:
	parser.error( "incorrect number of arguments" );
# Version components stay as strings; the rule functions below splice them
# verbatim into the rewritten source lines.
major = args[0]
minor = args[1]
build = args[2]
versionStr = major + "." + minor + "." + build
print ("Setting dox,tinyxml2.h")
print ("Version: " + major + "." + minor + "." + build)
#### Write the tinyxml.h ####
def engineRule( line ):
    """Line filter for tinyxml2.h: rewrite the TIXML2_*_VERSION constants.

    Matching lines are replaced with the version numbers taken from the
    module-level ``major``/``minor``/``build``; all other lines pass through
    untouched. (Uses ``str.startswith`` instead of the original slice
    comparison; stray semicolons removed.)
    """
    matchMajor = "static const int TIXML2_MAJOR_VERSION"
    matchMinor = "static const int TIXML2_MINOR_VERSION"
    matchBuild = "static const int TIXML2_PATCH_VERSION"
    if line.startswith(matchMajor):
        print( "1)tinyxml2.h Major found" )
        return matchMajor + " = " + major + ";\n"
    elif line.startswith(matchMinor):
        print( "2)tinyxml2.h Minor found" )
        return matchMinor + " = " + minor + ";\n"
    elif line.startswith(matchBuild):
        print( "3)tinyxml2.h Build found" )
        return matchBuild + " = " + build + ";\n"
    else:
        return line

fileProcess( "tinyxml2.h", engineRule )
def macroVersionRule( line ):
    """Line filter for tinyxml2.h: rewrite the TINYXML2_*_VERSION macros.

    Companion to :func:`engineRule`, targeting the preprocessor defines.
    (Uses ``str.startswith`` instead of the original slice comparison;
    stray semicolons removed.)
    """
    matchMajor = "#define TINYXML2_MAJOR_VERSION"
    matchMinor = "#define TINYXML2_MINOR_VERSION"
    matchBuild = "#define TINYXML2_PATCH_VERSION"
    if line.startswith(matchMajor):
        print( "1)macro Major found" )
        return matchMajor + " " + major + "\n"
    elif line.startswith(matchMinor):
        print( "2)macro Minor found" )
        return matchMinor + " " + minor + "\n"
    elif line.startswith(matchBuild):
        print( "3)macro Build found" )
        return matchBuild + " " + build + "\n"
    else:
        return line

fileProcess("tinyxml2.h", macroVersionRule)
#### Write the dox ####
def doxRule( line ):
    """Line filter for the Doxygen config: rewrite PROJECT_NUMBER.

    (Uses ``str.startswith`` instead of the original slice comparison;
    stray semicolon removed.)
    """
    match = "PROJECT_NUMBER"
    if line.startswith(match):
        print( "dox project found" )
        return "PROJECT_NUMBER = " + major + "." + minor + "." + build + "\n"
    else:
        return line

fileProcess( "dox", doxRule )
#### Write the CMakeLists.txt ####
def cmakeRule( line ):
    """Line filter for CMakeLists.txt: rewrite the project() version.

    Fixes over the original: ``str.startswith`` instead of slice comparison,
    and the progress message no longer claims "tinyxml2.h Major found"
    (a copy-paste leftover from engineRule).
    """
    matchVersion = "project(tinyxml2 VERSION"
    if line.startswith(matchVersion):
        print( "CMakeLists.txt version found" )
        return matchVersion + " " + major + "." + minor + "." + build + ")\n"
    else:
        return line

fileProcess( "CMakeLists.txt", cmakeRule )
def mesonRule(line):
    """Patch the ``version : 'x.y.z',`` entry in meson.build."""
    hit = re.search(r"(\s*version) : '(\d+.\d+.\d+)',", line)
    if hit is None:
        return line
    print("1)meson.build version found.")
    return "{} : '{}.{}.{}',\n".format(hit.group(1), major, minor, build)
fileProcess("meson.build", mesonRule)
print( "Release note:" )
print( '1. Build. g++ -Wall -DTINYXML2_DEBUG tinyxml2.cpp xmltest.cpp -o gccxmltest.exe' )
print( '2. Commit. git commit -am"setting the version to ' + versionStr + '"' )
print( '3. Tag. git tag ' + versionStr )
print( ' OR git tag -a ' + versionStr + ' -m [tag message]' )
print( 'Remember to "git push" both code and tag. For the tag:' )
print( 'git push origin [tagname]')
| 3,571 | 24.15493 | 92 | py |
DMH-Net | DMH-Net-main/visualization_from_json.py | import argparse
import json
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from PIL import Image
from matplotlib.figure import Figure
from tqdm import trange
from e2plabel.e2plabelconvert import generatePerspective, VIEW_NAME, VIEW_ARGS
from postprocess.postprocess2 import _cal_p_pred_emask
from visualization import clearAxesLines
img_hw = (512, 512)
e_img_hw = (512, 1024)
def jsonToCor(filename):
    """Load an inference result json and return corner coordinates in pixels.

    The json stores normalized ``uv`` coordinates; they are scaled by the
    module-level equirectangular resolution ``e_img_hw``.
    """
    height, width = e_img_hw
    with open(filename) as fp:
        payload = json.load(fp)
    corners = np.array(payload['uv'], np.float32)
    corners[:, 0] *= width
    corners[:, 1] *= height
    return corners
def txtToCor(filename):
    """Read a whitespace-separated corner txt file into a (N, 2) float32 array."""
    with open(filename) as fp:
        rows = [ln.split() for ln in fp if ln.strip()]
    return np.array(rows, np.float32)
def resolveImgPath(s: str):
    """Map a test image filename to its dataset image path.

    Names starting with ``pano``/``camera`` belong to the LayoutNet split,
    everything else to Matterport3D.
    """
    if s.startswith("pano") or s.startswith("camera"):
        return os.path.join("data/layoutnet_dataset/test/img", s)
    return os.path.join("data/matterport3d_layout/test/img", s)
def resolveGtCorPath(s: str):
    """Map a test image filename to its ground-truth corner txt path."""
    stem = os.path.splitext(s)[0] + ".txt"
    if s.startswith("pano") or s.startswith("camera"):
        return os.path.join("data/layoutnet_dataset/test/label_cor", stem)
    return os.path.join("data/matterport3d_layout/test/label_cor", stem)
def corTo2DMask(e_img, cor):
    """Rasterize layout corners into per-face line masks, warped back to equirect.

    The corners are first projected onto the six cube faces
    (``generatePerspective``), each face's line segments are drawn into a
    binary mask, and the six masks are merged into one equirectangular mask.

    :param e_img: equirectangular image the corners belong to
    :param cor: (2N, 2) corner pixel coordinates
    :return: equirectangular mask tensor produced by ``_cal_p_pred_emask``
    """
    pres = generatePerspective(e_img, cor, VIEW_NAME, VIEW_ARGS, img_hw)
    lines = []
    for d in pres:
        lines.append(torch.tensor(d["lines"]))
    masks2d = []
    for view_idx in range(6):
        # line thickness proportional to the face resolution
        thickness = int(round(img_hw[0] * 0.01))
        mat = np.zeros((1, *img_hw))
        for line in lines[view_idx]:
            # indices 3:5 / 5:7 hold the two 2D endpoints of the segment
            cv2.line(mat[0], torch.round(line[3:5]).to(torch.int64).numpy(),
                     torch.round(line[5:7]).to(torch.int64).numpy(), 1.0, thickness=thickness)
        masks2d.append(torch.tensor(mat))
    masks2d = torch.stack(masks2d)
    # warp the six per-face masks into a single equirectangular mask
    maskEq = _cal_p_pred_emask(None, masks2d, img_hw, e_img_hw)
    return maskEq
def wireframeGetMaskImg(e_img, cor, color) -> torch.Tensor:
    """Build an RGBA uint8 overlay image of the wireframe given by ``cor``.

    The RGB channels carry ``color`` everywhere; the alpha channel is the
    rasterized wireframe mask, so only the wireframe is visible when drawn.
    """
    alpha = corTo2DMask(e_img, cor).squeeze()
    rgb = torch.tensor(color).repeat(*alpha.shape[0:2], 1)
    rgba = torch.cat([rgb, alpha.unsqueeze(-1)], 2)
    return torch.round(rgba * 255).to(torch.uint8)
def drawWireframeOnEImg(e_img, cor, color):
    """Draw the wireframe overlay for corners ``cor`` onto the current pyplot axes."""
    plt.imshow(wireframeGetMaskImg(e_img, cor, color).cpu().numpy())
fig: Figure = None
def show(output_path, name):
    """Save the current figure (when ``output_path`` is set) or display it, then close it.

    NOTE(review): relies on the module-level ``imgPath`` and ``fig`` globals
    set by the __main__ loop / initFig() — only call it from that context.
    """
    if output_path:
        plt.savefig(os.path.join(output_path, imgPath + "." + name + ".png"))
    else:
        plt.show()
    plt.close(fig)
def initFig():
    """Create a borderless 1024x512 figure and store it in the global ``fig``."""
    global fig
    fig = plt.figure(figsize=(10.24, 5.12))
    # remove all margins so the panorama fills the whole canvas
    plt.gcf().subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
    ax = plt.gca()
    clearAxesLines(ax)
CLASS_A = [
"7y3sRwLe3Va_9b72664399a34e4f9dbe470571c73187.png",
"B6ByNegPMKs_8b1abc1b47784d758b9ec1e079160475.png",
"camera_1a2b3c7901434d88bba55d6f2b28a6d5_office_30_frame_equirectangular_domain_.png",
"camera_7a42df17b40c4c15bfd6301823b6a476_office_22_frame_equirectangular_domain_.png",
"camera_8cbbb3e42c0e4e54b3b523b1fec6b3bc_office_33_frame_equirectangular_domain_.png",
"camera_412ba0d035b5432abd88ed447716f349_office_30_frame_equirectangular_domain_.png",
"camera_514bd77b98cc47ad904d6c8196f769b1_office_8_frame_equirectangular_domain_.png",
"camera_d162082c8f714aee8984195e0c5a7396_office_11_frame_equirectangular_domain_.png",
"e9zR4mvMWw7_f624a40d100144e696a39abe258ee090.png",
"pano_adxsvoaiehisue.png",
"pano_agpqpoljoyzxds.png",
"pano_ahvuapixtvirde.png",
"pano_aixninerbhvojf.png",
"pano_ankughvvgbhsom.png",
"pano_apozlylyjgtjid.png",
"uNb9QFRL6hY_1434b965c3c147419c4ff40310633b58.png",
"x8F5xyUWy9e_2669f5ba693c4e729d7d2c4f3fa0a077.png",
]
CLASS_B = [
"pano_aghlgnaxvjlzmb.png",
"7y3sRwLe3Va_92fb09a83f8949619b9dc5bda2855456.png",
"7y3sRwLe3Va_fdab6422162e49db822a37178ab70481.png",
"B6ByNegPMKs_53249ef8a94c4c40bd6f09c069e54d16.png",
"B6ByNegPMKs_bb2332e3d7ad40a59ee5ad0eae108dec.png",
"B6ByNegPMKs_ce2f5a74556c4be192df3ca7a178cefb.png",
"camera_32caf5752a4746c8b95f84e9acd9271d_office_29_frame_equirectangular_domain_.png",
"camera_63eb2cd447b84c5abac846f79c51dfcd_office_14_frame_equirectangular_domain_.png",
"camera_90af0a7fe0ed4a7db2c2e05727560231_office_15_frame_equirectangular_domain_.png",
"camera_270448008f5743f48f34539d36e4c4ae_office_14_frame_equirectangular_domain_.png",
"pano_auqcjiehbmenao.png",
"wc2JMjhGNzB_6e491bc8576345bda3cdde9ab216b7be.png",
]
CLASS_C_D = [
"7y3sRwLe3Va_9e4c92fd7eb74504baecf55a3264716e.png",
"7y3sRwLe3Va_6376b741b50a4418b3dc3fde791c3c09.png",
"B6ByNegPMKs_5b3d1c9fefb64512b0c9750a00feece4.png",
"B6ByNegPMKs_e5567bd5fa2d4fde8a6b9f15e3274a7e.png",
"e9zR4mvMWw7_5d711de78dbd400aa4cfd51fc05dfbee.png",
"pano_abbvryjplnajxo.png",
"pano_aqdafdzfhdukpg.png",
"uNb9QFRL6hY_d11f14ddecbe406681d4980365ea5a43.png",
"7y3sRwLe3Va_dd83fb40a2e14ac99de9fe9bcfaf44df.png",
"uNb9QFRL6hY_bcce4f23c12744c782c0b49b24a0331a.png",
"camera_a39f4a868cd84429a765324af21c6e6e_office_8_frame_equirectangular_domain_.png",
]
PANO_ARR = []
STF_ARR = []
MATTER_ARR = []
if __name__ == '__main__':
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--img', help='指定一张图片。如果不指定,那就会画所有')
    parser.add_argument('--output_path', help='如果不指定,就会plt.show')
    parser.add_argument('--draw_independent', "-d", action="store_true", help='独立画图还是一张图画好几次?')
    parser.add_argument('--draw_both', "-b", action="store_true", help='两种方法都画')
    parser.add_argument('--second', "-2", action="store_true", help='定义此项则画HoHoNet和AtlantaNet,否则画HorizonNet和LayoutNet')
    args = parser.parse_args()
    # Overlay colors (RGB) and result directories for each method
    GT_COLOR = (0.0, 1.0, 0.0)  # green
    OUR_PATH, OUR_COLOR = "result_json", (1.0, 0.0, 0.0)  # red
    HORIZONNET_PATH, HORIZONNET_COLOR = "eval_results/horizonnet_json", (0.0, 0.0, 1.0)  # blue
    LAYOUTNET_PATH, LAYOUTNET_COLOR = "eval_results/layoutnet_json", (1.0, 0.0, 1.0)  # magenta
    ATLANTANET_PATH, ATLANTANET_COLOR = "eval_results/atlantanet_json", (0.0, 0.0, 1.0)  # blue
    HOHONET_PATH, HOHONET_COLOR = "eval_results/hohonet_json", (1.0, 0.0, 1.0)  # magenta
    if args.img:
        img_list = [args.img]
    else:
        img_list = [s.replace(".json", "") for s in os.listdir(OUR_PATH)]
    # # TODO
    # img_list = CLASS_A + CLASS_B + CLASS_C_D
    # args.output_path = "result_6_pick"
    if args.output_path:
        os.makedirs(args.output_path, exist_ok=True)
    for i in trange(len(img_list)):
        imgPath = img_list[i]
        # LayoutNet-split images are aligned differently than Matterport ones
        isPanoStf = imgPath.find("pano") == 0 or imgPath.find("camera") == 0
        if args.second and isPanoStf: continue
        e_img = np.array(Image.open(resolveImgPath(imgPath))) / 255.0
        gt_cor = txtToCor(resolveGtCorPath(imgPath))
        myJsonPath = os.path.join(OUR_PATH, imgPath + ".json")
        with open(myJsonPath) as f:
            my_result = json.load(f)
        iou3d = my_result["3DIoU"]
        # # TODO
        # if imgPath.find("pano") == 0:
        #     PANO_ARR.append(iou3d)
        # elif imgPath.find("camera") == 0:
        #     STF_ARR.append(iou3d)
        # else:
        #     MATTER_ARR.append(iou3d)
        # continue
        # drawing order: gt, layout, horizon, ours
        if args.draw_both or (not args.draw_independent):
            # one combined figure with all methods overlaid
            initFig()
            plt.imshow(e_img)
            drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
            if not args.second:
                drawWireframeOnEImg(e_img, txtToCor(os.path.join(LAYOUTNET_PATH, os.path.splitext(imgPath)[0] + (
                    "_aligned_rgb" if isPanoStf else "") + "_cor_id.txt")), LAYOUTNET_COLOR)
                drawWireframeOnEImg(e_img, jsonToCor(os.path.join(HORIZONNET_PATH, os.path.splitext(imgPath)[0] + ".json")),
                                    HORIZONNET_COLOR)
            else:
                drawWireframeOnEImg(e_img, jsonToCor(os.path.join(ATLANTANET_PATH, os.path.splitext(imgPath)[0] + ".json")),
                                    ATLANTANET_COLOR)
                drawWireframeOnEImg(e_img, txtToCor(os.path.join(HOHONET_PATH, os.path.splitext(imgPath)[0] + ".layout.txt")),
                                    HOHONET_COLOR)
            drawWireframeOnEImg(e_img, jsonToCor(myJsonPath), OUR_COLOR)
            show(args.output_path, "all.{:.2f}".format(iou3d))
        if args.draw_both or args.draw_independent:
            # one separate figure per baseline, each overlaid with GT
            if not args.second:
                initFig()
                plt.imshow(e_img)
                drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
                drawWireframeOnEImg(e_img, txtToCor(os.path.join(LAYOUTNET_PATH, os.path.splitext(imgPath)[0] + (
                    "_aligned_rgb" if isPanoStf else "") + "_cor_id.txt")), LAYOUTNET_COLOR)
                show(args.output_path, "lay")
                initFig()
                plt.imshow(e_img)
                drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
                drawWireframeOnEImg(e_img, jsonToCor(os.path.join(HORIZONNET_PATH, os.path.splitext(imgPath)[0] + ".json")),
                                    HORIZONNET_COLOR)
                show(args.output_path, "hor")
            else:
                initFig()
                plt.imshow(e_img)
                drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
                drawWireframeOnEImg(e_img, jsonToCor(os.path.join(ATLANTANET_PATH, os.path.splitext(imgPath)[0] + ".json")),
                                    ATLANTANET_COLOR)
                show(args.output_path, "atl".format(iou3d))
                initFig()
                plt.imshow(e_img)
                drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
                drawWireframeOnEImg(e_img, txtToCor(os.path.join(HOHONET_PATH, os.path.splitext(imgPath)[0] + ".layout.txt")),
                                    HOHONET_COLOR)
                show(args.output_path, "hoh".format(iou3d))
            initFig()
            plt.imshow(e_img)
            drawWireframeOnEImg(e_img, gt_cor, GT_COLOR)
            drawWireframeOnEImg(e_img, jsonToCor(myJsonPath), OUR_COLOR)
            show(args.output_path, "our.{:.2f}".format(iou3d))
    # NOTE(review): 'a = 1' looks like a leftover debugger anchor
    a = 1
    # TODO
    # import torch
    # PANO_ARR = torch.tensor(PANO_ARR).sort(descending=True)[0]
    # STF_ARR = torch.tensor(STF_ARR).sort(descending=True)[0]
    # MATTER_ARR = torch.tensor(MATTER_ARR).sort(descending=True)[0]
    # for a in [PANO_ARR,STF_ARR,MATTER_ARR]:
    #     pt = [round(len(a) / 4 * (i+1)) for i in range(3)]
    #     pt = [a[v] for v in pt]
    #     print(pt)
| 10,947 | 39.850746 | 130 | py |
DMH-Net | DMH-Net-main/visualization.py | import io
import math
import os
import time
from typing import Dict
import cv2
import numpy as np
try:
import open3d as o3d
except:
pass
import torch
from PIL import Image
from matplotlib import pyplot as plt
from matplotlib.figure import Figure
from e2plabel.e2plabelconvert import VIEW_NAME
from perspective_dataset import PerspectiveDataset
from postprocess.postprocess2 import postProcess, get_vote_mask_c_up_down, generatePred2DMask, _cal_p_pred_emask
# Grid-cell placement of each panel in the 3x4 visualization figure.
# 2-element entries are (row, col); 4-element entries are
# (row_start, row_stop, col_start, col_stop) spans for the large panels.
DRAW_CUBE_POSITIONS = {
    "F": [1, 1],
    "R": [1, 2],
    "B": [1, 3],
    "L": [1, 0],
    "U": [0, 1],
    "D": [2, 1],
    "E": [0, 1, 2, 4],
    "3D": [2, 3, 2, 4],
    "TEXT": [0, 0]
}
# Default set of overlays rendered per figure (see getMaskByType for the codes)
DEFAULT_DRAWTYPE = [["c", "y", "x", "gtlines", "e_rm", "3d", "text"]]
# DEFAULT_DRAWTYPE = [['gtlines_colored', 'border'], 'e_gt', ['c_cl2', 'y_cl2', 'x_cl2']]  # config used for the GT visualization (paper Fig. 1)
def clearAxesLines(ax: plt.Axes):
    """Strip ticks and all four spines from ``ax`` so only the content shows."""
    ax.set_xticks([])
    ax.set_yticks([])
    for side in ("top", "right", "bottom", "left"):
        ax.spines[side].set_visible(False)
def getCubeAxes(fig: plt.Figure, view_name):
    """Create (and de-clutter) the subplot axes for panel ``view_name`` on the 3x4 grid."""
    spec = fig.add_gridspec(3, 4, hspace=0, wspace=0)
    posi = DRAW_CUBE_POSITIONS[view_name]
    if len(posi) == 2:
        ax: plt.Axes = fig.add_subplot(spec[posi[0], posi[1]])
    else:
        # 4-element entries span multiple grid cells (the "E" and "3D" panels)
        ax: plt.Axes = fig.add_subplot(spec[posi[0]:posi[1], posi[2]:posi[3]])
    clearAxesLines(ax)
    return ax
def getMaskByType(type, cfg, input, output, img_idx, view_idx):
    """Compute a soft line mask and its RGB color for one draw ``type`` on one cube face.

    ``type`` is a code string: leading "x"/"y"/"c" selects the line family,
    with suffixes "gt" (ground-truth labels), "pk" (peak labels), "raw"
    (raw network output) and "cl2" (paper-Fig.1 color scheme); "gtlines"
    rasterizes the GT segments directly.

    :return: ``(mat, color)`` with ``mat`` a (h, w) float mask and ``color``
        an RGB tensor, or ``None`` for unknown types.
    """
    p_img = input["p_imgs"][img_idx, view_idx]
    prob = None
    if type.find("x") == 0:
        color_type = 0
        if type.find("cl2") != -1:  # paper Fig.1 "Line Predictions" coloring: vertical-in-space lines all red, horizontal ones blue or green
            color_type = getLineColorType(view_idx, color_type)
        color = p_img.new_zeros(3)
        color[color_type] = 255
        if type.find("pk") != -1:
            color = torch.tensor([255, 255, 255], dtype=torch.float, device=p_img.device)
            prob = output["p_preds_xy"].new_tensor(
                PerspectiveDataset.generate_gradual_hough_label(input["peaks"][img_idx][view_idx][0],
                                                                input["xLabels"].shape[2], type="nearest_k", base=0.5))
        elif type.find("gt") != -1:
            prob = input["xLabels"][img_idx, view_idx]
        if prob is not None:  # special case: build the mask from prob on the fly
            mat = prob.unsqueeze(0).expand(p_img.shape[1], -1)
        else:  # reuse what the post-processing step already computed
            generatePred2DMask(cfg, input, output, img_idx)
            mat = output["p_preds_2dmask"][img_idx][view_idx, 0]
    elif type.find("y") == 0:
        color_type = 1
        if type.find("cl2") != -1:
            color_type = getLineColorType(view_idx, color_type)
        color = p_img.new_zeros(3)
        color[color_type] = 255
        if type.find("pk") != -1:
            color = torch.tensor([255, 255, 255], dtype=torch.float, device=p_img.device)
            prob = output["p_preds_xy"].new_tensor(
                PerspectiveDataset.generate_gradual_hough_label(input["peaks"][img_idx][view_idx][1],
                                                                input["yLabels"].shape[2], type="nearest_k", base=0.5))
        elif type.find("gt") != -1:
            prob = input["yLabels"][img_idx, view_idx]
        if prob is not None:  # special case: build the mask from prob on the fly
            mat = prob.unsqueeze(1).expand(-1, p_img.shape[2])
        else:  # reuse what the post-processing step already computed
            generatePred2DMask(cfg, input, output, img_idx)
            mat = output["p_preds_2dmask"][img_idx][view_idx, 1]
    elif type.find("c") == 0:
        def _genProb(probs):
            """Flatten an (angle_num, 2) hough-domain tensor whose last dim holds
            (cup, cdown) into one vector with cdown appended after cup."""
            # return torch.cat([probs[:, i] for i in range(probs.shape[1])])  # original formulation; fully equivalent to the line below but clumsier
            return probs.T.reshape(-1)

        color_type = 2
        if type.find("cl2") != -1:
            color_type = getLineColorType(view_idx, color_type)
        color = p_img.new_zeros(3)
        color[color_type] = 255
        if type.find("pk") != -1:
            color = torch.tensor([255, 255, 255], dtype=torch.float, device=p_img.device)
            prob = torch.cat([
                output["p_preds_cud"].new_tensor(
                    PerspectiveDataset.generate_gradual_hough_label(input["peaks"][img_idx][view_idx][2],
                                                                    input["cUpLabels"].shape[2],
                                                                    type="nearest_k", base=0.5)),
                output["p_preds_cud"].new_tensor(
                    PerspectiveDataset.generate_gradual_hough_label(input["peaks"][img_idx][view_idx][3],
                                                                    input["cDownLabels"].shape[2],
                                                                    type="nearest_k", base=0.5))])
        elif type.find("gt") != -1:
            prob = torch.cat(
                [input["cUpLabels"][img_idx, view_idx], input["cDownLabels"][img_idx, view_idx]])
        elif type.find("raw") != -1:
            prob = _genProb(output["raw_cud"][img_idx, view_idx])
        if prob is not None:  # special case: build the mask from prob on the fly
            vote_mask_c_up_down = get_vote_mask_c_up_down(cfg, p_img)
            # mat = (prob * vote_mask_c_up_down).max(-1).values  # OLD
            mat = (prob * vote_mask_c_up_down).sum(-1) / vote_mask_c_up_down.sum(-1)
        else:  # reuse what the post-processing step already computed
            generatePred2DMask(cfg, input, output, img_idx)
            mat = output["p_preds_2dmask"][img_idx][view_idx, 2]
    elif type == "gtlines":
        # rasterize the GT segments of this face into a single white mask
        color = torch.tensor([255, 255, 255], dtype=torch.float, device=p_img.device)
        thickness = int(round(p_img.shape[1] * 0.004))
        mat = np.zeros(p_img.shape[1:])
        for line in input["lines"][img_idx][view_idx]:
            cv2.line(mat, torch.round(line[3:5]).to(torch.int64).numpy(),
                     torch.round(line[5:7]).to(torch.int64).numpy(), 1.0, thickness=thickness)
        mat = p_img.new_tensor(mat)
    else:
        return None
    return mat, color  # mat: (h, w)
def getLineColorType(view_idx, line_type):
    """Map a (cube face, line type) pair to the RGB channel index used to color it.

    ``line_type`` distinguishes the three spatial line directions; which
    channel is picked depends on the face the line is drawn on.
    """
    face = VIEW_NAME[view_idx]
    if face in ("F", "B"):
        if line_type == 0:
            rgb = 0
        elif line_type == 1:
            rgb = 2
        else:
            rgb = 1
    elif face in ("L", "R"):
        if line_type == 0:
            rgb = 0
        elif line_type == 1:
            rgb = 1
        else:
            rgb = 2
    elif face in ("U", "D"):
        if line_type == 0:
            rgb = 1
        elif line_type == 1:
            rgb = 2
        else:
            rgb = 0
    return rgb
def getGTLines2DMasks(cfg, input, output, img_idx):
    """Rasterize the GT layout lines of one image into per-face RGB masks.

    Each segment is drawn into the RGB channel chosen by ``getLineColorType``
    from its type flag (index 7).

    :return: ``(masks2d, maskEq)`` — the six stacked per-face masks and their
        equirectangular warp via ``_cal_p_pred_emask``.
    """
    masks2d = []
    for view_idx in range(6):
        p_img = input["p_imgs"][img_idx, view_idx]
        # line thickness proportional to the face resolution
        thickness = int(round(p_img.shape[1] * 0.01))
        mat = np.zeros(p_img.shape)
        for line in input["lines"][img_idx][view_idx]:
            color_type = getLineColorType(view_idx, line[7])
            # indices 3:5 / 5:7 hold the two 2D endpoints of the segment
            cv2.line(mat[color_type], torch.round(line[3:5]).to(torch.int64).numpy(),
                     torch.round(line[5:7]).to(torch.int64).numpy(), 1.0, thickness=thickness)
        masks2d.append(input["p_imgs"].new_tensor(mat))
    masks2d = torch.stack(masks2d)
    maskEq = _cal_p_pred_emask(cfg, masks2d, input["p_imgs"].shape[-2:], input["e_img"].shape[-2:])
    return masks2d, maskEq
def cvtRGBMatToDrawingNdArray(input):
    """Convert a float RGB tensor into a drawable uint8 RGBA array.

    :param input: tensor of shape (3, h, w), float values in [0, 1]
    :return: ndarray of shape (h, w, 4), uint8 values in [0, 255]; the alpha
        channel is the clamped per-pixel channel maximum.
    """
    alpha = torch.clamp(input.max(0)[0], 0.0, 1.0).unsqueeze(0)
    rgba = torch.cat([input, alpha], 0)
    scaled = torch.clamp(torch.round(rgba * 255), 0.0, 255.0)
    return scaled.permute(1, 2, 0).cpu().to(torch.uint8).numpy()
def drawEqualRectCorners(cfg, ax, type, input, output, img_idx, gt_cor_id, pred_cor_id):
    """Draw the equirectangular panel: image plus overlays selected by flags in ``type``.

    Flag characters in ``type``: "r" scatter GT (red) / predicted (green)
    corners, "m" predicted line masks, "gt" colored GT line masks,
    "w" GT (green) and predicted (red) wireframes.
    """
    e_img = input["e_img"][img_idx].permute(1, 2, 0).cpu().numpy()
    ax.imshow(e_img)
    if type.find("r") != -1:
        cor = gt_cor_id.cpu().numpy()
        ax.scatter(cor[:, 0], cor[:, 1], c="red", s=10)
        if pred_cor_id is not None:
            cor = pred_cor_id.cpu().numpy()
            ax.scatter(cor[:, 0], cor[:, 1], c="green", s=10)
    if type.find("m") != -1:
        # one overlay per line family, each in its own RGB channel
        for one_draw_idx in range(3):
            generatePred2DMask(cfg, input, output, img_idx)
            mat = output["p_preds_emask"][img_idx][one_draw_idx]
            color = mat.new_zeros(3)
            color[one_draw_idx] = 255
            mask_img = torch.cat([color.repeat(*mat.shape[0:2], 1), mat.unsqueeze(-1) * 255], 2)
            mask_img = torch.round(mask_img).to(torch.uint8)
            ax.imshow(mask_img.cpu().numpy())
    if type.find("gt") != -1:
        _, emask = getGTLines2DMasks(cfg, input, output, img_idx)
        drawArray = cvtRGBMatToDrawingNdArray(emask)
        ax.imshow(drawArray)
    if type.find("w") != -1:
        drawWireframeOnEImg(ax, e_img, gt_cor_id, (0.0, 1.0, 0.0))
        drawWireframeOnEImg(ax, e_img, pred_cor_id, (1.0, 0.0, 0.0))
def o3dRunVis(vis):
    """Refresh the Open3D visualizer once and enter its interactive loop."""
    vis.update_geometry()
    vis.update_renderer()
    vis.poll_events()
    vis.run()
def o3dDrawLines(vis, lines_results, lwh, color=None):
    """Add a LineSet built from ``lines_results`` (see cvtLinesResultsForDraw) to the Open3D visualizer."""
    points, lines, colors = cvtLinesResultsForDraw(lines_results, lwh, color)
    line_pcd = o3d.LineSet()
    line_pcd.lines = o3d.Vector2iVector(lines)
    line_pcd.colors = o3d.Vector3dVector(colors)
    line_pcd.points = o3d.Vector3dVector(points)
    vis.add_geometry(line_pcd)
def pyplotGetCameraPos(gt_lwh):
    """Return the virtual camera y position used by the 2D projection:
    ``2 * gt_lwh[2] - gt_lwh[3]`` as a Python float."""
    camera_y = 2 * gt_lwh[2] - gt_lwh[3]
    return camera_y.item()
def pyplotDrawLines(ax, cameraPos, lines_results, lwh, color=None):
    """Perspective-project 3D layout lines from a camera at (0, cameraPos, 0) and plot them on ``ax``."""
    points, lines, colors = cvtLinesResultsForDraw(lines_results, lwh, color)
    # shift so the camera sits at the origin, then divide by y (the depth axis here)
    points2d = points - np.array([0.0, cameraPos, 0.0])
    points2d /= points2d[:, 1:2]
    points2d = points2d[:, [0, 2]]
    ax.set_facecolor("black")
    for i, line in enumerate(lines):
        p = points2d[line]
        ax.plot(p[:, 0], p[:, 1], c=colors[i] if colors is not None else None, linewidth=1)
def drawWireframeOnEImg(ax, e_img, cor, color):
    """Overlay the wireframe for corners ``cor`` onto ``ax`` (delegates to visualization_from_json)."""
    # imported lazily to avoid a circular import at module load time
    from visualization_from_json import wireframeGetMaskImg
    ax.imshow(wireframeGetMaskImg(e_img, cor, color).cpu().numpy())
def cvtLinesResultsForDraw(lines_results, lwh, color=None):
    """Convert layout line records into point/segment/color arrays for drawing.

    :param lines_results: iterable of line records; index 1 is the axis the
        line runs along (0=x, 1=y, 2=z), indices 3..5 its fixed coordinates,
        index 2 a per-line type flag used for auto-coloring.
    :param lwh: six extents [x0, x1, y0, y1, z0, z1] supplying the two
        endpoints along the line's axis.
    :param color: optional fixed RGB color for every line; when ``None`` each
        line is colored from its type flag (cyan for type 1, green otherwise).
    :return: (points (2N, 3) ndarray, lines (N, 2) index ndarray, colors list)
    """
    points = []
    lines = []
    colors = []
    for line in lines_results:
        # BUGFIX: the per-line color must be recomputed for every line; the
        # previous code only chose a color while it was still None, so the
        # first line's auto-color stuck for all subsequent lines.
        line_color = color
        if line_color is None:
            if line[2] == 1:
                line_color = [0.0, 1.0, 1.0]
            else:
                line_color = [0.0, 1.0, 0.0]
        if line[1] == 0:
            points.append([lwh[0], line[4], line[5]])
            points.append([lwh[1], line[4], line[5]])
        elif line[1] == 1:
            points.append([line[3], lwh[2], line[5]])
            points.append([line[3], lwh[3], line[5]])
        elif line[1] == 2:
            points.append([line[3], line[4], lwh[4]])
            points.append([line[3], line[4], lwh[5]])
        else:
            assert False
        lines.append([len(points) - 2, len(points) - 1])
        colors.append(line_color)
    return np.array(points), np.array(lines), colors
def o3dInitVis():
    """Create an Open3D visualizer window with a dark background.

    In Open3D's built-in coordinate frame the red axis is x, green is y and
    blue is z.  Pressing 'S' saves the current viewpoint to a timestamped
    json; a previously saved viewpoint file is restored when present.
    """
    vis = o3d.visualization.VisualizerWithKeyCallback()

    def save_view_point(vis, filename):
        # dump the pinhole camera parameters so this view can be restored later
        param = vis.get_view_control().convert_to_pinhole_camera_parameters()
        o3d.io.write_pinhole_camera_parameters(filename, param)

    vis.register_key_callback(ord("S"), lambda vis: save_view_point(vis, "./view-" + str(int(time.time())) + ".json"))
    vis.create_window(width=1386, height=752)
    if os.path.exists("./view-1617968465.json"):
        vis.get_view_control().convert_from_pinhole_camera_parameters(
            o3d.read_pinhole_camera_parameters("./view-1617968465.json"))
    renderOption: o3d.RenderOption = vis.get_render_option()
    renderOption.background_color = np.array([0, 0, 0], dtype=np.float32)
    renderOption.show_coordinate_frame = True
    renderOption.point_size = 0.1
    return vis
def makeLwhLines(lwh):
    """Build the 12 edge records of the axis-aligned box described by ``lwh``.

    Each record follows the line-result layout used by
    ``cvtLinesResultsForDraw``: index 1 is the running axis (0=x, 1=y, 2=z),
    indices 3..5 the fixed coordinates with ``nan`` on the running axis.

    :param lwh: tensor of six extents [x0, x1, y0, y1, z0, z1]
    :return: (12, 8) tensor with the same dtype/device as ``lwh``
    """
    edges = []
    # four edges running along x: fixed (y, z) pairs
    for yy in (lwh[2], lwh[3]):
        for zz in (lwh[4], lwh[5]):
            edges.append([0, 0, 0, math.nan, yy, zz, 0, 0])
    # four edges running along y: fixed (x, z) pairs
    for xx in (lwh[0], lwh[1]):
        for zz in (lwh[4], lwh[5]):
            edges.append([0, 1, 0, xx, math.nan, zz, 0, 0])
    # four edges running along z: fixed (x, y) pairs
    for xx in (lwh[0], lwh[1]):
        for yy in (lwh[2], lwh[3]):
            edges.append([0, 2, 0, xx, yy, math.nan, 0, 0])
    return lwh.new_tensor(edges)
def visualize(cfg, input, output, drawtypes=None, show=False, dpi=None) -> Dict[str, np.ndarray]:
    """Post-process every image in the batch, then render the visualizations for all of them."""
    postResults = [postProcess(cfg, input, output, img_idx) for img_idx in range(input["p_imgs"].shape[0])]
    return visualizeWithPostResults(cfg, input, output, postResults, drawtypes, show, dpi)
# Colors for the predicted / ground-truth layout bounding boxes in the 3D panel
DRAW_3D_PREDBOX_COLOR = "blue"
DRAW_3D_GTBOX_COLOR = "white"
def visualizeWithPostResults(cfg, input, output, postResults: list, drawtypes=None, show=False, dpi=None) -> Dict[
    str, np.ndarray]:
    """Render result visualizations.

    :param input: batch as produced by the dataset
    :param output: model outputs
    :param postResults: list holding, for every image in ``input``, the value
        returned by ``postProcess`` for that image
    :param drawtypes: list of figure specs; a string draws a single panel
        ("e..." equirect, "3d..." 3D view), a list draws the 3x4 cube-face
        grid with the given overlay codes (see ``getMaskByType``)
    :return: dict mapping "<filename>--<figure index>" to an (h, w, 3)
        RGB ndarray of the rendered figure (empty when ``show`` is True).
    """
    if drawtypes is None:
        drawtypes = DEFAULT_DRAWTYPE
    result = {}
    with torch.no_grad():
        for img_idx in range(input["p_imgs"].shape[0]):
            (gt_lines, gt_lwh, gt_cor_id), (pred_lines, pred_lwh, pred_cor_id), metric = postResults[img_idx]
            for draw_idx, one_fig_types in enumerate(drawtypes):
                fig: Figure = plt.figure(dpi=dpi)
                fig.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)  # strip all margins
                if isinstance(one_fig_types, str) and one_fig_types.find("e") == 0:
                    # draw a single equirectangular panel
                    ax = plt.gca()
                    clearAxesLines(ax)
                    drawEqualRectCorners(cfg, ax, one_fig_types, input, output, img_idx, gt_cor_id, pred_cor_id)
                elif isinstance(one_fig_types, str) and one_fig_types.find("3d") == 0:
                    # draw a single 3D projection panel
                    ax = plt.gca()
                    clearAxesLines(ax)
                    cameraPos = pyplotGetCameraPos(gt_lwh)
                    if one_fig_types.find("predbox") != -1:
                        pyplotDrawLines(ax, cameraPos, makeLwhLines(pred_lwh), pred_lwh, DRAW_3D_PREDBOX_COLOR)
                    if one_fig_types.find("gtbox") != -1:
                        pyplotDrawLines(ax, cameraPos, makeLwhLines(gt_lwh), gt_lwh, DRAW_3D_GTBOX_COLOR)
                    pyplotDrawLines(ax, cameraPos, gt_lines, gt_lwh, "red")
                    pyplotDrawLines(ax, cameraPos, pred_lines, pred_lwh, "green")
                elif isinstance(one_fig_types, list):
                    # draw the corner panels (equirect, 3D and text)
                    for type in one_fig_types:
                        if type.find("e") == 0:
                            ax = getCubeAxes(fig, "E")
                            drawEqualRectCorners(cfg, ax, type, input, output, img_idx, gt_cor_id, pred_cor_id)
                        elif type.find("3d") == 0 and gt_lwh is not None and pred_lwh is not None:
                            ax = getCubeAxes(fig, "3D")
                            cameraPos = pyplotGetCameraPos(gt_lwh)
                            if type.find("predbox") != -1:
                                pyplotDrawLines(ax, cameraPos, makeLwhLines(pred_lwh), pred_lwh, DRAW_3D_PREDBOX_COLOR)
                            if type.find("gtbox") != -1:
                                pyplotDrawLines(ax, cameraPos, makeLwhLines(gt_lwh), gt_lwh, DRAW_3D_GTBOX_COLOR)
                            pyplotDrawLines(ax, cameraPos, gt_lines, gt_lwh, "red")
                            if len(pred_lines) > 0:
                                pyplotDrawLines(ax, cameraPos, pred_lines, pred_lwh, "green")
                        elif type == "text":
                            # metric read-out; extra .format args beyond the placeholders are ignored
                            ax = getCubeAxes(fig, "TEXT")
                            toWrite = ""
                            if "CE" in metric and "PE" in metric:
                                toWrite = "CE:{:.3f} PE:{:.3f}\n" \
                                          "3DIoU:{:.2f}\n" \
                                          " ".format(
                                    # "gt:{:s}\n   {:s}\n" \
                                    # "pr:{:s}\n   {:s}\n".format(
                                    metric["CE"], metric["PE"], metric["3DIoU"],
                                    ",".join(["{:.2f}".format(v.item()) for v in gt_lwh[0:3]]),
                                    ",".join(["{:.2f}".format(v.item()) for v in gt_lwh[3:6]]),
                                    ",".join(["{:.2f}".format(v.item()) for v in pred_lwh[0:3]]),
                                    ",".join(["{:.2f}".format(v.item()) for v in pred_lwh[3:6]]), )
                            elif "rmse" in metric and "delta_1" in metric:
                                toWrite = "3DIoU:{:.2f}\n" \
                                          "2DIoU:{:.2f}\n" \
                                          "delta_1:{:.3f}\n" \
                                          " ".format(
                                    # "rmse:{:.4f}\n".format(
                                    metric["3DIoU"], metric["2DIoU"], metric["delta_1"], metric["rmse"],
                                )
                            ax.text(0, 0, toWrite)
                            # if "nz" in metric:
                            #     ax.text(0, 0.85, "nz:" + metric["nz"], color="blue")
                            # if "noline" in metric:
                            #     ax.text(0, 0.75, "nl:" + metric["noline"], color="red")
                    mask_buffer = {}
                    gtlines_colored_masks2d = None
                    # draw every cube face
                    for view_idx, view_name in enumerate(VIEW_NAME):
                        ax = getCubeAxes(fig, view_name)
                        p_img = input["p_imgs"][img_idx, view_idx]
                        if one_fig_types[0] != "canny":
                            ax.imshow(p_img.permute(1, 2, 0).cpu().numpy())
                            one_fig_types_2 = one_fig_types
                        else:
                            # background is the canny edge image instead of the photo
                            ax.imshow(np.expand_dims(output["canny_image"][img_idx][view_idx], 2).repeat(3, 2))
                            one_fig_types_2 = one_fig_types[1:]
                        mask_buffer[view_idx] = {}
                        # draw every requested overlay type on this face
                        for type in one_fig_types_2:
                            t = getMaskByType(type, cfg, input, output, img_idx, view_idx)
                            if t is not None:
                                mat, color = t
                                mask_buffer[view_idx][type] = mat
                                mask_img = torch.cat([color.repeat(*mat.shape[0:2], 1), mat.unsqueeze(-1) * 255], 2)
                                mask_img = torch.round(mask_img).to(torch.uint8)
                                ax.imshow(mask_img.cpu().numpy())
                            if type == "gtlines_colored":
                                if gtlines_colored_masks2d is None:
                                    gtlines_colored_masks2d, _ = getGTLines2DMasks(cfg, input, output, img_idx)
                                drawArray = cvtRGBMatToDrawingNdArray(gtlines_colored_masks2d[view_idx])
                                ax.imshow(drawArray)
                            if type == "border":
                                # draw a white frame around the face
                                BORDER_WIDTH = 2
                                img_hw = p_img.shape[-2:]
                                white_border_mask = np.ones((*img_hw, 4), dtype=np.uint8) * 255
                                white_border_mask[BORDER_WIDTH:img_hw[0] - BORDER_WIDTH,
                                BORDER_WIDTH:img_hw[1] - BORDER_WIDTH, 3] = 0
                                ax.imshow(white_border_mask)
                            if type.find("hough_line") == 0:
                                ax.set_xlim(0, 512)
                                ax.set_ylim(0, 512)
                                ax.invert_yaxis()
                                liness = output["hough_lines"][img_idx][view_idx]
                                for ii, lines in enumerate(liness):
                                    for jj, line in enumerate(lines):
                                        color = "b"
                                        if jj == 0:
                                            if type.find("red") != -1: continue
                                        else:
                                            if type.find("first_only") != -1:
                                                continue
                                        ax.plot([line[0], line[2]], [line[1], line[3]], color)
                                if type.find("red") != -1:
                                    # re-draw the first line of each group on top, in red
                                    for ii, lines in enumerate(liness):
                                        if len(lines) > 0:
                                            line = lines[0]
                                            color = "r"
                                            ax.plot([line[0], line[2]], [line[1], line[3]], color)
                # grab the rendered figure as an image
                buf = io.BytesIO()
                fig.savefig(buf, format="jpg")
                buf.seek(0)
                img = Image.open(buf)  # open the in-memory image data with PIL
                img = np.asarray(img)
                buf.close()
                if show:
                    fig.show()
                else:
                    result["{:s}--{:s}".format(input["filename"][img_idx], str(draw_idx))] = img
                plt.close()
    return result
| 22,450 | 43.021569 | 119 | py |
DMH-Net | DMH-Net-main/eval_general.py | import argparse
import json
import numpy as np
from shapely.geometry import Polygon
from tqdm import tqdm
from eval_cuboid import prepare_gtdt_pairs
from misc import post_proc, panostretch
def sort_xy_filter_unique(xs, ys, y_small_first=True):
    """Sort (xs, ys) by x — ties broken by y — and drop duplicate x values.

    :param y_small_first: with equal x, smaller y sorts first when True,
        larger y first when False (the kept duplicate is the first in order).
    :return: strictly increasing xs and the matching ys.
    """
    xs, ys = np.array(xs), np.array(ys)
    tie_sign = 1 if y_small_first else -1
    order = np.argsort(xs + ys / ys.max() * tie_sign)
    xs, ys = xs[order], ys[order]
    _, first_idx = np.unique(xs, return_index=True)
    xs, ys = xs[first_idx], ys[first_idx]
    assert np.all(np.diff(xs) > 0)
    return xs, ys
def cor_2_1d(cor, H, W):
    """Convert layout corners into per-column ceiling/floor boundaries.

    :param cor: (2N, 2) corner pixel coordinates, ceiling/floor interleaved
    :param H, W: equirectangular image height/width
    :return: (2, W) array of boundary vertical angles in radians
        (row 0 ceiling, row 1 floor)
    """
    bon_ceil_x, bon_ceil_y = [], []
    bon_floor_x, bon_floor_y = [], []
    n_cor = len(cor)
    for i in range(n_cor // 2):
        # trace the ceiling boundary between consecutive ceiling corners
        xys = panostretch.pano_connect_points(cor[i * 2],
                                              cor[(i * 2 + 2) % n_cor],
                                              z=-50, w=W, h=H)
        bon_ceil_x.extend(xys[:, 0])
        bon_ceil_y.extend(xys[:, 1])
    for i in range(n_cor // 2):
        # trace the floor boundary between consecutive floor corners
        xys = panostretch.pano_connect_points(cor[i * 2 + 1],
                                              cor[(i * 2 + 3) % n_cor],
                                              z=50, w=W, h=H)
        bon_floor_x.extend(xys[:, 0])
        bon_floor_y.extend(xys[:, 1])
    bon_ceil_x, bon_ceil_y = sort_xy_filter_unique(bon_ceil_x, bon_ceil_y, y_small_first=True)
    bon_floor_x, bon_floor_y = sort_xy_filter_unique(bon_floor_x, bon_floor_y, y_small_first=False)
    bon = np.zeros((2, W))
    # interpolate the sampled boundary to every column (wrapping horizontally)
    bon[0] = np.interp(np.arange(W), bon_ceil_x, bon_ceil_y, period=W)
    bon[1] = np.interp(np.arange(W), bon_floor_x, bon_floor_y, period=W)
    bon = ((bon + 0.5) / H - 0.5) * np.pi  # pixel row -> vertical angle
    return bon
def layout_2_depth(cor_id, h, w, return_mask=False):
    """Render a layout depth map (and optionally region masks) from corners.

    :param cor_id: (2N, 2) corner pixel coordinates, ceiling/floor interleaved
    :param h, w: output resolution
    :param return_mask: also return the floor/ceiling/wall boolean masks
    :return: (h, w) float32 depth map, or (depth, floor_mask, ceil_mask, wall_mask)
    """
    # Convert corners to per-column boundary first
    # Up -pi/2, Down pi/2
    vc, vf = cor_2_1d(cor_id, h, w)
    vc = vc[None, :]  # [1, w]
    vf = vf[None, :]  # [1, w]
    assert (vc > 0).sum() == 0
    assert (vf < 0).sum() == 0

    # Per-pixel v coordinate (vertical angle)
    vs = ((np.arange(h) + 0.5) / h - 0.5) * np.pi
    vs = np.repeat(vs[:, None], w, axis=1)  # [h, w]

    # Floor-plane to depth
    floor_h = 1.6
    floor_d = np.abs(floor_h / np.sin(vs))

    # wall to camera distance on horizontal plane at cross camera center
    cs = floor_h / np.tan(vf)

    # Ceiling-plane to depth
    ceil_h = np.abs(cs * np.tan(vc))  # [1, w]
    ceil_d = np.abs(ceil_h / np.sin(vs))  # [h, w]

    # Wall to depth
    wall_d = np.abs(cs / np.cos(vs))  # [h, w]

    # Recover layout depth
    floor_mask = (vs > vf)
    ceil_mask = (vs < vc)
    wall_mask = (~floor_mask) & (~ceil_mask)
    depth = np.zeros([h, w], np.float32)  # [h, w]
    depth[floor_mask] = floor_d[floor_mask]
    depth[ceil_mask] = ceil_d[ceil_mask]
    depth[wall_mask] = wall_d[wall_mask]

    assert (depth == 0).sum() == 0
    if return_mask:
        return depth, floor_mask, ceil_mask, wall_mask
    return depth
def test_general(dt_cor_id, gt_cor_id, w, h, losses):
    """Evaluate one predicted layout against its ground truth.

    Computes 2D IoU, 3D IoU, depth RMSE and delta_1 and stores them — plus
    the ground-truth corner-count bucket — into ``losses`` under the keys
    'n_corners_type', '2DIoU', '3DIoU', 'rmse' and 'delta_1'.

    :param dt_cor_id: predicted corners, (2N, 2) pixel coords, ceiling/floor interleaved
    :param gt_cor_id: ground-truth corners in the same layout
    :param w, h: equirectangular image width/height
    :param losses: dict that receives the metrics for this single sample
    """
    dt_floor_coor = dt_cor_id[1::2]
    dt_ceil_coor = dt_cor_id[0::2]
    gt_floor_coor = gt_cor_id[1::2]
    gt_ceil_coor = gt_cor_id[0::2]
    # ceiling/floor corner pairs must share the same column
    assert (dt_floor_coor[:, 0] != dt_ceil_coor[:, 0]).sum() == 0
    assert (gt_floor_coor[:, 0] != gt_ceil_coor[:, 0]).sum() == 0

    # Eval 3d IoU and height error(in meter)
    ch = -1.6  # fixed camera-to-floor distance convention (meters)
    dt_floor_xy = post_proc.np_coor2xy(dt_floor_coor, ch, 1024, 512, floorW=1, floorH=1)
    gt_floor_xy = post_proc.np_coor2xy(gt_floor_coor, ch, 1024, 512, floorW=1, floorH=1)
    dt_poly = Polygon(dt_floor_xy)
    gt_poly = Polygon(gt_floor_xy)
    if not gt_poly.is_valid:
        # BUGFIX: the old message interpolated ``gt_path``, a global that only
        # exists when run from this file's __main__ loop; importing callers
        # hit a NameError here.  Report without the undefined name.
        print('Skip invalid ground truth polygon')
        return

    # 2D IoU
    try:
        area_dt = dt_poly.area
        area_gt = gt_poly.area
        area_inter = dt_poly.intersection(gt_poly).area
        iou2d = area_inter / (area_gt + area_dt - area_inter)
    except:
        iou2d = 0.0

    # 3D IoU (reuses area_* from above; any failure — including a failed 2D
    # computation leaving them undefined — yields 0.0)
    try:
        cch_dt = post_proc.get_z1(dt_floor_coor[:, 1], dt_ceil_coor[:, 1], ch, 512)
        cch_gt = post_proc.get_z1(gt_floor_coor[:, 1], gt_ceil_coor[:, 1], ch, 512)
        h_dt = abs(cch_dt.mean() - ch)
        h_gt = abs(cch_gt.mean() - ch)
        area3d_inter = area_inter * min(h_dt, h_gt)
        area3d_pred = area_dt * h_dt
        area3d_gt = area_gt * h_gt
        iou3d = area3d_inter / (area3d_pred + area3d_gt - area3d_inter)
    except:
        iou3d = 0.0

    # rmse & delta_1 on the rendered layout depth maps
    gt_layout_depth = layout_2_depth(gt_cor_id, h, w)
    try:
        dt_layout_depth = layout_2_depth(dt_cor_id, h, w)
    except:
        dt_layout_depth = np.zeros_like(gt_layout_depth)
    rmse = ((gt_layout_depth - dt_layout_depth) ** 2).mean() ** 0.5
    thres = np.maximum(gt_layout_depth / dt_layout_depth, dt_layout_depth / gt_layout_depth)
    delta_1 = (thres < 1.25).mean()

    # Add a result
    n_corners = len(gt_floor_coor)
    if n_corners % 2 == 1:
        n_corners_str = 'odd'
    elif n_corners < 10:
        n_corners_str = str(n_corners)
    else:
        n_corners_str = '10+'
    losses["n_corners_type"] = n_corners_str
    losses['2DIoU'] = iou2d * 100
    losses['3DIoU'] = iou3d * 100
    losses['rmse'] = rmse
    losses['delta_1'] = delta_1
if __name__ == '__main__':
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dt_glob',
                        help='NOTE: Remeber to quote your glob path.'
                             'Files assumed to be json from inference.py')
    parser.add_argument('--gt_glob',
                        help='NOTE: Remeber to quote your glob path.'
                             'Files assumed to be txt')
    parser.add_argument('--w', default=1024, type=int,
                        help='GT images width')
    parser.add_argument('--h', default=512, type=int,
                        help='GT images height')
    args = parser.parse_args()

    # Prepare (gt, dt) pairs
    gtdt_pairs = prepare_gtdt_pairs(args.gt_glob, args.dt_glob)

    # Testing
    # NOTE(review): test_general writes scalar metric keys directly into
    # ``losses`` instead of appending to these per-bucket lists; the
    # aggregation loop below appears inconsistent with that — verify the
    # intended usage before relying on the printed summary.
    losses = dict([
        (n_corner, {'2DIoU': [], '3DIoU': [], 'rmse': [], 'delta_1': []})
        for n_corner in ['4', '6', '8', '10+', 'odd', 'overall']
    ])
    for gt_path, dt_path in tqdm(gtdt_pairs, desc='Testing'):
        # Parse ground truth
        with open(gt_path) as f:
            gt_cor_id = np.array([l.split() for l in f], np.float32)

        # Parse inferenced result
        with open(dt_path) as f:
            dt = json.load(f)
        dt_cor_id = np.array(dt['uv'], np.float32)
        dt_cor_id[:, 0] *= args.w
        dt_cor_id[:, 1] *= args.h

        test_general(dt_cor_id, gt_cor_id, args.w, args.h, losses)

    for k, result in losses.items():
        iou2d = np.array(result['2DIoU'])
        iou3d = np.array(result['3DIoU'])
        rmse = np.array(result['rmse'])
        delta_1 = np.array(result['delta_1'])
        if len(iou2d) == 0:
            continue
        print('GT #Corners: %s (%d instances)' % (k, len(iou2d)))
        print('    2DIoU  : %.2f' % (iou2d.mean() * 100))
        print('    3DIoU  : %.2f' % (iou3d.mean() * 100))
        print('    RMSE   : %.2f' % (rmse.mean()))
        print('    delta^1: %.2f' % (delta_1.mean()))
| 7,264 | 34.612745 | 99 | py |
DMH-Net | DMH-Net-main/verify_vote.py | import argparse
import torch
from torch.utils.data import DataLoader
from tqdm import trange
from config import cfg, cfg_from_yaml_file, cfg_from_list
from e2plabel.e2plabelconvert import VIEW_NAME
from perspective_dataset import PerspectiveDataset
from visualization import getMaskByType, visualize
from postprocess.postprocess2 import get_vote_mask_c_up_down
if __name__ == '__main__':
    # Sanity-check script: applies the center-line hough vote matrices directly
    # to ground-truth center-line masks and visualizes the resulting vote maps
    # next to the ground truth, for two variants of hough labels.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--cfg_file', type=str, required=True, help='specify the config for training')
    parser.add_argument('--visu_count', default=2, type=int, help='visualize how many batches')
    parser.add_argument('--batch_size', default=1, type=int, help='mini-batch size')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    device = torch.device('cuda')
    dataset_valid = PerspectiveDataset(cfg, "test")
    loader_valid = DataLoader(dataset_valid,
                              args.batch_size,
                              collate_fn=dataset_valid.collate,
                              shuffle=False,
                              drop_last=False,
                              num_workers=0,
                              pin_memory=True)
    # Build a second dataset whose hough labels use the "nearest_only" gradual type.
    dataset_nearest_only = PerspectiveDataset(cfg, "test")
    dataset_nearest_only.hough_label_gradual_type = "nearest_only"

    iterator_valid = iter(loader_valid)
    for valid_idx in trange(args.visu_count, desc='Verify CLine Vote', position=2):
        input = next(iterator_valid)

        def _core(input):
            # Vote the GT center-line masks through the up/down vote matrices
            # and visualize GT vs. raw vote response.
            with torch.no_grad():
                for k in input:
                    if isinstance(input[k], torch.Tensor):
                        input[k] = input[k].to(device)
                matss = []
                for img_idx in range(input["p_imgs"].shape[0]):
                    mats = []
                    for view_idx, view_name in enumerate(VIEW_NAME):
                        mat, _ = getMaskByType("gtc", cfg, input, None, img_idx, view_idx)
                        mats.append(mat)
                    matss.append(mats)
                # (batch, view, H, W) ground-truth center-line masks.
                gtc_map = torch.stack([torch.stack(mats, dim=0) for mats in matss], dim=0)
                vmask = get_vote_mask_c_up_down(cfg, input["p_imgs"])
                # Split the vote matrix into upper-half and lower-half angle bins.
                vmu, vmd = vmask[:, :, 0:vmask.shape[-1] // 2], vmask[:, :, vmask.shape[-1] // 2:]
                # Voting is a matmul of the flattened masks with the flattened vote matrices.
                hough_c_up_vote = torch.matmul(gtc_map.reshape(*gtc_map.shape[0:2], -1), vmu.reshape(-1, vmu.shape[-1]))
                hough_c_down_vote = torch.matmul(gtc_map.reshape(*gtc_map.shape[0:2], -1),
                                                 vmd.reshape(-1, vmd.shape[-1]))
                hough_vote_res = torch.stack([hough_c_up_vote, hough_c_down_vote], dim=3)
                # Normalize to [0, 1] for visualization.
                hough_vote_res = hough_vote_res / hough_vote_res.max()
                gtc_output = {
                    "raw_cud": hough_vote_res
                }
                visualize(cfg, input, gtc_output, drawtypes=[["c gt"], ["c raw"]], show=True, dpi=600)

        _core(input)
        # Repeat with the nearest_only-label dataset for the same filenames.
        _core(loader_valid.collate_fn([dataset_nearest_only.getItem(f) for f in input["filename"]]))
| 3,433 | 46.041096 | 120 | py |
DMH-Net | DMH-Net-main/model.py | import math
import types
from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from drn import drn_d_22, drn_d_38, drn_d_54
from e2plabel.e2plabelconvert import VIEW_NAME
from layers import FusionHoughStage, PerspectiveE2PP2E, HoughNewUpSampler
# Backbone identifier groups; DMHNet.__init__ dispatches on these names.
ENCODER_RESNET = ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
                  'resnet_official_34']
ENCODER_DENSENET = ['densenet121', 'densenet169', 'densenet161', 'densenet201']
ENCODER_HOUGH = ['unet18', 'vgg16', 'drn38', 'drn22', 'drn54']
def OfficialResnetWrapper(model):
    """Patch a torchvision ResNet in place so its forward returns the five
    intermediate feature maps (c1..c5) instead of classification logits.

    The model instance itself is modified and returned.
    """
    # Copied from resnet.py of the torchvision 0.10.0 source.
    def _forward_impl(self, x: torch.Tensor) -> Tuple[torch.Tensor, ...]:  # Tuple[torch.Tensor * 5]
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        c1 = self.relu(x)
        x = self.maxpool(c1)

        c2 = self.layer1(x)
        c3 = self.layer2(c2)
        c4 = self.layer3(c3)
        c5 = self.layer4(c4)

        # x = self.avgpool(x)  # classification features are not needed, so skip GAP and the FC head
        # x = torch.flatten(x, 1)
        # x = self.fc(x)

        return c1, c2, c3, c4, c5

    model._forward_impl = types.MethodType(_forward_impl, model)
    return model
class DMHNet(nn.Module):
    """DMH-Net: layout line prediction from six perspective views.

    Each view is encoded by a CNN backbone (optionally private backbones for
    the up/down views); multi-scale features are converted into hough-vote
    features (vertical/horizontal line bins plus center-line up/down angle
    bins) and fused into per-view line probability maps. forward() returns
    (losses, results_dict).
    """

    # ImageNet channel statistics, shaped (1, C, 1, 1) for broadcasting.
    x_mean = torch.FloatTensor(np.array([0.485, 0.456, 0.406])[None, :, None, None])
    x_std = torch.FloatTensor(np.array([0.229, 0.224, 0.225])[None, :, None, None])

    def __init__(self, cfg, backbone, use_rnn):
        super(DMHNet, self).__init__()
        self.cfg = cfg
        self.backbone = backbone
        self.use_rnn = use_rnn  # apparently an unused parameter
        self.out_scale = 4
        self.step_cols = 1
        self.hidden_size = 256
        self.fov = 160

        # Encoder
        def makeFeatureExtractor():
            # Build a backbone whose forward yields 5 multi-scale feature maps.
            if backbone == "resnet_official_34":
                from torchvision.models.resnet import resnet34
                return OfficialResnetWrapper(resnet34(pretrained=True))
            if backbone == "resnet_official_50":
                from torchvision.models.resnet import resnet50
                return OfficialResnetWrapper(resnet50(pretrained=True))
            if backbone == "resnet_official_18":
                from torchvision.models.resnet import resnet18
                return OfficialResnetWrapper(resnet18(pretrained=True))
            if backbone == "resnet_official_101":
                from torchvision.models.resnet import resnet101
                return OfficialResnetWrapper(resnet101(pretrained=True))
            elif backbone.startswith('drn22'):
                return drn_d_22(pretrained=True, out_middle=True)
            elif backbone.startswith('drn38'):
                return drn_d_38(pretrained=True, out_middle=True)
            elif backbone.startswith('drn54'):
                return drn_d_54(pretrained=True, out_middle=True)
            else:
                raise NotImplementedError()

        self.feature_extractor = [makeFeatureExtractor()]
        # Index i maps view i to its backbone; the 7th entry is the feature
        # extractor used for the panorama.
        self._feature_extractor_ref = [0] * 7
        if self.cfg.MODEL.BACKBONE.PRIVATE_UPDOWN:
            # Dedicated backbone shared by the up and down views (views 4 and 5).
            self.feature_extractor.append(makeFeatureExtractor())
            self._feature_extractor_ref[4:6] = [len(self.feature_extractor) - 1] * 2
        if self.cfg.MODEL.BACKBONE.PRIVATE_UP:
            # Dedicated backbone for the up view only (view 5).
            self.feature_extractor.append(makeFeatureExtractor())
            self._feature_extractor_ref[5] = len(self.feature_extractor) - 1
        self.feature_extractor = nn.ModuleList(self.feature_extractor)

        # Input shape
        H, W = 512, 1024
        # Inference channels number from each block of the encoder
        with torch.no_grad():
            dummy = torch.zeros(1, 3, 512, 512)
            if backbone.startswith('drn'):
                net_out = self.feature_extractor[0](dummy)[1]
            else:
                net_out = self.feature_extractor[0](dummy)
            c0, c1, c2, c3, c4 = [b.shape[1] for b in net_out]
            size0, size1, size2, size3, size4 = [b.shape[2] for b in net_out]
            self.c0, self.c1, self.c2, self.c3, self.c4 = c0, c1, c2, c3, c4
            # print("c0, c1, c2, c3, c4", c0, c1, c2, c3, c4)
        c_last = int((c1 * 8 + c2 * 4 + c3 * 4 + c4 * 4) / self.out_scale)
        self.x_mean.requires_grad = False
        self.x_std.requires_grad = False

        def make5HoughModules():
            # One hough-voting module per backbone scale.
            return nn.ModuleList([
                PerspectiveE2PP2E(self.cfg, size0, size0, size0, self.fov, c0, 1),
                PerspectiveE2PP2E(self.cfg, size1, size1, size1, self.fov, c1, 1),
                # TODO: for feature maps downsampled to hw=64, can the number
                # of hough angle bins still be 180?
                PerspectiveE2PP2E(self.cfg, size2, size2, size2, self.fov, c2, 1,
                                  hough_angles_num=90),
                PerspectiveE2PP2E(self.cfg, size3, size3, size3, self.fov, c3, 1,
                                  hough_angles_num=90),
                PerspectiveE2PP2E(self.cfg, size4, size4, size4, self.fov, c4, 1,
                                  hough_angles_num=90),
            ])

        # Hough modules: set 0 shared by views 0-3, set 1 for view 4, set 2 for view 5.
        self.hough = [make5HoughModules(), make5HoughModules(), make5HoughModules()]
        self._hough_ref = [0, 0, 0, 0, 1, 2]
        self.hough = nn.ModuleList(self.hough)

        def make2FusionModules():
            factor = self.cfg.MODEL.get("CONV1_CHANNEL_FACTOR", 2)
            return nn.ModuleList([
                FusionHoughStage(self.cfg, "xy", 3, c0 // factor, c1 // factor, c2 // factor, c3 // factor,
                                 c4 // factor,
                                 upsample_rate=[512 // size0, 512 // size1, 512 // size2, 512 // size3,
                                                512 // size4, ]),  # fusion of the xy hough features
                FusionHoughStage(self.cfg, "cupdown", 3, c0 // factor, c1 // factor, c2 // factor, c3 // factor,
                                 c4 // factor,
                                 upsample_rate=[512 // size0, 512 // size1, 512 // size2, 512 // size3, 512 // size4, ],
                                 upsampler_class=HoughNewUpSampler),
                # fusion of the center-line up/down hough features
            ])

        # Fusion stages follow the same sharing pattern as the hough modules.
        self.fusion_stage = [make2FusionModules(), make2FusionModules(), make2FusionModules()]
        self._fusion_stage_ref = [0, 0, 0, 0, 1, 2]
        self.fusion_stage = nn.ModuleList(self.fusion_stage)

    def _input_image_normalize(self, x):
        # Normalize the RGB channels with ImageNet statistics (stats tensors
        # are lazily moved to x's device on first use).
        if self.x_mean.device != x.device:
            self.x_mean = self.x_mean.to(x.device)
            self.x_std = self.x_std.to(x.device)
        return (x[:, :3] - self.x_mean) / self.x_std

    def _get_feature_extractor(self, i):
        # Resolve the (possibly shared) backbone for view i.
        return self.feature_extractor[self._feature_extractor_ref[i]]

    def _get_hough(self, i):
        # Resolve the (possibly shared) hough module list for view i.
        return self.hough[self._hough_ref[i]]

    def _get_fusion_stage(self, i):
        # Resolve the (possibly shared) fusion stage pair for view i.
        return self.fusion_stage[self._fusion_stage_ref[i]]

    def forward(self, input):
        results_dict = {}
        p_xys = []
        p_cuds = []
        for view_idx in range(input["p_imgs"].shape[1]):  # loop over every view of all samples
            p_img = self._input_image_normalize(input["p_imgs"][:, view_idx])
            p_conv_list = self._get_feature_extractor(view_idx)(p_img)
            # DRN backbones return (final, [stages]); keep only the stage list.
            if len(p_conv_list) == 2: p_conv_list = p_conv_list[1]
            p_hough_bin_feat = [hough(onefeat) for onefeat, hough in zip(p_conv_list, self._get_hough(view_idx))]
            fusioner = self._get_fusion_stage(view_idx)
            # Decoder for xy peaks
            p_hough_feat_xy = [f[0] for f in p_hough_bin_feat]
            p_xy = fusioner[0](p_hough_feat_xy)
            p_xys.append(p_xy)
            # Decoder for the center lines
            p_hough_feat_cud = [f[1] for f in p_hough_bin_feat]
            p_cud = fusioner[1](p_hough_feat_cud)
            p_cuds.append(p_cud)
        results_dict.update({
            "p_preds_xy": torch.cat(p_xys, 1),
            "p_preds_cud": torch.cat(p_cuds, 1),
        })
        # Ablation switches: disable a prediction head by forcing -inf logits.
        if self.cfg.MODEL.get("NO_CLINE_PRED"):
            results_dict["p_preds_cud"] = torch.ones_like(results_dict["p_preds_cud"]) * -math.inf
        if self.cfg.MODEL.get("NO_HLINE_PRED"):
            results_dict["p_preds_xy"][:, :, :, 1] = torch.ones_like(results_dict["p_preds_xy"][:, :, :, 1]) * -math.inf
        if self.cfg.MODEL.get("NO_VLINE_PRED"):
            results_dict["p_preds_xy"][:, :, :, 0] = torch.ones_like(results_dict["p_preds_xy"][:, :, :, 0]) * -math.inf
        losses = self.calculate_loss(input, results_dict)
        return losses, results_dict

    def calculate_loss(self, input, output):
        # Per-view BCE-with-logits losses against the four label families.
        device = input["e_img"].device
        xLabels = input["xLabels"].to(device)
        yLabels = input["yLabels"].to(device)
        cUpLabels = input["cUpLabels"].to(device)
        cDownLabels = input["cDownLabels"].to(device)
        losses = {
            "total": 0.0
        }
        # Extra losses: for debugging only, never counted into the total!
        losses["extra_xLabels"] = 0.0
        losses["extra_yLabels"] = 0.0
        losses["extra_cUpLabels"] = 0.0
        losses["extra_cDownLabels"] = 0.0
        # Six perspective losses
        for view_idx in range(output["p_preds_xy"].shape[1]):
            one_loss_x = []
            one_loss_y = []
            one_loss_c_up = []
            one_loss_c_down = []
            for img_idx in range(output["p_preds_xy"].shape[0]):
                the_onepred_xy = output["p_preds_xy"][img_idx, view_idx]
                if not self.cfg.MODEL.get("NO_VLINE_PRED"):
                    the_oneloss_x = F.binary_cross_entropy_with_logits(the_onepred_xy[:, 0], xLabels[img_idx, view_idx])
                    one_loss_x.append(the_oneloss_x)
                if not self.cfg.MODEL.get("NO_HLINE_PRED"):
                    the_oneloss_y = F.binary_cross_entropy_with_logits(the_onepred_xy[:, 1], yLabels[img_idx, view_idx])
                    one_loss_y.append(the_oneloss_y)
                if not self.cfg.MODEL.get("NO_CLINE_PRED"):
                    the_onepred_cud = output["p_preds_cud"][img_idx, view_idx]
                    the_oneloss_c_up = F.binary_cross_entropy_with_logits(the_onepred_cud[:, 0],
                                                                          cUpLabels[img_idx, view_idx])
                    one_loss_c_up.append(the_oneloss_c_up)
                    the_oneloss_c_down = F.binary_cross_entropy_with_logits(the_onepred_cud[:, 1],
                                                                            cDownLabels[img_idx, view_idx])
                    one_loss_c_down.append(the_oneloss_c_down)
            # Sum then divide by batch size (not mean) so every image
            # contributes equally to the loss.
            one_loss_x = (torch.stack(one_loss_x) if len(one_loss_x) > 0 else output["p_preds_xy"].new_tensor([])) \
                             .sum() / output["p_preds_xy"].shape[0]
            one_loss_y = (torch.stack(one_loss_y) if len(one_loss_y) > 0 else output["p_preds_xy"].new_tensor([])) \
                             .sum() / output["p_preds_xy"].shape[0]
            one_loss_c_up = (torch.stack(one_loss_c_up) if len(one_loss_c_up) > 0 else output["p_preds_cud"].new_tensor(
                [])).sum() / output["p_preds_xy"].shape[0]
            one_loss_c_down = (torch.stack(one_loss_c_down) if len(one_loss_c_down) > 0 else output[
                "p_preds_cud"].new_tensor([])).sum() / output["p_preds_xy"].shape[0]
            with torch.no_grad():
                losses["extra_xLabels"] += one_loss_x
                losses["extra_yLabels"] += one_loss_y
                losses["extra_cUpLabels"] += one_loss_c_up
                losses["extra_cDownLabels"] += one_loss_c_down
            one_loss = one_loss_x + one_loss_y + one_loss_c_up + one_loss_c_down
            losses["p_" + VIEW_NAME[view_idx]] = one_loss
            losses["total"] += self.cfg.MODEL.get("LOSS", {}).get("ALPHA_PERSPECTIVE", 1.0) * one_loss
        return losses
| 11,875 | 45.031008 | 120 | py |
DMH-Net | DMH-Net-main/drn.py | import math
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
# Normalization layer used throughout DRN; aliased so it could be swapped out
# (e.g. for a synchronized BatchNorm) in one place.
BatchNorm = nn.BatchNorm2d


# __all__ = ['DRN', 'drn26', 'drn42', 'drn58']


webroot = 'http://dl.yf.io/drn/'

# Pretrained checkpoint URLs.
# NOTE(review): 'drn-d-24', 'drn-d-40', 'drn-d-56' and 'drn-d-107' are looked
# up by constructors below but have no entry here, so those constructors raise
# KeyError when pretrained=True — confirm checkpoint availability.
model_urls = {
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'drn-c-26': webroot + 'drn_c_26-ddedf421.pth',
    'drn-c-42': webroot + 'drn_c_42-9d336e8c.pth',
    'drn-c-58': webroot + 'drn_c_58-0a53a92c.pth',
    'drn-d-22': webroot + 'drn_d_22-4bd2f8ea.pth',
    'drn-d-38': webroot + 'drn_d_38-eebb45f0.pth',
    'drn-d-54': webroot + 'drn_d_54-0e0534ff.pth',
    'drn-d-105': webroot + 'drn_d_105-12b40979.pth'
}
def conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):
    """Bias-free 3x3 convolution with configurable stride/padding/dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=padding,
        dilation=dilation,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block with per-conv dilation and an optional skip.

    When ``residual`` is False the identity addition is skipped (used by
    DRN-C's de-gridding tail layers).
    """
    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 dilation=(1, 1), residual=True):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride,
                             padding=dilation[0], dilation=dilation[0])
        self.bn1 = BatchNorm(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes,
                             padding=dilation[1], dilation=dilation[1])
        self.bn2 = BatchNorm(planes)
        self.downsample = downsample
        self.stride = stride
        self.residual = residual

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            # Project the identity branch to match shape/stride.
            residual = self.downsample(x)
        if self.residual:
            out += residual
        out = self.relu(out)

        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block (4x channel expansion).

    Note: the ``residual`` flag is accepted for signature parity with
    BasicBlock but is not used here — the skip is always applied.
    """
    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None,
                 dilation=(1, 1), residual=True):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=dilation[1], bias=False,
                               dilation=dilation[1])
        self.bn2 = BatchNorm(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = BatchNorm(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            # Project the identity branch to match shape/stride.
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
class DRN(nn.Module):
    """Dilated Residual Network (arch 'C' or 'D').

    Layers 5/6 use dilation instead of striding, keeping higher spatial
    resolution; arch 'D' additionally replaces the stem and tail residual
    layers with plain conv stacks. With ``out_middle=True`` forward returns
    ``(x, [stage outputs])``; with ``out_map=True`` the 1x1 ``fc`` conv is
    applied as a dense prediction head.
    """

    def __init__(self, block, layers, num_classes=1000,
                 channels=(16, 32, 64, 128, 256, 512, 512, 512),
                 out_map=False, out_middle=False, pool_size=28, arch='D'):
        super(DRN, self).__init__()
        self.inplanes = channels[0]
        self.out_map = out_map
        self.out_dim = channels[-1]
        self.out_middle = out_middle
        self.arch = arch

        if arch == 'C':
            # Residual stem (conv + two BasicBlock stages).
            self.conv1 = nn.Conv2d(3, channels[0], kernel_size=7, stride=1,
                                   padding=3, bias=False)
            self.bn1 = BatchNorm(channels[0])
            self.relu = nn.ReLU(inplace=True)

            self.layer1 = self._make_layer(
                BasicBlock, channels[0], layers[0], stride=1)
            self.layer2 = self._make_layer(
                BasicBlock, channels[1], layers[1], stride=2)
        elif arch == 'D':
            # Plain convolutional stem.
            self.layer0 = nn.Sequential(
                nn.Conv2d(3, channels[0], kernel_size=7, stride=1, padding=3,
                          bias=False),
                BatchNorm(channels[0]),
                nn.ReLU(inplace=True)
            )

            self.layer1 = self._make_conv_layers(
                channels[0], layers[0], stride=1)
            self.layer2 = self._make_conv_layers(
                channels[1], layers[1], stride=2)

        self.layer3 = self._make_layer(block, channels[2], layers[2], stride=2)
        self.layer4 = self._make_layer(block, channels[3], layers[3], stride=2)
        # Later stages trade stride for dilation to preserve resolution.
        self.layer5 = self._make_layer(block, channels[4], layers[4],
                                       dilation=2, new_level=False)
        self.layer6 = None if layers[5] == 0 else \
            self._make_layer(block, channels[5], layers[5], dilation=4,
                             new_level=False)

        if arch == 'C':
            # De-gridding tail: non-residual BasicBlocks with decreasing dilation.
            self.layer7 = None if layers[6] == 0 else \
                self._make_layer(BasicBlock, channels[6], layers[6], dilation=2,
                                 new_level=False, residual=False)
            self.layer8 = None if layers[7] == 0 else \
                self._make_layer(BasicBlock, channels[7], layers[7], dilation=1,
                                 new_level=False, residual=False)
        elif arch == 'D':
            # De-gridding tail: plain conv stacks with decreasing dilation.
            self.layer7 = None if layers[6] == 0 else \
                self._make_conv_layers(channels[6], layers[6], dilation=2)
            self.layer8 = None if layers[7] == 0 else \
                self._make_conv_layers(channels[7], layers[7], dilation=1)

        if num_classes > 0:
            self.avgpool = nn.AvgPool2d(pool_size)
            self.fc = nn.Conv2d(self.out_dim, num_classes, kernel_size=1,
                                stride=1, padding=0, bias=True)
        # He initialization for convs; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1,
                    new_level=True, residual=True):
        # Build one residual stage; when entering a new dilation level the
        # first block uses half the dilation (gradual transition).
        assert dilation == 1 or dilation % 2 == 0
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                BatchNorm(planes * block.expansion),
            )

        layers = list()
        layers.append(block(
            self.inplanes, planes, stride, downsample,
            dilation=(1, 1) if dilation == 1 else (
                dilation // 2 if new_level else dilation, dilation),
            residual=residual))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, residual=residual,
                                dilation=(dilation, dilation)))

        return nn.Sequential(*layers)

    def _make_conv_layers(self, channels, convs, stride=1, dilation=1):
        # Build a plain conv-BN-ReLU stack (arch 'D' stem/tail stages); only
        # the first conv applies the stride.
        modules = []
        for i in range(convs):
            modules.extend([
                nn.Conv2d(self.inplanes, channels, kernel_size=3,
                          stride=stride if i == 0 else 1,
                          padding=dilation, bias=False, dilation=dilation),
                BatchNorm(channels),
                nn.ReLU(inplace=True)])
            self.inplanes = channels
        return nn.Sequential(*modules)

    def forward(self, x):
        # y collects the intermediate stage outputs (for out_middle=True).
        y = list()

        if self.arch == 'C':
            x = self.conv1(x)
            x = self.bn1(x)
            x = self.relu(x)
        elif self.arch == 'D':
            x = self.layer0(x)

        x = self.layer1(x)
        # y.append(x)
        x = self.layer2(x)
        y.append(x)

        x = self.layer3(x)
        y.append(x)

        x = self.layer4(x)
        y.append(x)

        x = self.layer5(x)
        y.append(x)

        if self.layer6 is not None:
            x = self.layer6(x)
            y.append(x)

        if self.layer7 is not None:
            x = self.layer7(x)
            # y.append(x)

        if self.layer8 is not None:
            x = self.layer8(x)
            # y.append(x)

        if self.out_map:
            x = self.fc(x)
        else:
            # Classification pooling/head disabled: pass features through as-is.
            x = x
            # x = self.avgpool(x)
            # x = self.fc(x)
            # x = x.view(x.size(0), -1)

        if self.out_middle:
            return x, y
        else:
            return x
class DRN_A(nn.Module):
    """DRN arch 'A': a standard ResNet whose last two stages use dilation
    (2 and 4) with stride 1 instead of downsampling, followed by the usual
    average-pool + linear classification head."""

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(DRN_A, self).__init__()
        self.out_dim = 512 * block.expansion
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        # Dilated (not strided) deeper stages keep spatial resolution.
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1,
                                       dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1,
                                       dilation=4)
        self.avgpool = nn.AvgPool2d(28, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He initialization for convs; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, BatchNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        # for m in self.modules():
        #     if isinstance(m, nn.Conv2d):
        #         nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        #     elif isinstance(m, nn.BatchNorm2d):
        #         nn.init.constant_(m.weight, 1)
        #         nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
        # Build one residual stage with a projection shortcut when needed.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes,
                                dilation=(dilation, dilation)))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
def drn_a_50(pretrained=False, **kwargs):
    """DRN-A-50: ResNet-50 layout with dilated final stages."""
    model = DRN_A(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # Weights are layout-compatible with torchvision's ResNet-50.
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
def drn_c_26(pretrained=False, **kwargs):
    """DRN-C-26 built from BasicBlocks."""
    model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='C', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-c-26']))
    return model
def drn_c_42(pretrained=False, **kwargs):
    """DRN-C-42 built from BasicBlocks."""
    model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-c-42']))
    return model
def drn_c_58(pretrained=False, **kwargs):
    """DRN-C-58 built from Bottleneck blocks."""
    model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='C', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-c-58']))
    return model
def drn_d_22(pretrained=False, **kwargs):
    """DRN-D-22 built from BasicBlocks."""
    model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 1, 1], arch='D', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-d-22']))
    return model
def drn_d_24(pretrained=False, **kwargs):
    """DRN-D-24 built from BasicBlocks.

    NOTE(review): 'drn-d-24' has no entry in model_urls above, so
    pretrained=True raises KeyError — confirm checkpoint availability.
    """
    model = DRN(BasicBlock, [1, 1, 2, 2, 2, 2, 2, 2], arch='D', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-d-24']))
    return model
def drn_d_38(pretrained=False, **kwargs):
    """DRN-D-38 built from BasicBlocks."""
    model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-d-38']))
    return model
def drn_d_40(pretrained=False, **kwargs):
    """DRN-D-40 built from BasicBlocks.

    NOTE(review): 'drn-d-40' has no entry in model_urls above, so
    pretrained=True raises KeyError — confirm checkpoint availability.
    """
    model = DRN(BasicBlock, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-d-40']))
    return model
def drn_d_54(pretrained=False, **kwargs):
    """DRN-D-54 built from Bottleneck blocks."""
    model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 1, 1], arch='D', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-d-54']))
    return model
def drn_d_56(pretrained=False, **kwargs):
    """DRN-D-56 built from Bottleneck blocks.

    NOTE(review): 'drn-d-56' has no entry in model_urls above, so
    pretrained=True raises KeyError — confirm checkpoint availability.
    """
    model = DRN(Bottleneck, [1, 1, 3, 4, 6, 3, 2, 2], arch='D', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-d-56']))
    return model
def drn_d_105(pretrained=False, **kwargs):
    """DRN-D-105 built from Bottleneck blocks."""
    model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 1, 1], arch='D', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-d-105']))
    return model
def drn_d_107(pretrained=False, **kwargs):
    """DRN-D-107 built from Bottleneck blocks.

    Fix: the final ``return model`` line was corrupted by dataset-dump residue
    ("| 14,207 | ..." fused onto it), which made it an invalid bitwise-or
    expression; restored to a plain return.

    NOTE(review): 'drn-d-107' has no entry in model_urls above, so
    pretrained=True raises KeyError — confirm checkpoint availability.
    """
    model = DRN(Bottleneck, [1, 1, 3, 4, 23, 3, 2, 2], arch='D', **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['drn-d-107']))
    return model
DMH-Net | DMH-Net-main/layers.py | import math
import torch
import torch.nn as nn
class PerspectiveE2PP2E(nn.Module):
    """Hough voting head for one backbone scale of one perspective view.

    Projects the feature map into four branches (vertical-line x bins,
    horizontal-line y bins, center-line "up" and "down" angle bins), performs
    the hough vote (axis sums for x/y; matmul with a precomputed vote matrix
    for center lines), then convolves in hough space. forward() returns
    ``(hough_feat_xy, hough_feat_cud)``.
    """

    def __init__(self, cfg, input_h, input_w, pers_h, fov, input_feat, output_feat, hough_angles_num=180,
                 hoguh_clines_tole=1.0):
        super(PerspectiveE2PP2E, self).__init__()
        self.cfg = cfg
        self.hoguh_clines_tole = hoguh_clines_tole  # (sic) tolerance for center-line voting
        self.hough_angles_num = hough_angles_num
        self.input_h = input_h
        self.input_w = input_w
        self.pers_h = pers_h
        self.fov = fov
        self.input_feat = input_feat
        dim = input_feat // self.cfg.MODEL.get("CONV1_CHANNEL_FACTOR", 2)
        # Build the conv1 (1x1 channel-reduction) branches.
        self.conv1_x = nn.Sequential(nn.Conv2d(input_feat, dim, kernel_size=(1, 1), padding=(0, 0)),
                                     nn.BatchNorm2d(dim),
                                     nn.ReLU()
                                     )
        self.conv1_cup = nn.Sequential(nn.Conv2d(input_feat, dim, kernel_size=(1, 1), padding=(0, 0)),
                                       nn.BatchNorm2d(dim),
                                       nn.ReLU()
                                       )
        self.conv1_cdown = nn.Sequential(nn.Conv2d(input_feat, dim, kernel_size=(1, 1), padding=(0, 0)),
                                         nn.BatchNorm2d(dim),
                                         nn.ReLU()
                                         )
        self.conv1_y = nn.Sequential(nn.Conv2d(input_feat, dim, kernel_size=(1, 1), padding=(0, 0)),
                                     nn.BatchNorm2d(dim),
                                     nn.ReLU()
                                     )
        # Build the conv2 (hough-space (3,1) convolution) branches.
        self.conv2_x = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
                                     nn.BatchNorm2d(dim),
                                     nn.ReLU(),
                                     nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
                                     nn.BatchNorm2d(dim),
                                     nn.ReLU()
                                     )
        self.conv2_cup = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
                                       nn.BatchNorm2d(dim),
                                       nn.ReLU(),
                                       nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
                                       nn.BatchNorm2d(dim),
                                       nn.ReLU()
                                       )
        self.conv2_cdown = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
                                         nn.BatchNorm2d(dim),
                                         nn.ReLU(),
                                         nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
                                         nn.BatchNorm2d(dim),
                                         nn.ReLU()
                                         )
        self.conv2_y = nn.Sequential(nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
                                     nn.BatchNorm2d(dim),
                                     nn.ReLU(),
                                     nn.Conv2d(dim, dim, kernel_size=(3, 1), padding=(1, 0)),
                                     nn.BatchNorm2d(dim),
                                     nn.ReLU()
                                     )
        # Vote matrices for lines passing through the image center
        # (built lazily on first forward, once the feature size is known).
        self.vote_mask_c_up = None
        self.vote_mask_c_down = None

    def makeVoteMask(self, img_size, device):
        vote_mask_c_up, vote_mask_c_down = self.makeVoteMaskStatic(self.cfg.MODEL.HOUGH.CLINE_TYPE, img_size, device,
                                                                   self.hough_angles_num, self.hoguh_clines_tole)
        # To avoid GPU memory blow-up: flatten to (h*w, n_angles) matrices so
        # the vote becomes a single matmul with the flattened features.
        self.vote_mask_c_up = vote_mask_c_up.reshape(-1, vote_mask_c_up.shape[-1])
        self.vote_mask_c_down = vote_mask_c_down.reshape(-1, vote_mask_c_down.shape[-1])

    @staticmethod
    def makeVoteMaskStatic(type: str, img_size, device, hough_angles_num=180, hoguh_clines_tole=1.0):
        """Build the (H, W, n_angles) up/down vote masks for center lines."""
        if type == "NEW":
            def scatterResult(input: torch.Tensor, dim: int) -> torch.Tensor:
                # Scatter fractional coordinates into one-hot-like weights with
                # linear interpolation between the two neighboring integer bins.
                result = torch.zeros(*input.shape, input.shape[dim], device="cpu", dtype=torch.float64)
                input = input.unsqueeze(-1).transpose(dim, -1)
                integer_part = torch.floor(input).to(torch.int64)
                decimal_part = input - integer_part
                result.scatter_add_(dim, integer_part, 1 - decimal_part)
                result.scatter_add_(dim, torch.ceil(input).to(torch.int64), decimal_part)
                return result

            # Rule: every border pixel corresponds to one angle; e.g. for a
            # 512x512 image the upper half covers 2*256 + 512 - 2 = 1022 angles.
            # Each angle draws a line to the image center, and every line sums
            # a fixed 256 pixels.
            # When the line does not pass exactly through a pixel center,
            # linear interpolation is applied.
            #
            # Orientation of the 1022-dim axis: the upper half-circle runs
            # clockwise from far left to far right; the lower half-circle runs
            # clockwise from far right back to far left.
            with torch.no_grad():
                h2, w2 = (img_size[0] - 1) / 2, (img_size[1] - 1) / 2
                rangeX = torch.arange(img_size[1], device="cpu", dtype=torch.float64)
                rangeY = torch.arange(img_size[0], device="cpu", dtype=torch.float64)
                # For each point on the left/right borders, the y value at every
                # x position along its line to the center.
                lr_mat = (torch.abs(w2 - rangeX) / w2).unsqueeze(0) * (rangeY - h2).unsqueeze(1) + h2  # shape (512, 512)
                lr_res = scatterResult(lr_mat, 0)
                l_res = torch.cat((lr_res[:, 0:math.ceil(img_size[1] / 2)],
                                   torch.zeros((lr_res.shape[0], img_size[1] // 2, lr_res.shape[2]), device="cpu",
                                               dtype=torch.float64)),
                                  dim=1)
                r_res = torch.cat((torch.zeros((lr_res.shape[0], img_size[1] // 2, lr_res.shape[2]), device="cpu",
                                               dtype=torch.float64),
                                   lr_res[:, img_size[1] // 2:]),
                                  dim=1)
                # For each point on the top/bottom borders, the x value at every
                # y position along its line to the center.
                ud_mat = (torch.abs(h2 - rangeY) / h2).unsqueeze(1) * (rangeX - w2).unsqueeze(0) + w2  # shape (512, 512)
                ud_res = scatterResult(ud_mat, 1)
                # Stitch the pieces into the final result.
                h2f, h2c = img_size[0] // 2, math.ceil(img_size[0] / 2)
                vote_mask_c_up = torch.cat([l_res[:h2c, :, 1:h2f].flip([2]), ud_res[:h2c], r_res[:h2c, :, 1:h2f]],
                                           dim=2)
                vote_mask_c_down = torch.cat(
                    [r_res[h2f:, :, h2c:-1], ud_res[h2f:].flip([2]), l_res[h2f:, :, h2c:-1].flip([2])],
                    dim=2)
                vote_mask_c_up = torch.cat(
                    [vote_mask_c_up.to(torch.float32),
                     torch.zeros((h2f, *vote_mask_c_up.shape[1:]), device="cpu", dtype=torch.float32)], dim=0)
                vote_mask_c_down = torch.cat(
                    [torch.zeros((h2f, *vote_mask_c_down.shape[1:]), device="cpu", dtype=torch.float32),
                     vote_mask_c_down.to(torch.float32)], dim=0)
        else:
            raise NotImplementedError()
        return vote_mask_c_up.to(device).contiguous(), vote_mask_c_down.to(device).contiguous()

    def forward(self, pers):
        # conv1: project into the four voting branches.
        featmap_x = self.conv1_x(pers)
        featmap_cup = self.conv1_cup(pers)
        featmap_cdown = self.conv1_cdown(pers)
        featmap_y = self.conv1_y(pers)

        # x/y votes: sum over the orthogonal spatial axis.
        hough_x_vote = featmap_x.sum(dim=2, keepdim=True)
        hough_x_vote_reshape = hough_x_vote.reshape(hough_x_vote.shape[0], -1, self.pers_h)
        hough_y_vote = featmap_y.sum(dim=3, keepdim=True)
        hough_y_vote_reshape = hough_y_vote.reshape(hough_y_vote.shape[0], -1, self.pers_h)

        # Center-line voting (vote matrices built lazily from the feature size).
        if self.vote_mask_c_up is None:
            self.makeVoteMask(featmap_cup.shape[2:4], featmap_cup.device)
        hough_c_up_vote = torch.matmul(featmap_cup.reshape(*featmap_cup.shape[0:2], -1), self.vote_mask_c_up)
        hough_c_down_vote = torch.matmul(featmap_cdown.reshape(*featmap_cdown.shape[0:2], -1), self.vote_mask_c_down)

        # conv2: conv in hough space
        hough_feat = torch.cat(
            [self.conv2_x(hough_x_vote_reshape.unsqueeze(-1)), self.conv2_y(hough_y_vote_reshape.unsqueeze(-1))],
            dim=3)
        hough_feat_cud = torch.cat(
            [self.conv2_cup(hough_c_up_vote.unsqueeze(-1)), self.conv2_cdown(hough_c_down_vote.unsqueeze(-1))],
            dim=3)
        return hough_feat, hough_feat_cud

    def __repr__(self):
        return "FeatureShape(H={}, W={}, C={}), Perspective Length (distance_bin_num={}, fov={})".format(
            self.input_h, self.input_w, self.input_feat, self.pers_h, self.fov)
class HoughNewUpSampler(nn.Module):
    """Upsamples center-line hough features along the bin axis in three
    overlapping segments (left / middle / right) and stitches them back
    together, so bilinear interpolation never bridges segment boundaries."""

    def __init__(self, upsample_rate: int):
        super().__init__()
        # Three independent (rate, 1) bilinear upsamplers, one per segment.
        # Attribute names ul/um/ur are kept so state_dict keys stay stable.
        self.ul, self.um, self.ur = (
            nn.Upsample(scale_factor=(upsample_rate, 1), mode='bilinear', align_corners=False)
            for _ in range(3)
        )

    def forward(self, x):
        # Only valid for even-sized square source images (the original width
        # and height are not passed in, so a square image is assumed here).
        assert (x.shape[2] + 2) % 4 == 0, "仅适用于偶数尺寸正方形图片的处理"
        half = (x.shape[2] + 2) // 4
        # Segments overlap by one bin on each side; the overlap rows are
        # trimmed again after upsampling before concatenation.
        left = self.ul(x[:, :, :half])
        middle = self.um(x[:, :, half - 1:3 * half - 1])
        right = self.ur(x[:, :, 3 * half - 2:])
        return torch.cat([left[:, :, :-1], middle, right[:, :, 1:]], dim=2)
class FusionHoughStage(nn.Module):
    """Fuses the per-scale hough features of one branch ("xy" or "cupdown")
    into a single-channel logit map.

    Each scale is upsampled along the bin axis to a common length,
    concatenated over channels, and reduced by a small conv stack — by
    default with separate conv stacks for the two bin columns (e.g. x vs. y
    or c-up vs. c-down).
    """

    def __init__(self, cfg, type: str, c_ori, c0, c1, c2, c3, c4, upsample_rate=None, upsampler_class=None):
        super(FusionHoughStage, self).__init__()
        self.type = type
        self.cfg = cfg
        if upsample_rate is None:
            upsample_rate = [2, 4, 8, 8, 8]

        def getSampler(u):
            # Per-scale bin-axis upsampler; identity when no scaling needed.
            if u == 1:
                return nn.Identity()
            elif upsampler_class is not None:
                return upsampler_class(u)
            else:
                return nn.Upsample(scale_factor=(u, 1), mode='bilinear', align_corners=False)

        self.upsamplers = nn.ModuleList([
            getSampler(u) for u in upsample_rate
        ])
        self.c_total = c0 + c1 + c2 + c3 + c4
        self.conv1 = nn.Sequential(
            nn.Conv2d(self.c_total, self.c_total // 2, kernel_size=(3, 1), padding=(1, 0)),
            nn.BatchNorm2d(self.c_total // 2),
            nn.ReLU(),
            nn.Conv2d(self.c_total // 2, self.c_total // 2, kernel_size=(1, 1), padding=(0, 0)),
            nn.BatchNorm2d(self.c_total // 2),
            nn.ReLU(),
            nn.Conv2d(self.c_total // 2, 1, kernel_size=(1, 1), padding=(0, 0)),
        )
        # Second conv stack, applied to the second bin column when
        # use_different_conv1 is enabled (the default).
        self.conv1_2 = nn.Sequential(
            nn.Conv2d(self.c_total, self.c_total // 2, kernel_size=(3, 1), padding=(1, 0)),
            nn.BatchNorm2d(self.c_total // 2),
            nn.ReLU(),
            nn.Conv2d(self.c_total // 2, self.c_total // 2, kernel_size=(1, 1), padding=(0, 0)),
            nn.BatchNorm2d(self.c_total // 2),
            nn.ReLU(),
            nn.Conv2d(self.c_total // 2, 1, kernel_size=(1, 1), padding=(0, 0)),
        )
        self.use_different_conv1 = True
        # self.conv2 = nn.Sequential(nn.Conv2d(self.c_total // 2, 1, kernel_size=(1, 1), padding=(0, 0), bias=False))

    def forward(self, x):
        # Upsample each scale to a common bin length, then stack channels.
        concat_feat = torch.cat([sam(t) for t, sam in zip(x, self.upsamplers)], 1)
        if self.use_different_conv1:
            # Process the two bin columns with separate conv stacks.
            feat = torch.cat([self.conv1(concat_feat[:, :, :, 0:1]), self.conv1_2(concat_feat[:, :, :, 1:2])], dim=3)
        else:
            feat = self.conv1(concat_feat)
        prob = feat  # self.conv2(feat)
        # concat_feat = concat_feat.permute(0,2,1,3).reshape(f_ori.shape[0], 256, -1)
        # prob = self.linear(concat_feat).unsqueeze(1)
        return prob
| 12,016 | 49.491597 | 117 | py |
DMH-Net | DMH-Net-main/config.py | import warnings
from pathlib import Path
import yaml
from easydict import EasyDict
def log_config_to_file(cfg, pre='cfg', logger=None):
    """Recursively dump an EasyDict config tree to *logger*, one line per leaf."""
    for key, val in cfg.items():
        if not isinstance(val, EasyDict):
            # Leaf entry: log "prefix.key: value".
            logger.info('%s.%s: %s' % (pre, key, val))
        else:
            # Nested config: announce the sub-tree, then recurse with an
            # extended dotted prefix.
            logger.info('\n%s.%s = edict()' % (pre, key))
            log_config_to_file(val, pre='%s.%s' % (pre, key), logger=logger)
def cfg_from_list(cfg_list, config):
    """Set config keys via list (e.g., from command line).

    cfg_list alternates key/value pairs: ['a.b', '1', 'c', 'x'].  Dotted
    keys address nested sections, which are created on demand.
    """
    from ast import literal_eval
    assert len(cfg_list) % 2 == 0
    for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
        key_list = k.split('.')
        d = config
        for subkey in key_list[:-1]:
            if subkey not in d: d[subkey] = {}
            # assert subkey in d, 'NotFoundKey: %s' % subkey
            d = d[subkey]
        subkey = key_list[-1]
        # assert subkey in d, 'NotFoundKey: %s' % subkey
        try:
            # literal_eval turns '1' -> int, '[1,2]' -> list, etc.
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            # Not a Python literal: keep the raw string.
            value = v
        if subkey in d and type(value) != type(d[subkey]) and isinstance(d[subkey], EasyDict):
            # 'k1:v1,k2:v2' syntax updates individual keys of a nested section,
            # coercing each value to the existing entry's type.
            key_val_list = value.split(',')
            for src in key_val_list:
                cur_key, cur_val = src.split(':')
                val_type = type(d[subkey][cur_key])
                cur_val = val_type(cur_val)
                d[subkey][cur_key] = cur_val
        elif subkey in d and type(value) != type(d[subkey]) and isinstance(d[subkey], list):
            # Comma-separated values replace a list, coerced to the element type.
            val_list = value.split(',')
            for i, x in enumerate(val_list):
                val_list[i] = type(d[subkey][0])(x)
            d[subkey] = val_list
        else:
            if subkey in d and type(value) != type(d[subkey]):
                warnings.warn('type {} does not match original type {}'.format(type(value), type(d[subkey])))
            d[subkey] = value
def merge_new_config(config, new_config):
    """Recursively merge new_config into config (in place) and return config.

    If new_config contains '_BASE_CONFIG_', that YAML file is loaded into
    config first, so keys given explicitly in new_config take precedence.
    """
    if '_BASE_CONFIG_' in new_config:
        with open(new_config['_BASE_CONFIG_'], 'r') as f:
            try:
                yaml_config = yaml.load(f, Loader=yaml.FullLoader)
            except AttributeError:
                # Old PyYAML (< 5.1) has no FullLoader attribute.
                yaml_config = yaml.load(f)
        config.update(EasyDict(yaml_config))
    for key, val in new_config.items():
        if not isinstance(val, dict):
            config[key] = val
            continue
        if key not in config:
            config[key] = EasyDict()
        merge_new_config(config[key], val)
    return config
def cfg_from_yaml_file(cfg_file, config):
    """Load a YAML config file and merge its contents into config.

    Returns the (mutated) config for convenience.
    """
    with open(cfg_file, 'r') as f:
        try:
            new_config = yaml.load(f, Loader=yaml.FullLoader)
        except AttributeError:
            # Old PyYAML (< 5.1) has no FullLoader attribute.
            new_config = yaml.load(f)
    merge_new_config(config=config, new_config=new_config)
    return config
# Global config container shared across the project; populated by
# cfg_from_yaml_file / cfg_from_list at program start.
cfg = EasyDict()
# Repository root: one level above this file's directory.
cfg.ROOT_DIR = (Path(__file__).resolve().parent / '../').resolve()
# Default process rank (used by distributed setups).
cfg.LOCAL_RANK = 0
| 3,026 | 32.633333 | 109 | py |
DMH-Net | DMH-Net-main/eval.py | import argparse
import json
import os
# import ipdb
import sys
import time
import warnings
from pathlib import Path
import cv2
import numpy as np
import torch
import torch.nn as nn
import yaml
from torch.utils.data import DataLoader
from tqdm import trange
from config import cfg, cfg_from_yaml_file, cfg_from_list, merge_new_config
from misc.utils import pipeload
from model import ENCODER_RESNET, ENCODER_DENSENET, ENCODER_HOUGH, DMHNet
from perspective_dataset import PerspectiveDataset, worker_init_fn
from postprocess.postprocess2 import postProcess
from visualization import visualizeWithPostResults
from torch.nn import functional as F
SAVE_JSON = False
def valid(cfg, net, loader_valid, dataset_valid, device, visualize_count=0, show=False, dpi=None, print_detail=False,
          valid_epoch=None):
    """
    Core evaluation loop shared by validation and test.
    :param valid_epoch: None means this is a test run; an int means this is a
        validation run and gives the epoch number that triggered it.
    """
    net.eval()
    # torch.cuda.empty_cache()
    iterator_valid = iter(loader_valid)
    valid_loss = {}
    metrics = {}
    metrics_by_corner = {}
    imgs = {}
    # Randomly choose `visualize_count` batches to visualize.
    visualize_index = np.zeros(len(loader_valid), dtype=bool)
    visualize_index[np.random.choice(len(loader_valid), size=visualize_count, replace=False)] = True
    for valid_idx in trange(len(loader_valid), desc='Eval', position=2):
        input = next(iterator_valid)
        valid_batch_size = input["e_img"].size(0)
        with torch.no_grad():
            for k in input:
                if isinstance(input[k], torch.Tensor):
                    input[k] = input[k].to(device)
            losses, results_dict = net(input)
            # Post-process each sample into layout predictions + metrics.
            postResults = []
            for i in range(len(input["filename"])):
                print(input["filename"][i])
                postStartTime = time.time()
                postResult = postProcess(cfg, input, results_dict, i, is_valid_mode=valid_epoch is not None)
                postResults.append(postResult)
                if print_detail:
                    (_, gt_lwh, _), (_, pred_lwh, _), metric = postResult
                    print("{:s} pred{:s} gt{:s} {:s}".format(str(metric), str(pred_lwh), str(gt_lwh),
                                                             input["filename"][i]))
                if SAVE_JSON or ("args" in globals() and args.print_json):
                    # Export predicted corners as normalized uv coordinates.
                    (_, _, _), (_, _, pred_cors), metric = postResult
                    uv = pred_cors.cpu().numpy() / input["e_img"].shape[-1:-3:-1]
                    uv = [[o.item() for o in pt] for pt in uv]
                    if SAVE_JSON:
                        JSON_DIR = "./result_json"
                        os.makedirs(JSON_DIR, exist_ok=True)
                        with open(os.path.join(JSON_DIR, input["filename"][i] + ".json"), "w") as f:
                            json.dump({"uv": uv, "3DIoU": metric["3DIoU"].item()}, f)
                    elif "args" in globals() and args.print_json:
                        print(json.dumps({"uv": uv, "3DIoU": metric["3DIoU"].item()}))
                _, _, metric = postResult
                # Accumulate metrics overall, and bucketed by gt corner count.
                for k, v in metric.items():
                    if isinstance(v, str): continue
                    metrics[k] = metrics.get(k, 0) + v.item()
                    if "n_corners_type" in metric:
                        k2 = metric["n_corners_type"] + "/" + k
                        if k2 not in metrics_by_corner: metrics_by_corner[k2] = []
                        metrics_by_corner[k2].append(v.item())
                metrics["gt_n_corners"] = metrics.get("gt_n_corners", 0) + (len(input["cor"][i]) // 2)
            for k, v in losses.items():
                valid_loss[k] = valid_loss.get(k, 0) + v.item() * valid_batch_size
            if visualize_index[valid_idx]:
                visualize_type = cfg.get("VISUALIZATION", {}).get("TYPE")
                imgs.update(visualizeWithPostResults(cfg, input, results_dict, postResults, drawtypes=visualize_type,
                                                     show=show, dpi=dpi))
    # Average losses / metrics over the whole dataset.
    for k, v in valid_loss.items():
        valid_loss[k] = v / len(dataset_valid)
    for k, v in metrics.items():
        metrics[k] = v / len(dataset_valid)
    for k, v in metrics_by_corner.items():
        metrics[k] = torch.tensor(v).mean().item()
    return valid_loss, imgs, metrics
if __name__ == '__main__':
    # Command-line entry point for standalone evaluation.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--cfg_file', type=str, required=True, help='specify the config for training')
    parser.add_argument('--ckpt', required=True, help='checkpoint for evaluation')
    parser.add_argument('--visu_count', default=0, type=int, help='visualize how many batches')
    parser.add_argument('--visu_all', action='store_true', help='visualize all samples')
    parser.add_argument('--visu_path', help='where to save the visualization result (default: plt.show)')
    parser.add_argument('--visu_type',
                        help='specify visualization type (either str or List[str], see visualization.py)')
    parser.add_argument('--no_post_process', action='store_true', help='don\'t post process')
    parser.add_argument('--develop_post_process', action='store_true', help='use POST_PROCESS.METHOD = \'develop\'')
    parser.add_argument('--valid_set', action='store_true', help='use valid set')
    parser.add_argument('--batch_size', default=2, type=int, help='mini-batch size')
    parser.add_argument('--input_file', type=str, help='eval on one single input image')
    parser.add_argument('--print_detail', action='store_true', help='print detail for each sample')
    parser.add_argument('--save_json', action='store_true', help='save json to ./result_json')
    parser.add_argument('--print_json', action='store_true', help='print json for each sample')
    parser.add_argument('--output_file', nargs="?", const=True,
                        help='whether to output to file 如果不填写参数,默认输出到eval_outputs/{time}.out')
    # Model related
    parser.add_argument('--backbone',
                        default='drn38',
                        choices=ENCODER_RESNET + ENCODER_DENSENET + ENCODER_HOUGH,
                        help='backbone of the network')
    parser.add_argument('--no_rnn', action='store_true', help='whether to remove rnn or not')
    # Dataset related arguments
    # TODO the original code swapped the test and training sets; there is no validation set
    # the new code uses the original training and test sets as-is
    # parser.add_argument('--train_root_dir',
    #                     default='data/layoutnet_dataset/test',
    #                     help='root directory to training dataset. '
    #                     'should contains img, label_cor subdirectories')
    parser.add_argument('--valid_root_dir',
                        default='data/layoutnet_dataset/train',
                        help='root directory to validation dataset. '
                             'should contains img, label_cor subdirectories')
    parser.add_argument('--num_workers', default=4 if not sys.gettrace() else 0, type=int,
                        help='numbers of workers for dataloaders')
    # Misc arguments
    parser.add_argument('--no_cuda', action='store_true', help='disable cuda')
    parser.add_argument('--seed', default=594277, type=int, help='manual seed')
    parser.add_argument('--disp_iter', type=int, default=1, help='iterations frequency to display')
    parser.add_argument('--no_multigpus', action='store_true', help='disable data parallel')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    args = parser.parse_args()
    if args.save_json: SAVE_JSON = True
    # Load config and derive experiment tag/group from the cfg file path.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    # --output_file with no value means "auto-name under eval_outputs/".
    output_file = args.output_file if args.output_file != True else "eval_outputs/{:d}.out".format(int(time.time()))
    if output_file is not None:
        os.makedirs(os.path.dirname(output_file), exist_ok=True)
        output_file = open(output_file, "w")
    if args.visu_type:
        merge_new_config(cfg, {"VISUALIZATION": {"TYPE": yaml.safe_load(args.visu_type)}})
    if args.no_post_process:
        cfg.POST_PROCESS.METHOD = "None"
    elif args.develop_post_process:
        cfg.POST_PROCESS.METHOD = "develop"
    device = torch.device('cpu' if args.no_cuda else 'cuda')
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    result_dir = os.path.join("eval_result", str(int(time.time())))
    os.makedirs(result_dir, exist_ok=True)
    # Create dataloader
    print("num_workers: " + str(args.num_workers))
    dataset_valid = PerspectiveDataset(cfg, "test" if not args.valid_set else "valid",  # TODO the new code currently validates on the test set
                                       filename=args.input_file)
    loader_valid = DataLoader(dataset_valid,
                              args.batch_size,
                              collate_fn=dataset_valid.collate,
                              shuffle=False,
                              drop_last=False,
                              num_workers=args.num_workers,
                              pin_memory=not args.no_cuda,
                              worker_init_fn=worker_init_fn)
    # Create model
    net = DMHNet(cfg, cfg.MODEL.get("BACKBONE", {}).get("NAME", "drn38"), not args.no_rnn).to(device)
    if not args.no_multigpus:
        net = nn.DataParallel(net)  # multi-GPU
    print(str(cfg.POST_PROCESS))
    if output_file: output_file.write(str(cfg.POST_PROCESS) + "\n\n")
    if args.ckpt == "None":
        warnings.warn("ckpt参数显式传入了None!将不会加载任何参数!")
    else:
        state_dict = pipeload(args.ckpt, map_location='cpu')["state_dict"]
        net.load_state_dict(state_dict, strict=True)
    # Run evaluation, then dump losses/metrics to stdout (and optionally to file).
    visualize_count = len(loader_valid) if args.visu_all else args.visu_count
    show = args.visu_path is None
    valid_loss, imgs, metrics = valid(cfg, net, loader_valid, dataset_valid, device, visualize_count, show=show,
                                      dpi=200, print_detail=args.print_detail)
    for k, v in valid_loss.items():
        k = 'eval_loss/%s' % k
        print("{:s} {:f}".format(k, v))
        if output_file: output_file.write("{:s} {:f}".format(k, v) + "\n")
    for k, v in metrics.items():
        k = 'metric/%s' % k
        print("{:s} {:f}".format(k, v))
        if output_file: output_file.write("{:s} {:f}".format(k, v) + "\n")
    if output_file:
        output_file.write("\n\n")
        output_file.write(str(cfg) + "\n")
    # Save visualization images if a destination directory was given.
    for k, v in imgs.items():
        if args.visu_path:
            os.makedirs(args.visu_path, exist_ok=True)
            success = cv2.imwrite(os.path.join(args.visu_path, k + ".jpg"), cv2.cvtColor(v, cv2.COLOR_RGB2BGR))
| 10,926 | 45.300847 | 117 | py |
DMH-Net | DMH-Net-main/perspective_dataset.py | import os
import warnings
import numpy as np
import torch
import torch.utils.data as data
from PIL import Image
from easydict import EasyDict
from scipy.spatial.distance import cdist
from shapely.geometry import LineString
from torch.utils.data._utils.collate import default_collate
from torchvision.transforms import transforms
from e2plabel.e2plabelconvert import generatePerspective, linesPostProcess
from misc import panostretch
from misc import post_proc
class PerspectiveDataset(data.Dataset):
    """Dataset turning equirectangular panoramas + corner labels into six
    perspective views with Hough-style line labels (x / y / through-center)."""

    def __init__(self,
                 cfg: EasyDict,
                 split: str,
                 filename=None,
                 train_mode=False):
        self.cfg = cfg
        self.train_mode = train_mode
        self.rotate = None  # rotate  # TODO check against the original code how rotate was implemented
        self.H, self.W = (512, 1024) if "IMG_SIZE" not in cfg.DATA else cfg.DATA.IMG_SIZE
        self.FOV = 90 if "FOV" not in cfg.DATA else cfg.DATA.FOV
        self.P = 512 if "PERSPECTIVE_SIZE" not in cfg.DATA else cfg.DATA.PERSPECTIVE_SIZE
        self.bin_num = 512
        self.hough_label_gradual_type = "exp"
        # e2p view parameters; see e2plabelconvert.py for the meaning of each entry
        self.view_args = [
            [(self.FOV, self.FOV), 0, 0],
            [(self.FOV, self.FOV), 90, 0],
            [(self.FOV, self.FOV), 180, 0],
            [(self.FOV, self.FOV), -90, 0],
            [(self.FOV, self.FOV), 0, 90],
            [(self.FOV, self.FOV), 0, -90],
        ]
        self.view_name = ['F', 'R', 'B', 'L', 'U', 'D']
        self.view_size = (self.P, self.P)
        self.ch = -1.6
        self.randomEraser = transforms.RandomErasing()
        # self._check_dataset()
        root_dir = cfg.DATA.ROOT_DIR
        self.path = os.path.join(root_dir, split)
        self.img_dir = os.path.join(self.path, 'img')
        self.cor_dir = os.path.join(self.path, 'label_cor')
        self.img_fnames = sorted([fname for fname in os.listdir(self.img_dir)])
        if cfg.DATA.get("PREFIX") is not None:
            self.img_fnames = [fname for fname in self.img_fnames
                               if sum([1 if fname.find(p) == 0 else 0 for p in cfg.DATA.PREFIX]) > 0  # keep only images matching one of the prefixes
                               ]
        if filename is not None:  # single-image mode
            self.img_fnames = [filename]
        # Read every image's corner labels up front so they can be filtered below.
        self.cors = []
        for filename in self.img_fnames:
            with open(os.path.join(self.cor_dir, filename[:-4] + ".txt")) as f:
                cor = np.array([line.strip().split() for line in f if line.strip()], np.float32)
            self.cors.append(cor)
        # Filter samples by corner count if the config restricts it.
        if cfg.DATA.get("USE_CORNER"):
            corner_count = [len(cor) // 2 for cor in self.cors]
            mask = [count in cfg.DATA.USE_CORNER for count in corner_count]
            self.img_fnames = [v for v, m in zip(self.img_fnames, mask) if m]
            self.cors = [v for v, m in zip(self.cors, mask) if m]
        # # TODO
        # try:
        #     idx = self.img_fnames.index("TbHJrupSAjP_235d08ff9f3f40ce9fa9e97696265dda.png")
        # except:
        #     idx = 0
        # self.img_fnames = self.img_fnames[idx:idx + 1] * 1000
        # self.cors = self.cors[idx: idx + 1] * 1000
        # a = 1

    def _check_dataset(self):
        """Sanity-check that every label file referenced actually exists."""
        for fname in self.txt_fnames:
            assert os.path.isfile(os.path.join(self.cor_dir, fname)), \
                '%s not found' % os.path.join(self.cor_dir, fname)
        # for fname in self.pkl_fnames:
        #     assert os.path.isfile(os.path.join(self.label_p_dir, fname)), \
        #         '%s not found' % os.path.join(self.label_p_dir, fname)

    def __len__(self):
        return len(self.img_fnames)

    def __getitem__(self, idx):
        return self.getItem(self.img_fnames[idx], self.cors[idx])

    def getItem(self, filename, cor=None):
        """Load one panorama, apply augmentation, and build all labels.

        Returns a dict with the panorama, the six perspective views, and
        the per-view Hough labels / line peaks.
        """
        # TODO labels and the six view faces are currently generated on the fly from e_img
        # load the panorama and the corner labels
        img_path = os.path.join(self.img_dir, filename)
        e_img = np.array(Image.open(img_path), np.float32)[..., :3] / 255.
        if cor is None:
            with open(os.path.join(self.cor_dir, filename[:-4] + ".txt")) as f:
                cor = np.array([line.strip().split() for line in f if line.strip()], np.float32)
        # fname = self.img_fnames[idx]
        # P = self.P
        # l = np.tan(np.deg2rad(self.FOV / 2))
        # bin_num = self.bin_num
        H = self.H
        W = self.W
        # Use cor make smooth angle label
        # Corner with minimum x should at the beginning
        cor = np.roll(cor[:, :2], -2 * np.argmin(cor[::2, 0]), 0)
        # # Detect occlusion
        # occlusion = find_occlusion(cor[::2].copy()).repeat(2)
        AUG_RECORD = filename + " "
        # Data augmentation: only enabled in train mode (or when TEST_NEED_AUG is set).
        if self.train_mode or self.cfg.get("TEST_NEED_AUG", False):
            # Stretch augmentation (rescales both image and labels)
            if self.cfg.DATA.AUGMENT.get("stretch"):
                max_stretch = self.cfg.DATA.AUGMENT.stretch
                if max_stretch == True: max_stretch = 2.0  # default
                xmin, ymin, xmax, ymax = cor2xybound(cor)
                kx = np.random.uniform(0.5, max_stretch)
                ky = np.random.uniform(0.5, max_stretch)
                a = np.random.randint(2)
                b = np.random.randint(2)
                if a == 0:
                    kx = max(1 / kx, min(0.5 / xmin, 1.0))
                else:
                    kx = min(kx, max(10.0 / xmax, 1.0))
                if b == 0:
                    ky = max(1 / ky, min(0.5 / ymin, 1.0))
                else:
                    ky = min(ky, max(10.0 / ymax, 1.0))
                e_img, cor, _ = panostretch.pano_stretch(e_img, cor, kx, ky)
                AUG_RECORD += "estre{:f}{:f}{:d}{:d} ".format(kx, ky, a, b)
            # Random flip
            if self.cfg.DATA.AUGMENT.get("flip") and np.random.randint(2) == 0:
                e_img = np.flip(e_img, axis=1).copy()
                cor[:, 0] = e_img.shape[1] - 1 - cor[:, 0]
                AUG_RECORD += "efilp "
            # Random erase in random position
            if self.cfg.DATA.AUGMENT.get("erase") and np.random.randint(
                    self.cfg.DATA.AUGMENT.get("erase_EVERY", 2)) == 0:
                # H, W = e_img.shape[:2]
                n_holes = np.random.randint(self.cfg.DATA.AUGMENT.get("erase_COUNT", 10))
                hole_length_y = self.cfg.DATA.AUGMENT.get("erase_SIZE", 50)
                hole_length_x = self.cfg.DATA.AUGMENT.get("erase_SIZE", 50)
                mask = np.ones((H, W, 3), np.float32)
                noise = np.zeros((H, W, 3), np.float32)
                for n in range(n_holes):
                    xhole = np.random.randint(W)
                    yhole = np.random.randint(H)
                    yhole1 = np.clip(yhole - hole_length_y // 2, 0, H)
                    yhole2 = np.clip(yhole + hole_length_y // 2, 0, H)
                    xhole1 = np.clip(xhole - hole_length_x // 2, 0, W)
                    xhole2 = np.clip(xhole + hole_length_x // 2, 0, W)
                    mask[yhole1:yhole2, xhole1:xhole2] = 0
                    noise[yhole1:yhole2, xhole1:xhole2] = np.random.rand(yhole2 - yhole1, xhole2 - xhole1, 3)
                e_img = e_img * mask  # + noise
            # Erase holes centered on the floor/ceiling boundary lines.
            if self.cfg.DATA.AUGMENT.get("bon_erase"):
                # H, W = img.shape[:2]
                n_holes = self.cfg.DATA.AUGMENT.get("erase_COUNT", 10)  # 10
                hole_length_y = self.cfg.DATA.AUGMENT.get("erase_SIZE", 50)  # 50
                hole_length_x = self.cfg.DATA.AUGMENT.get("erase_SIZE_X", 100)  # 100
                mask = np.ones((H, W, 3), np.float32)
                noise = np.zeros((H, W, 3), np.float32)
                bon_floor_x, bon_floor_y = cor[1::2, 0], cor[1::2, 1]
                bon_ceil_x, bon_ceil_y = cor[0::2, 0], cor[0::2, 1]
                bon_floor = np.interp(np.arange(W),
                                      bon_floor_x,
                                      bon_floor_y,
                                      period=W)
                bon_ceil = np.interp(np.arange(W), bon_ceil_x, bon_ceil_y, period=W)
                for n in range(n_holes):
                    xhole = np.random.randint(W)
                    if True:  # self.bon_erase:
                        if n % 2 == 0:
                            yhole = int(bon_floor[xhole])
                        else:
                            yhole = int(bon_ceil[xhole])
                    else:  # if self.erase:
                        yhole = np.random.randint(H)
                    yhole1 = np.clip(yhole - hole_length_y // 2, 0, H)
                    yhole2 = np.clip(yhole + hole_length_y // 2, 0, H)
                    xhole1 = np.clip(xhole - hole_length_x // 2, 0, W)
                    xhole2 = np.clip(xhole + hole_length_x // 2, 0, W)
                    mask[yhole1:yhole2, xhole1:xhole2] = 0
                    noise[yhole1:yhole2,
                          xhole1:xhole2] = np.random.rand(yhole2 - yhole1,
                                                          xhole2 - xhole1, 3)
                e_img = e_img * mask + noise
            # Random gamma augmentation
            if self.cfg.DATA.AUGMENT.get("gamma"):
                p = np.random.uniform(1, 2)
                if np.random.randint(2) == 0:
                    p = 1 / p
                e_img = e_img ** p
            # Random noise augmentation
            if self.cfg.DATA.AUGMENT.get("noise"):
                if np.random.randint(2) == 0:
                    noise = np.random.randn(*e_img.shape) * 0.05
                    e_img = np.clip(e_img + noise, 0, 1)
        # TODO optionally dump the processed dataset to disk
        # save_dir = "processed_input/4_stf"
        # os.makedirs(save_dir, exist_ok=True)
        # cv2.imwrite(os.path.join(save_dir, filename),
        #             cv2.cvtColor(np.round(e_img * 255).astype(np.uint8), cv2.COLOR_RGB2BGR))
        # Equirect-level augmentation done; generate the per-view ground truth.
        pres = generatePerspective(e_img, cor, self.view_name, self.view_args, self.view_size)
        # Parse each view and convert its lines to label vectors.
        p_imgs = []
        xLabels, yLabels, cUpLabels, cDownLabels = [], [], [], []
        peakss = []
        liness = []
        for d in pres:
            p_img = torch.FloatTensor(d["img"].transpose([2, 0, 1]))
            if self.train_mode or self.cfg.get("TEST_NEED_AUG", False):
                AUG_RECORD += d["name"] + " "
                # Stretch augmentation (rescales both image and labels)
                if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("stretch") and np.random.randint(2) == 0:
                    max_stretch = self.cfg.DATA.PERSPECTIVE_AUGMENT.stretch
                    if max_stretch == True: max_stretch = 1.5  # default
                    stretch_value = np.random.uniform(1.0, max_stretch)
                    originSize = p_img.shape[1:]
                    newSize = [round(v * stretch_value) for v in originSize]
                    ratio = np.array([n / o for o, n in zip(originSize, newSize)])
                    centerPos = np.array([(v - 1) // 2 for v in originSize])
                    p_img = transforms.Resize(newSize)(p_img)
                    p_img = transforms.CenterCrop(originSize)(p_img)
                    for line in d["lines"]:
                        line[3:5] = (line[3:5] - centerPos) * ratio + centerPos
                        line[5:7] = (line[5:7] - centerPos) * ratio + centerPos
                    AUG_RECORD += "stre{:f} ".format(stretch_value)
                # Random flip
                if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("vertical_flip") and np.random.randint(3) == 0:
                    p_img = p_img.flip(-2)
                    for line in d["lines"]:
                        line[4:7:2] = p_img.shape[-2] - 1 - line[4:7:2]
                    AUG_RECORD += "vert "
                if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("horizontal_flip") and np.random.randint(3) == 0:
                    p_img = p_img.flip(-1)
                    for line in d["lines"]:
                        line[3:7:2] = p_img.shape[-1] - 1 - line[3:7:2]
                    AUG_RECORD += "hori "
                rotated = False
                if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("rotation"):
                    r = np.random.randint(6)
                    if r == 0:
                        # rotate 90 degrees clockwise:
                        # transpose first, then flip horizontally
                        p_img = p_img.transpose(-2, -1).flip(-1)
                        for line in d["lines"]:
                            line[3:7] = line[[4, 3, 6, 5]]
                            line[3:7:2] = p_img.shape[-1] - 1 - line[3:7:2]
                        rotated = True
                        AUG_RECORD += "rota0 "
                    elif r == 1:
                        # rotate 90 degrees counter-clockwise:
                        # transpose first, then flip vertically
                        p_img = p_img.transpose(-2, -1).flip(-2)
                        for line in d["lines"]:
                            line[3:7] = line[[4, 3, 6, 5]]
                            line[4:7:2] = p_img.shape[-2] - 1 - line[4:7:2]
                        rotated = True
                        AUG_RECORD += "rota1 "
                # Random erase in random position
                if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("erase"):
                    p_img = self.randomEraser(p_img)
                    # labels are unaffected by erasing
                # Random gamma augmentation
                if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("gamma"):
                    p = np.random.uniform(1, 2)
                    if np.random.randint(2) == 0:
                        p = 1 / p
                    p_img = p_img ** p
                # Random noise augmentation
                if self.cfg.DATA.get("PERSPECTIVE_AUGMENT", {}).get("noise") and np.random.randint(2) == 0:
                    noise = torch.randn(p_img.shape) * 0.025
                    p_img = torch.clip(p_img + noise, 0, 1)
            oldDirection = [vec[7] for vec in d["lines"]]
            d["lines"], mask = linesPostProcess(d["lines"], p_img.shape[1:], d["name"] == "U" or d["name"] == "D",
                                                return_mask=True)
            oldDirection = [v for v, b in zip(oldDirection, mask) if b]
            if rotated and not (d["name"] == "U" or d["name"] == "D"):
                # When a side view was rotated, swap direction labels according to
                # oldDirection: original 0 becomes 1 (new value computed as 0),
                # original 1 becomes 0 (new value computed as 2).
                for i, (oldValue, newValue) in enumerate(zip(oldDirection, [vec[7] for vec in d["lines"]])):
                    if oldValue == 0:
                        assert newValue == 0
                        d["lines"][i][7] = 1
                    elif oldValue == 1:
                        # assert newValue == 2  # not required: an exactly vertical
                        # line has yLR=nan and still yields direct=1.
                        assert newValue != 0  # so newValue may be 1 rather than 2; only assert it is not 0
                        d["lines"][i][7] = 0
            p_imgs.append(p_img)
            if self.cfg.MODEL.HOUGH.CLINE_TYPE == "NEW":
                liness.append(np.array(d["lines"]))
                try:
                    peaks = self.linesToPeaksNew(d["lines"], self.view_size)
                except:
                    assert False, AUG_RECORD
                peakss.append(peaks)
                peaks_for_label = peaks
                label_hw = self.view_size
                xPeaks, yPeaks, cUpPeaks, cDownPeaks = peaks_for_label
                xLabels.append(
                    self.generate_gradual_hough_label(xPeaks, label_hw[1], type=self.hough_label_gradual_type,
                                                      base=self.cfg.MODEL.HOUGH.GRADUAL_LABEL.XY))
                yLabels.append(
                    self.generate_gradual_hough_label(yPeaks, label_hw[0], type=self.hough_label_gradual_type,
                                                      base=self.cfg.MODEL.HOUGH.GRADUAL_LABEL.XY))
                cline_angle_num = label_hw[1] + label_hw[0] // 2 * 2 - 2
                cUpLabels.append(
                    self.generate_gradual_hough_label(cUpPeaks, cline_angle_num, type=self.hough_label_gradual_type,
                                                      base=self.cfg.MODEL.HOUGH.GRADUAL_LABEL.CUPDOWN))
                cDownLabels.append(
                    self.generate_gradual_hough_label(cDownPeaks, cline_angle_num, type=self.hough_label_gradual_type,
                                                      base=self.cfg.MODEL.HOUGH.GRADUAL_LABEL.CUPDOWN))
            else:
                raise NotImplementedError()
        # Recover the room height from floor/ceiling corner geometry.
        n_cor = len(cor)
        gt_floor_coor = cor[1::2]
        gt_ceil_coor = cor[0::2]
        gt_floor_xyz = np.hstack([
            post_proc.np_coor2xy(gt_floor_coor, self.ch, self.W, self.H, floorW=1, floorH=1),
            np.zeros((n_cor // 2, 1)) + self.ch,
        ])
        gt_c = np.sqrt((gt_floor_xyz[:, :2] ** 2).sum(1))
        gt_v2 = post_proc.np_coory2v(gt_ceil_coor[:, 1], self.H)
        gt_ceil_z = gt_c * np.tan(gt_v2)
        height = np.array([gt_ceil_z.mean() - self.ch], dtype=np.float32)
        # Convert all data to tensor
        e_img = torch.FloatTensor(e_img.transpose([2, 0, 1]))
        # angle = torch.FloatTensor(angle)
        # up_bin256 = torch.FloatTensor(up_bin256.copy())
        # down_bin256 = torch.FloatTensor(down_bin256.copy())
        height = torch.FloatTensor(height)
        out_dict = {
            "filename": filename,
            "e_img": e_img,
            "cor": cor,
            "height": height,
            "p_imgs": torch.stack(p_imgs, 0),
            "xLabels": np.array(xLabels).astype(np.float32),
            "yLabels": np.array(yLabels).astype(np.float32),
            "cUpLabels": np.array(cUpLabels).astype(np.float32),
            "cDownLabels": np.array(cDownLabels).astype(np.float32),
            "peaks": peakss,
            "lines": liness
        }
        return out_dict

    @staticmethod
    def generate_gradual_hough_label(peaks, res_len, loop=False, type="exp", base=0.96):
        """
        Build a soft (gradual) 1-D label array from peak positions: the closer
        a bin is to a peak, the larger its value.
        :param peaks: array of peak positions
        :param res_len: length of the resulting array
        :param loop: whether distances wrap around (circular domain)
        """
        res = []
        # NOTE: np.float was removed in NumPy 1.24; plain float is equivalent (float64).
        res.append(cdist(peaks.reshape(-1, 1), np.arange(res_len, dtype=float).reshape(-1, 1), p=1))
        if loop:
            res.append(cdist(peaks.reshape(-1, 1), np.arange(res_len).reshape(-1, 1) + res_len, p=1))
            res.append(cdist(peaks.reshape(-1, 1), np.arange(res_len).reshape(-1, 1) - res_len, p=1))
        dist = np.min(res, 0)
        if dist.shape[0] > 0:
            nearest_dist = dist.min(0)  # shape (res_len,): distance from each bin to the nearest peak
        else:
            # TODO how should the no-peak case be handled? Treating the distance
            # as inf makes the whole label zero — is that reasonable?
            nearest_dist = np.ones(dist.shape[1:], dtype=dist.dtype) * np.inf
        if type == "exp":
            return (base ** nearest_dist).reshape(-1)
        elif type == "nearest_only":
            return (nearest_dist.reshape(-1) <= 0.5).astype(nearest_dist.dtype)
        elif type == "nearest_k":
            return (nearest_dist.reshape(-1) <= base + 0.5).astype(nearest_dist.dtype)
        else:
            raise NotImplementedError()

    def linesToPeaks(self, lines, img_hw):
        """
        :return xPeaks, yPeaks, cUpPeaks (through-center lines, upper half),
            cDownPeaks (through-center lines, lower half)
        """
        xPeaks, yPeaks, cUpPeaks, cDownPeaks = [], [], [], []
        for line in lines:
            if line[7] == 0:
                xPeaks.append(np.mean(line[3:7:2]))
            elif line[7] == 1:
                yPeaks.append(np.mean(line[4:7:2]))
            elif line[7] == 2:
                yCenter = np.mean(line[4:7:2])
                ks = (line[4:7:2] - ((img_hw[0] - 1) / 2)) / (line[3:7:2] - ((img_hw[1] - 1) / 2))
                # The angle is defined as arctan of the slope: upper half circle runs
                # 0° at the far left, clockwise to 180°; lower half circle runs 0° at
                # the far right, clockwise to 180°.
                deg = np.rad2deg(np.arctan(ks))
                deg[deg < 0] += 180
                meanAngleDeg = np.mean(deg)
                if yCenter <= img_hw[0] / 2:
                    cUpPeaks.append(meanAngleDeg)
                else:
                    cDownPeaks.append(meanAngleDeg)
        return np.array(xPeaks), np.array(yPeaks), np.array(cUpPeaks), np.array(cDownPeaks)

    @staticmethod
    def coord2AngleValue(x, y, img_hw):
        """
        Find which image border the center ray through (x, y) intersects and
        convert the intersection directly into an angle-bin value.
        :param x,y: coordinates in image space (not center-origin)
        :return: the angle value used by the "NEW" scheme; 0 or 1 indicating
            the upper or lower half of the image
        """
        h2, w2 = (img_hw[0] - 1) / 2, (img_hw[1] - 1) / 2
        h2f = img_hw[0] // 2
        x = x - w2
        y = y - h2
        if x <= y <= -x:
            # intersects the left border
            y2 = y / x * -w2
            if y <= 0:
                r = h2f - 1 - h2 - y2
                return r, 0
            else:
                r = h2f + img_hw[1] - 2 + h2 - y2
                return r, 1
        elif -x <= y <= x:
            # intersects the right border
            y2 = y / x * w2
            if y <= 0:
                r = h2f + img_hw[1] - 2 + h2 + y2
                return r, 0
            else:
                r = h2f - 1 - h2 + y2
                return r, 1
        elif -y < x < y:
            # intersects the bottom border
            x2 = x / y * h2
            r = h2f - 1 + w2 - x2
            return r, 1
        elif y < x < -y:
            # intersects the top border
            x2 = x / y * -h2
            r = h2f - 1 + w2 + x2
            return r, 0

    @staticmethod
    def linesToPeaksNewCore(lines, img_hw):
        """
        :input: lines(n, 5), the five dims being x1, y1, x2, y2, and the line's
            type in the view: 0 vertical, 1 horizontal, 2 through-center
        :return (xPeaks, yPeaks, cUpPeaks, cDownPeaks), per-category lengths
        """

        def autoAbs(v):
            # abs() that works for torch tensors, numpy arrays and scalars alike
            if isinstance(v, torch.Tensor):
                return v.abs()
            elif isinstance(v, np.ndarray):
                return np.abs(v)
            return abs(v)

        def toNdarrayOrTensor(v, ref):
            # build a container matching ref's type (tensor vs ndarray)
            if isinstance(ref, torch.Tensor):
                return ref.new_tensor(v)
            else:
                return np.array(v)

        xPeaks, yPeaks, cUpPeaks, cDownPeaks = [], [], [], []
        xLengths, yLengths, cUpLengths, cDownLengths = [], [], [], []
        for line in lines:
            length_ratio = autoAbs(line[0:2] - line[2:4]) / toNdarrayOrTensor(img_hw, line)[[1, 0]]
            if line[4] == 0:
                xPeaks.append(line[0:4:2].mean())
                xLengths.append(length_ratio[1])
            elif line[4] == 1:
                yPeaks.append(line[1:4:2].mean())
                yLengths.append(length_ratio[0])
            elif line[4] == 2:
                # For both endpoints compute the corresponding angle (in border-
                # coordinate units); their arithmetic mean is the representative angle.
                r1, p1 = PerspectiveDataset.coord2AngleValue(*line[0:2], img_hw)
                r2, p2 = PerspectiveDataset.coord2AngleValue(*line[2:4], img_hw)
                if p1 != p2:
                    warnings.warn("cline two endpoint is not in same updown part!")
                    # Nudge the endpoint closest to the horizontal midline into the
                    # same half as the other endpoint, then recompute.
                    midPointDis = np.abs(line[1::2] - ((img_hw[0] - 1) / 2))
                    if midPointDis.argmin() == 0:  # adjust endpoint 1
                        if p2 == 0:  # endpoint 2 is in the upper half; move endpoint 1 there too
                            line[1] = ((img_hw[0] - 1) / 2) - 0.01
                        else:
                            line[1] = ((img_hw[0] - 1) / 2) + 0.01
                    else:
                        if p1 == 0:
                            line[3] = ((img_hw[0] - 1) / 2) - 0.01
                        else:
                            line[3] = ((img_hw[0] - 1) / 2) + 0.01
                    r1, p1 = PerspectiveDataset.coord2AngleValue(*line[0:2], img_hw)
                    r2, p2 = PerspectiveDataset.coord2AngleValue(*line[2:4], img_hw)
                    assert p1 == p2, "cline two endpoint is not in same updown part!"
                meanAngleValue = (r1 + r2) / 2  # plain arithmetic mean
                if p1 == 0:
                    cUpPeaks.append(meanAngleValue)
                    cUpLengths.append(length_ratio.max() * 2)
                else:
                    cDownPeaks.append(meanAngleValue)
                    cDownLengths.append(length_ratio.max() * 2)
        return (xPeaks, yPeaks, cUpPeaks, cDownPeaks), (xLengths, yLengths, cUpLengths, cDownLengths)

    def linesToPeaksNew(self, lines, img_hw):
        """Wrapper: run linesToPeaksNewCore on columns 3..7 and return ndarrays."""
        return [np.array(item) for item in self.linesToPeaksNewCore([line[3:8] for line in lines], img_hw)[0]]

    @staticmethod
    def collate(batch):
        """Custom collate: keep 'cor' per-sample (variable length), default-collate the rest."""

        def collateByKey(batch, key):
            if key == "cor":
                return [PerspectiveDataset.collate(d[key]) for d in batch]
            else:
                return PerspectiveDataset.collate([d[key] for d in batch])

        elem = batch[0]
        if isinstance(elem, dict):
            return {key: collateByKey(batch, key) for key in elem}
        elif isinstance(elem, list) or isinstance(elem, tuple):
            return [PerspectiveDataset.collate(d) if isinstance(d[0], list) or isinstance(d[0], tuple)
                    else [default_collate([v]).squeeze(0) for v in d]
                    for d in batch]
        return default_collate(batch)
def cor2xybound(cor):
    ''' Helper function to clip max/min stretch factor '''
    ceil_pts = cor[0::2]
    floor_pts = cor[1::2]
    zU = -50
    # Project ceiling corners onto the plane at height zU.
    u = panostretch.coorx2u(ceil_pts[:, 0])
    vU = panostretch.coory2v(ceil_pts[:, 1])
    vB = panostretch.coory2v(floor_pts[:, 1])
    x, y = panostretch.uv2xy(u, vU, z=zU)
    c = np.sqrt(x ** 2 + y ** 2)
    zB = c * np.tan(vB)
    # Scale so the ceiling-floor gap corresponds to 3 units.
    S = 3 / abs(zB.mean() - zU)
    dx = [abs(x.min() * S), abs(x.max() * S)]
    dy = [abs(y.min() * S), abs(y.max() * S)]
    return min(dx), min(dy), max(dx), max(dy)
if __name__ == '__main__':
    # Smoke test: build a minimal config and fetch one training sample.
    cfg = EasyDict()
    data = EasyDict()
    cfg.DATA = data
    cfg.DATA.ROOT_DIR = "data/layoutnet_dataset"
    dataset = PerspectiveDataset(cfg, "train")
    d = dataset[0]
    a = 1
def worker_init_fn(_):
    """DataLoader worker-init hook: give each worker a distinct NumPy seed.

    PyTorch already derives a per-worker torch seed; this forwards it
    (offset by the worker index `_`) to NumPy, which the dataset's random
    augmentations rely on.
    """
    worker_info = torch.utils.data.get_worker_info()
    seed = worker_info.seed
    np.random.seed((seed + _) % 2 ** 32)
    # NOTE: an earlier revision also re-created a KVReader on worker_info.dataset
    # here to avoid a "cannot pickle KVReader object" error.
| 26,199 | 42.812709 | 118 | py |
DMH-Net | DMH-Net-main/train.py | import argparse
import os
# import ipdb
import sys
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
from tqdm import trange
from config import cfg, cfg_from_yaml_file, cfg_from_list
from eval import valid
from misc.utils import save_model, load_trained_model
from model import ENCODER_RESNET, ENCODER_DENSENET, ENCODER_HOUGH, DMHNet
from perspective_dataset import PerspectiveDataset, worker_init_fn
# Focusing exponent used by the (currently commented-out) focal-style corner
# loss inside feed_forward.
GAMMA = 2
# Per-term loss weights applied in feed_forward below.
ALPHA_XY = 1.0
ALPHA_MATCH = 10.0
ALPHA_ANGLE = 1.0
ALPHA_HEIGHT = 1.0
def feed_forward(net, x, angle, up_bins, down_bins, edge, height, return_results=False):
    """Run one forward pass of `net` and compute all training losses.

    NOTE(review): relies on the module-level `device` being defined before
    this is called.  Returns a dict of losses (including a 'score' entry
    used for model selection); with return_results=True also returns the
    network's raw output dict.
    """
    up_bin256 = up_bins
    down_bin256 = down_bins
    x = x.to(device)
    angle = angle.to(device)
    up_bin256 = up_bin256.to(device)
    down_bin256 = down_bin256.to(device)
    edge = edge.to(device)
    height = height.to(device)
    losses = {}
    angle_, up_xy_, down_xy_, edge_, height_, results_dict = net(x)
    # Match loss
    # Edge classification loss
    loss_edg = F.binary_cross_entropy_with_logits(edge_, edge, reduction='none')
    loss_edg[edge == 0.] *= 0.2  # down-weight the negative (non-edge) positions
    loss_edg = loss_edg.mean()
    losses['edge'] = loss_edg
    # Height loss
    losses['height'] = ALPHA_HEIGHT * F.l1_loss(height_, height)
    # X-Y classification loss
    # losses['fuse_xy'] = ALPHA_XY * F.binary_cross_entropy_with_logits(fuse_xy_, up_bin256)
    losses['up_xy'] = ALPHA_XY * F.binary_cross_entropy_with_logits(up_xy_, up_bin256)
    losses['down_xy'] = ALPHA_XY * F.binary_cross_entropy_with_logits(down_xy_, down_bin256)
    # Angle classification loss
    loss_cor_ori = ALPHA_ANGLE * F.binary_cross_entropy_with_logits(angle_, angle)
    # pt_cor = torch.exp(-loss_cor_ori)
    losses['angle'] = loss_cor_ori
    # ALPHA_ANGLE * ((1 - pt_cor)**GAMMA * loss_cor_ori).mean()
    # Soft-argmax over the 256 bins: expected bin index for up/down predictions.
    idx = torch.arange(256).view(1, 256, 1)
    idx = idx.to(device)
    up_reg = (idx * F.softmax(up_xy_, 2)).sum(2).squeeze(1)
    down_reg = (idx * F.softmax(down_xy_, 2)).sum(2).squeeze(1)
    # Match loss: up and down expected positions should coincide (ratio ~ 1).
    ratio = up_reg / (down_reg + 1e-8)
    losses['match'] = torch.abs(ratio - 1.).mean()
    # Total loss
    losses['total'] = losses['up_xy'] + losses['down_xy'] + losses['angle'] + losses['edge']
    losses['total'] += losses['height']
    losses['total'] += losses['match']
    # For model selection
    with torch.no_grad():
        nobrain_baseline_xy = 1.
        score_xy_up = 1 - (torch.sigmoid(up_xy_) - up_bin256).abs().mean() / nobrain_baseline_xy
        score_xy_down = 1 - (torch.sigmoid(down_xy_) - down_bin256).abs().mean() / nobrain_baseline_xy
        nobrain_baseline_angle = 1.
        score_angle = 1 - (torch.sigmoid(angle_) - angle).abs().mean() / nobrain_baseline_angle
        losses['score'] = (score_angle + score_xy_up + score_xy_down) / 3
    results_dict['angle'] = angle_.detach()
    results_dict['up_xy'] = up_xy_.detach()
    results_dict['down_xy'] = down_xy_.detach()
    if return_results:
        return losses, results_dict
    else:
        return losses
def feature_viz(name, tb_writer):
    """Build a forward hook that logs a layer's feature maps to TensorBoard.

    Each channel of the hooked layer's output becomes one single-channel tile;
    the tiles are assembled into a normalized grid image under tag ``name``.
    """
    def hook(model, input, output):
        # Detach so logging never participates in autograd.
        activations = output.detach()
        # Flatten (N, C, H, W) -> (N*C, 1, H, W): one grid cell per channel.
        tiles = activations.view(-1, 1, activations.shape[2], activations.shape[3])
        grid = make_grid(tiles, normalize=True)
        tb_writer.add_image(name, grid.cpu())
    return hook
def visualize_item(x, y_cor, results_dict):
    """Render the GT corner strip and the predicted up/down views for one sample.

    Returns three uint8 images: the equirect input stacked under its corner
    probability strip, plus the up and down perspective views each framed by
    their x/y boundary-probability strips.
    """
    strip_h = 30  # height/width of a probability strip, in pixels
    pad = 3       # white separator between strip and image

    def to_u8_img(tensor_chw):
        # (C, H, W) float in [0, 1] -> (H, W, C) uint8
        return (tensor_chw.clone().numpy().transpose([1, 2, 0]) * 255).astype(np.uint8)

    def framed_view(view_u8, xy_prob):
        # Split the 4-way boundary probabilities into full-width x/y profiles.
        prof_x = np.concatenate([xy_prob[:, 0][::-1], xy_prob[:, 2]], 0)
        prof_y = np.concatenate([xy_prob[:, 1][::-1], xy_prob[:, 3]], 0)
        strip_x = np.zeros((strip_h, 512, 3), np.uint8)
        strip_x[:] = prof_x[None, :, None] * 255
        strip_y = np.zeros((512, strip_h, 3), np.uint8)
        strip_y[:] = prof_y[:, None, None] * 255
        # White canvas: probability strips on the top/left, view bottom-right.
        canvas = np.zeros((strip_h + pad + 512, strip_h + pad + 512, 3), np.uint8) + 255
        canvas[strip_h + pad:, strip_h + pad:, :] = view_u8
        canvas[strip_h + pad:, :strip_h, :] = strip_y
        canvas[:strip_h, strip_h + pad:, :] = strip_x
        return canvas

    # Equirect image with the ground-truth corner probability strip on top.
    x = (x.numpy().transpose([1, 2, 0]) * 255).astype(np.uint8)
    y_cor = y_cor.numpy()
    gt_cor = np.zeros((strip_h, 1024, 3), np.uint8)
    gt_cor[:] = y_cor[0][None, :, None] * 255
    img_pad = np.zeros((pad, 1024, 3), np.uint8) + 255
    cor_img = np.concatenate([gt_cor, img_pad, x], 0)

    up_img = to_u8_img(results_dict['up_img'].detach().cpu()[0])
    up_xy = torch.sigmoid(results_dict['up_xy']).detach().cpu()[0, 0].clone().numpy()
    stich_up_canvas = framed_view(up_img, up_xy)

    down_img = to_u8_img(results_dict['down_img'].detach().cpu()[0])
    down_xy = torch.sigmoid(results_dict['down_xy']).detach().cpu()[0, 0].clone().numpy()
    stich_down_canvas = framed_view(down_img, down_xy)

    return cor_img, stich_up_canvas, stich_down_canvas
if __name__ == '__main__':
    # Training entry point: parse CLI args, merge them into the YAML config,
    # build dataloaders / model / optimizer, then run the train+valid loop
    # and checkpoint the best model by validation score.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--cfg_file', '-c', type=str, required=True, help='specify the config for training')
    parser.add_argument('--id', required=True, help='experiment id to name checkpoints and logs')
    parser.add_argument('--ckpt', default='./ckpt', help='folder to output checkpoints')
    parser.add_argument('--logs', default='./logs', help='folder to logging')
    parser.add_argument('--pth', default=None, help='path to load saved checkpoint.' '(finetuning)')
    # Model related
    parser.add_argument('--backbone',
                        default='drn38',
                        choices=ENCODER_RESNET + ENCODER_DENSENET + ENCODER_HOUGH,
                        help='backbone of the network')
    parser.add_argument('--no_rnn', action='store_true', help='whether to remove rnn or not')
    # Dataset related arguments
    # TODO: the original code swapped the test and training sets and had no
    # validation set; the new code uses the original train/test split as-is.
    parser.add_argument('--train_root_dir',
                        default='data/layoutnet_dataset/test',
                        help='root directory to training dataset. '
                        'should contains img, label_cor subdirectories')
    parser.add_argument('--valid_root_dir',
                        default='data/layoutnet_dataset/train',
                        help='root directory to validation dataset. '
                        'should contains img, label_cor subdirectories')
    parser.add_argument('--no_flip', action='store_true', help='disable left-right flip augmentation')
    parser.add_argument('--no_rotate', action='store_true', help='disable horizontal rotate augmentation')
    parser.add_argument('--no_gamma', action='store_true', help='disable gamma augmentation')
    parser.add_argument('--no_erase', action='store_true', help='disable radom erasing augmentation')
    parser.add_argument('--no_noise', action='store_true', help='disable radom noise augmentation')
    parser.add_argument('--no_pano_stretch', action='store_true', help='disable pano stretch')
    parser.add_argument('--num_workers', '-j', type=int, help='numbers of workers for dataloaders')
    # optimization related arguments
    parser.add_argument('--freeze_earlier_blocks', default=-1, type=int)
    parser.add_argument('--batch_size', '-b', type=int, help='batch size')
    # parser.add_argument('--batch_size_valid', default=2, type=int, help='validation mini-batch size')
    parser.add_argument('--epochs', type=int, help='epochs to train')
    parser.add_argument('--optim', default='Adam', help='optimizer to use. only support SGD and Adam')
    parser.add_argument('--lr', type=float, help='learning rate')
    parser.add_argument('--lr_per_sample', type=float, help='learning rate per sample')
    parser.add_argument('--lr_pow', default=0.9, type=float, help='power in poly to drop LR')
    parser.add_argument('--warmup_lr', default=1e-6, type=float, help='starting learning rate for warm up')
    parser.add_argument('--warmup_epochs', default=0, type=int, help='numbers of warmup epochs')
    parser.add_argument('--beta1', default=0.9, type=float, help='momentum for sgd, beta1 for adam')
    parser.add_argument('--weight_decay', default=0, type=float, help='factor for L2 regularization')
    parser.add_argument('--valid_visu', default=1, type=int, help='how many batches to be visualized when eval')
    # Misc arguments
    parser.add_argument('--no_cuda', action='store_true', help='disable cuda')
    parser.add_argument('--seed', default=594277, type=int, help='manual seed')
    parser.add_argument('--disp_iter', type=int, default=1, help='iterations frequency to display')
    parser.add_argument('--save_every', type=int, default=25, help='epochs frequency to save state_dict')
    parser.add_argument('--no_multigpus', action='store_true', help='disable data parallel')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    args = parser.parse_args()
    # Merge the YAML config, then let CLI overrides take precedence.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    if args.batch_size is not None:
        cfg.OPTIM.BATCH_SIZE = args.batch_size
    if args.lr is not None or args.lr_per_sample is not None:
        # --lr and --lr_per_sample are mutually exclusive
        # (the assertion message below is Chinese for that).
        if args.lr is not None and args.lr_per_sample is not None:
            assert False, "不能同时指定--lr和--lr_per_sample!"
        if args.lr is not None:
            cfg.OPTIM.LR = args.lr
        if args.lr_per_sample is not None:
            cfg.OPTIM.LR = args.lr_per_sample * cfg.OPTIM.BATCH_SIZE
    if args.epochs is not None:
        cfg.OPTIM.MAX_EPOCH = args.epochs
    if args.num_workers is None:
        # sys.gettrace() is truthy under a debugger: use 0 workers there so
        # breakpoints work inside the dataset code.
        args.num_workers = min(max(8, cfg.OPTIM.BATCH_SIZE), os.cpu_count()) if not sys.gettrace() else 0
    device = torch.device('cpu' if args.no_cuda else 'cuda')
    # Seed numpy and torch for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    os.makedirs(os.path.join(args.ckpt, args.id), exist_ok=True)
    # Create dataloader
    dataset_train = PerspectiveDataset(cfg, "train", train_mode=True)
    dataset_train_size = len(dataset_train)
    print("num_workers: " + str(args.num_workers))
    print("batch_size: " + str(cfg.OPTIM.BATCH_SIZE))
    print("train_set_size: " + str(dataset_train_size))
    loader_train = DataLoader(
        dataset_train,
        cfg.OPTIM.BATCH_SIZE,
        collate_fn=dataset_train.collate,
        shuffle=True,
        drop_last=False,
        num_workers=args.num_workers,
        pin_memory=not args.no_cuda,
        worker_init_fn=worker_init_fn)
    # NOTE(review): the valid phase below uses loader_valid/dataset_valid
    # unconditionally, so an empty --valid_root_dir would raise NameError.
    if args.valid_root_dir:
        dataset_valid = PerspectiveDataset(cfg, "valid")
        loader_valid = DataLoader(dataset_valid,
                                  min(cfg.OPTIM.BATCH_SIZE, 4),
                                  collate_fn=dataset_valid.collate,
                                  shuffle=False,
                                  drop_last=False,
                                  num_workers=args.num_workers,
                                  pin_memory=not args.no_cuda,
                                  worker_init_fn=worker_init_fn)
    # Create model
    if args.pth is not None:
        print('Finetune model is given.')
        print('Ignore --backbone and --no_rnn')
        net = load_trained_model(DMHNet, args.pth, cfg, cfg.MODEL.get("BACKBONE", {}).get("NAME", "drn38"),
                                 not args.no_rnn).to(device)
    else:
        net = DMHNet(cfg, cfg.MODEL.get("BACKBONE", {}).get("NAME", "drn38"), not args.no_rnn).to(device)
    if not args.no_multigpus:
        net = nn.DataParallel(net)  # multi-GPU
    # Create optimizer
    print("LR {:f}".format(cfg.OPTIM.LR))
    if cfg.OPTIM.TYPE == 'SGD':
        optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                              lr=cfg.OPTIM.LR,
                              momentum=args.beta1,
                              weight_decay=args.weight_decay)
    elif cfg.OPTIM.TYPE == 'Adam':
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),
                               lr=cfg.OPTIM.LR,
                               betas=(args.beta1, 0.999),
                               weight_decay=args.weight_decay)
    else:
        raise NotImplementedError()
    # Create tensorboard for monitoring training
    tb_path = os.path.join(args.logs, args.id)
    os.makedirs(tb_path, exist_ok=True)
    tb_writer = SummaryWriter(log_dir=tb_path)
    # Init variable
    args.warmup_iters = args.warmup_epochs * len(loader_train)
    # args.max_iters = args.epochs * len(loader_train)
    # args.running_lr = args.warmup_lr if args.warmup_epochs > 0 else args.lr
    milestones = cfg.OPTIM.get("SCHEDULER", {}).get("MILESTONES", [50, 100])
    gamma = cfg.OPTIM.get("SCHEDULER", {}).get("GAMMA", 0.3)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=gamma)
    # Log the full config/args so each run is self-describing in TensorBoard.
    tb_writer.add_text("cfg", str(cfg))
    tb_writer.add_text("args", str(args))
    tb_writer.add_text("gpuid", os.environ.get("CUDA_VISIBLE_DEVICES", "None"))
    # Init bin mask
    # anglex = np.linspace(-256, 255, 512)
    # angley = np.linspace(256, -255, 512)
    # xv, yv = np.meshgrid(anglex, angley)
    # # idx is the mapping table
    # idx = (np.rad2deg(np.arctan2(xv, yv)) + 180 - 1).astype(int)
    # binary_mask = np.zeros((512, 512, 360))
    # for i in range(360):
    #     binary_mask[np.where(idx == i)[0], np.where(idx == i)[1], i] = 1
    # binary_mask = torch.tensor(binary_mask, dtype=torch.float32)
    best_valid_score = 0  # model selection criterion: prefer 3D IoU when available
    # Start training
    for ith_epoch in trange(1, cfg.OPTIM.MAX_EPOCH + 1, desc='Epoch', unit='ep'):
        # Train phase
        net.train()
        # torch.cuda.empty_cache()
        iterator_train = iter(loader_train)
        cur_sample_count = 0
        for _ in trange(len(loader_train), desc='Train ep%s' % ith_epoch, position=1):
            # Set learning rate
            # adjust_learning_rate(optimizer, args)
            input = next(iterator_train)
            # Move every tensor field of the batch dict to the target device.
            for k in input:
                if isinstance(input[k], torch.Tensor):
                    input[k] = input[k].to(device)
            cur_sample_count += len(input["p_imgs"])
            # Global sample counter used as the TensorBoard x-axis.
            tb_total_sample_count = (ith_epoch - 1) * dataset_train_size + cur_sample_count
            losses, results_dict = net(input)
            for k, v in losses.items():
                k = 'train/%s' % k
                tb_writer.add_scalar(k, v.item(), tb_total_sample_count)
            loss = losses['total']
            # backprop
            optimizer.zero_grad()
            loss.backward()
            # Clip the global L2 gradient norm to 1.0 for stability.
            nn.utils.clip_grad_norm_(net.parameters(), 1.0, norm_type=2)
            optimizer.step()
        tb_writer.add_scalar('train/lr', optimizer.param_groups[0]["lr"], ith_epoch)
        # Valid phase
        valid_loss, imgs, metrics = valid(cfg, net, loader_valid, dataset_valid, device, args.valid_visu, valid_epoch=ith_epoch)
        # Optionally also evaluate on the test split every epoch.
        if cfg.get("TEST_METRIC", False):
            dataset_test = PerspectiveDataset(cfg, "test")
            loader_test = DataLoader(dataset_test,
                                     min(cfg.OPTIM.BATCH_SIZE, 4),
                                     collate_fn=dataset_test.collate,
                                     shuffle=False,
                                     drop_last=False,
                                     num_workers=args.num_workers,
                                     pin_memory=not args.no_cuda,
                                     worker_init_fn=worker_init_fn)
            test_loss, test_imgs, test_metrics = valid(cfg, net, loader_test, dataset_test, device, 0)
            for k, v in test_metrics.items():
                print("{:s} {:f}".format(k, v))
                tb_writer.add_scalar('testmetric/%s' % k, v, ith_epoch)
        for k, v in imgs.items():
            tb_writer.add_image('valid/{:s}'.format(k), v, ith_epoch, dataformats="HWC")
        for k, v in valid_loss.items():
            print("{:s} {:f}".format(k, v))
            tb_writer.add_scalar('valid/%s' % k, v, ith_epoch)
        for k, v in metrics.items():
            print("{:s} {:f}".format(k, v))
            tb_writer.add_scalar('metric/%s' % k, v, ith_epoch)
        # Save best validation loss model
        if "3DIoU" in metrics:
            valid_score = metrics["3DIoU"]
        else:
            valid_score = 100 - valid_loss["total"]  # fallback selection score when training without post-processing (no 3DIoU)
        if valid_score >= best_valid_score:
            best_valid_score = valid_score
            print("save BEST VALID ckpt " + str(ith_epoch))
            save_model(net, os.path.join(args.ckpt, args.id, 'best_valid.pth'), args)
        # Periodically save model
        if ith_epoch % args.save_every == 0:
            print("save ckpt " + str(ith_epoch))
            save_model(net, os.path.join(args.ckpt, args.id, 'epoch_%d.pth' % ith_epoch), args)
        scheduler.step()
    # Optional final evaluation of the best checkpoint via eval.py
    # (the prints below are Chinese: "starting finalEval" / "command to run").
    if cfg.get("FINAL_EVAL", False):
        print("现在开始finalEval!")
        commandLine = "python eval.py --cfg_file {:s} --ckpt ckpt/{:s}/best_valid.pth --print_detail --output_file".format(args.cfg_file, args.id)
        if cfg.get("FINAL_EVAL_METHOD"):
            commandLine += " --set POST_PROCESS.METHOD {:s}".format(cfg.FINAL_EVAL_METHOD)
        print("要执行的命令行 " + commandLine)
        os.system(commandLine)
| 18,050 | 44.583333 | 146 | py |
DMH-Net | DMH-Net-main/preprocess.py | '''
This script preprocess the given 360 panorama image under euqirectangular projection
and dump them to the given directory for further layout prediction and visualization.
The script will:
- extract and dump the vanishing points
- rotate the equirect image to align with the detected VP
- extract the VP aligned line segments (for further layout prediction model)
The dump files:
    - `*_VP.txt` is the vanishing points
- `*_aligned_rgb.png` is the VP aligned RGB image
- `*_aligned_line.png` is the VP aligned line segments images
Author: Cheng Sun
Email : chengsun@gapp.nthu.edu.tw
'''
import os
import glob
import argparse
import numpy as np
from PIL import Image
import sys
from misc.pano_lsd_align import panoEdgeDetection, rotatePanorama, uv2xyzN, xyz2uvN
from misc.grab_data import dump_to_txt
def alignLabel(cor_points, vp_data, sphereH, sphereW):
    """Rotate equirect pixel labels into the vanishing-point-aligned frame.

    Converts 0-based (x, y) pixel corners to spherical angles, rotates the
    corresponding unit vectors by the inverse of the VP basis ``vp_data``,
    and maps them back to integer pixel coordinates at the same resolution.
    """
    # Pixel coordinates -> longitude/latitude (1-based pano_lsd_align convention).
    px = cor_points[:, 0].reshape(-1, 1) + 1
    py = cor_points[:, 1].reshape(-1, 1) + 1
    lon = (px - sphereW / 2 - 0.5) / sphereW * np.pi * 2
    lat = -(py - sphereH / 2 - 0.5) / sphereH * np.pi
    # Lift to unit vectors, rotate into the VP-aligned frame, project back.
    vec = uv2xyzN(np.hstack([lon, lat]), 1)
    rot = np.linalg.inv(vp_data.T)
    uv_new = xyz2uvN(vec @ rot.T, 1)
    # Angles -> pixel coordinates.
    out_x = (uv_new[:, 0] + np.pi) / (2 * np.pi) * sphereW + 0.5
    out_y = (-uv_new[:, 1] + np.pi / 2) / np.pi * sphereH + 0.5
    return np.vstack([out_x, out_y]).astype(int).T
def saveRelatedLabel(base_folder, base_name, vp_data, sphereH, sphereW):
    '''
    After aligning the pano image, also align the ground-truth corner
    ("cor") and door ("ds") labels and dump them to label_cor_bak.

    Returns True on success, False when a label file is missing or a
    label array is empty / all-zero.
    '''
    cor_path = '%s/../label_cor/%s.txt' % (base_folder, base_name)
    ds_path = '%s/../label_cor/%s_ds.txt' % (base_folder, base_name)
    # Both label files must exist (os.path.join on a single argument was a
    # no-op wrapper here and has been removed).
    if not (os.path.isfile(cor_path) and os.path.isfile(ds_path)):
        print('fail to locate %s and %s' % (cor_path, ds_path))
        return False
    # Corner labels: one "x y" pair per non-empty line.
    with open(cor_path) as f:
        cor = np.array([line.strip().split() for line in f if line.strip()],
                       np.float32)
    if not cor.any():
        return False
    gt_points = alignLabel(cor, vp_data, sphereH, sphereW)
    dump_to_txt('%s/../label_cor_bak/%s.txt' % (base_folder, base_name),
                gt_points)
    # Door ("ds") labels, same format.
    with open(ds_path) as f:
        cor_ds = np.array([line.strip().split() for line in f if line.strip()],
                          np.float32)
    if not cor_ds.any():
        return False
    ds_points = alignLabel(cor_ds, vp_data, sphereH, sphereW)
    dump_to_txt('%s/../label_cor_bak/%s_ds.txt' % (base_folder, base_name),
                ds_points)
    return True
# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter)
# I/O related arguments
parser.add_argument('--img_glob',
                    required=True,
                    help='NOTE: Remember to quote your glob path.')
parser.add_argument('--output_dir', required=True)
parser.add_argument('--rgbonly',
                    action='store_true',
                    help='Add this if use are preparing customer dataset')
# Preprocessing related arguments
parser.add_argument('--q_error', default=0.7, type=float)
parser.add_argument('--refine_iter', default=3, type=int)
parser.add_argument('--start_idx', default=-1, type=int)
parser.add_argument('--end_idx', default=-1, type=int)
args = parser.parse_args()

paths = sorted(glob.glob(args.img_glob))
if len(paths) == 0:
    sys.exit('no images found')

# Check given path exist
for path in paths:
    assert os.path.isfile(path), '%s not found' % path

# Check target directory
if not os.path.isdir(args.output_dir):
    # BUGFIX: the format argument was missing, so the literal '%s' was printed.
    print('Output directory %s not existed. Create one.' % args.output_dir)
    os.makedirs(args.output_dir)

# Resolve the starting file from the zero-padded '<idx>_' filename prefix.
START_INDEX = 0 if (args.start_idx < 0) else args.start_idx
END_INDEX = len(paths) if (args.end_idx < 0) else args.end_idx
file_list = list(
    filter(lambda x: os.path.basename(x).startswith('%05d_' % START_INDEX),
           paths))
if not file_list:
    sys.exit('Wrong starting idx for file')
start_list_idx = paths.index(file_list[0])
# NOTE(review): END_INDEX is hard-wired to 1 and the lookup below is
# effectively unused (its result is never consumed); kept for compatibility
# with the original behavior.
END_INDEX = 1  # len(paths) if (END_INDEX > len(paths)) else END_INDEX
file_list = list(
    filter(lambda x: os.path.basename(x).startswith('%05d_' % END_INDEX),
           paths))
#if not file_list:
#    sys.exit('Wrong ending idx for file')
#end_list_idx = paths.index(file_list[0])

# Process each input
#for i_path in paths:
for path_index in range(len(paths)):  # (start_list_idx, end_list_idx):
    i_path = paths[path_index]
    print('Processing', i_path, flush=True)
    # Load and cat input images
    img_ori = np.array(Image.open(i_path).resize((1024, 512),
                                                 Image.BICUBIC))[..., :3]
    # VP detection and line segment extraction
    _, vp, _, _, panoEdge, _, _ = panoEdgeDetection(
        img_ori, qError=args.q_error, refineIter=args.refine_iter)
    panoEdge = (panoEdge > 0)
    # Align images with VP
    i_img = rotatePanorama(img_ori / 255.0, vp[2::-1])
    l_img = rotatePanorama(panoEdge.astype(np.float32), vp[2::-1])
    # Dump results
    basename = os.path.splitext(os.path.basename(i_path))[0]
    if args.rgbonly:
        # Custom-dataset mode: only save the aligned RGB image, after also
        # realigning the corner/door labels next to it.
        path = os.path.join(args.output_dir, '%s.png' % basename)
        path_to_label_cor = '%s/../label_cor/%s.txt'
        path_to_label_ds = '%s/../label_cor/%s_ds.txt'
        if (saveRelatedLabel(args.output_dir, basename, vp[2::-1], 512, 1024)):
            Image.fromarray((i_img * 255).astype(np.uint8)).save(path)
        else:
            # BUGFIX: use %-formatting instead of passing the path as a second
            # positional argument to print().
            print('Failed to process %s' % path)
    else:
        # Full mode: dump vanishing points plus aligned RGB and line images.
        path_VP = os.path.join(args.output_dir, '%s_VP.txt' % basename)
        path_i_img = os.path.join(args.output_dir,
                                  '%s_aligned_rgb.png' % basename)
        path_l_img = os.path.join(args.output_dir,
                                  '%s_aligned_line.png' % basename)
        with open(path_VP, 'w') as f:
            for i in range(3):
                f.write('%.6f %.6f %.6f\n' % (vp[i, 0], vp[i, 1], vp[i, 2]))
        Image.fromarray((i_img * 255).astype(np.uint8)).save(path_i_img)
        Image.fromarray((l_img * 255).astype(np.uint8)).save(path_l_img)
| 6,175 | 36.658537 | 85 | py |
DMH-Net | DMH-Net-main/layout_viewer.py | import json
import open3d
from PIL import Image
from scipy.ndimage import map_coordinates
from tqdm import trange
from misc.panostretch import pano_connect_points
from misc.post_proc import np_coor2xy, np_coory2v
def xyz_2_coorxy(xs, ys, zs, H, W):
    ''' Mapping 3D xyz coordinates to equirect coordinate '''
    # Longitude measured from the -y axis toward +x, in (-pi, pi].
    lon = np.arctan2(xs, -ys)
    # Latitude relative to the horizontal plane (negated: +z maps upward).
    lat = -np.arctan(zs / np.sqrt(xs ** 2 + ys ** 2))
    # Normalize both angles to [0, 1] and scale to pixel coordinates.
    u = lon / (2 * np.pi) + 0.5
    v = lat / np.pi + 0.5
    return u * W, v * H
def create_ceiling_floor_mask(cor_id, H, W):
    '''
    Binary masking on equirectangular
    where 1 indicate floor or ceiling

    cor_id holds (x, y) pixel corners, alternating ceiling/floor per wall.
    Returns an (H, W) bool mask: True above the ceiling-wall boundary or
    below the floor-wall boundary of each image column.
    '''
    # Prepare 1d ceiling-wall/floor-wall boundary
    c_pts = []
    f_pts = []
    n_cor = len(cor_id)
    for i in range(n_cor // 2):
        # Ceiling boundary points
        xys = pano_connect_points(cor_id[i * 2],
                                  cor_id[(i * 2 + 2) % n_cor],
                                  z=-50,
                                  w=W,
                                  h=H)
        c_pts.extend(xys)
        # Floor boundary points
        xys = pano_connect_points(cor_id[i * 2 + 1],
                                  cor_id[(i * 2 + 3) % n_cor],
                                  z=50,
                                  w=W,
                                  h=H)
        f_pts.extend(xys)
    # Sort for interpolate
    c_pts = np.array(c_pts)
    c_pts = c_pts[np.argsort(c_pts[:, 0] * H - c_pts[:, 1])]
    f_pts = np.array(f_pts)
    f_pts = f_pts[np.argsort(f_pts[:, 0] * H + f_pts[:, 1])]
    # Removed duplicated point
    c_pts = np.concatenate([c_pts[:1], c_pts[1:][np.diff(c_pts[:, 0]) > 0]], 0)
    f_pts = np.concatenate([f_pts[:1], f_pts[1:][np.diff(f_pts[:, 0]) > 0]], 0)
    # Generate boundary for each image column
    c_bon = np.interp(np.arange(W), c_pts[:, 0], c_pts[:, 1])
    f_bon = np.interp(np.arange(W), f_pts[:, 0], f_pts[:, 1])
    # Generate mask
    # BUGFIX: np.bool was removed in NumPy 1.24; use the builtin bool dtype.
    mask = np.zeros((H, W), bool)
    for i in range(W):
        u = max(0, int(round(c_bon[i])) + 1)
        # BUGFIX: the row index must be clamped by the image height H, not the
        # width W (harmless on standard panos where W >= H, but incorrect).
        b = min(H, int(round(f_bon[i])))
        mask[:u, i] = 1
        mask[b:, i] = 1
    return mask
def warp_walls(equirect_texture, xy, floor_z, ceil_z, ppm):
    ''' Generate all walls' xyzrgba '''
    H, W = equirect_texture.shape[:2]
    all_rgb = []
    all_xyz = []
    n_corners = len(xy)
    for i in trange(n_corners, desc='Processing walls'):
        corner_a = xy[i]
        corner_b = xy[(i + 1) % n_corners]
        # Wall extent in world units -> texture resolution in pixels.
        wall_width = np.sqrt(((corner_a - corner_b) ** 2).sum())
        t_h = int(round((ceil_z - floor_z) * ppm))
        t_w = int(round(wall_width * ppm))
        # Dense sample grid over the wall plane.
        xs = np.linspace(corner_a[0], corner_b[0], t_w)[None].repeat(t_h, 0)
        ys = np.linspace(corner_a[1], corner_b[1], t_w)[None].repeat(t_h, 0)
        zs = np.linspace(floor_z, ceil_z, t_h)[:, None].repeat(t_w, 1)
        coorx, coory = xyz_2_coorxy(xs, ys, zs, H, W)
        # Bilinear-sample each RGB channel from the texture (wrap at the seam).
        plane_texture = np.stack([
            map_coordinates(equirect_texture[..., c], [coory, coorx],
                            order=1, mode='wrap')
            for c in range(3)
        ], -1)
        plane_xyz = np.stack([xs, ys, zs], axis=-1)
        all_rgb.extend(plane_texture.reshape(-1, 3))
        all_xyz.extend(plane_xyz.reshape(-1, 3))
    return all_rgb, all_xyz
def warp_floor_ceiling(equirect_texture, ceil_floor_mask, xy, z_floor,
                       z_ceiling, ppm):
    ''' Generate floor's and ceiling's xyzrgba '''
    assert equirect_texture.shape[:2] == ceil_floor_mask.shape[:2]
    H, W = equirect_texture.shape[:2]

    def sample_rgb(coory, coorx):
        # Bilinear-sample each texture channel (wrap around the pano seam).
        return np.stack([
            map_coordinates(equirect_texture[..., c], [coory, coorx],
                            order=1, mode='wrap')
            for c in range(3)
        ], -1)

    # Axis-aligned bounding box of the floor polygon -> sample grid size.
    min_x, max_x = xy[:, 0].min(), xy[:, 0].max()
    min_y, max_y = xy[:, 1].min(), xy[:, 1].max()
    t_h = int(round((max_y - min_y) * ppm))
    t_w = int(round((max_x - min_x) * ppm))
    xs = np.linspace(min_x, max_x, t_w)[None].repeat(t_h, 0)
    ys = np.linspace(min_y, max_y, t_h)[:, None].repeat(t_w, 1)

    # Same grid, two horizontal planes: floor first, then ceiling.
    out = []
    for z_plane in (z_floor, z_ceiling):
        zs = np.zeros_like(xs) + z_plane
        coorx, coory = xyz_2_coorxy(xs, ys, zs, H, W)
        texture = sample_rgb(coory, coorx)
        # Nearest-neighbour sample of the validity mask, then keep only the
        # grid points that project inside the ceiling/floor region.
        mask = map_coordinates(ceil_floor_mask, [coory, coorx], order=0)
        xyz_pts = np.stack([xs, ys, zs], axis=-1)
        out.append(texture[mask])
        out.append(xyz_pts[mask])
    floor_texture, floor_xyz, ceil_texture, ceil_xyz = out
    return floor_texture, floor_xyz, ceil_texture, ceil_xyz
def create_occlusion_mask(xyz):
    """Flag points hidden behind nearer geometry along the same view ray.

    Returns ``(mask, idx)`` where ``idx`` sorts the input from far to close
    and ``mask`` is a bool array *in that sorted order* (True = occluded).
    Callers must reorder their data by ``idx`` before applying ``mask``.
    """
    xs, ys, zs = xyz.T
    ds = np.sqrt(xs**2 + ys**2 + zs**2)
    # Reorder by depth (from far to close)
    idx = np.argsort(-ds)
    xs, ys, zs, ds = xs[idx], ys[idx], zs[idx], ds[idx]
    # Compute corresponding equirect coordinates on a fixed 256x512 grid.
    coorx, coory = xyz_2_coorxy(xs, ys, zs, H=256, W=512)
    # NOTE(review): the modulo and the depth-map shape below use the *global*
    # H and W from the __main__ block (source image resolution), while the
    # projection above uses a fixed 256x512 grid. With H >= 256 and W >= 512
    # the modulo is a no-op and only the top-left 256x512 region of depth_map
    # is ever written — presumably intentional downsampling, but confirm.
    quan_coorx = np.round(coorx).astype(int) % W
    quan_coory = np.round(coory).astype(int) % H
    # Generate layout depth: nearest point per pixel wins (far-to-close
    # ordering means later writes are closer).
    depth_map = np.zeros((H, W), np.float32) + 1e9
    depth_map[quan_coory, quan_coorx] = ds
    # Per-pixel tolerance: largest depth jump to a neighbouring pixel, so
    # slanted surfaces are not flagged as self-occluding.
    tol_map = np.max([
        np.abs(np.diff(depth_map, axis=0, append=depth_map[[-2]])),
        np.abs(np.diff(depth_map, axis=1, append=depth_map[:, [0]])),
        np.abs(np.diff(depth_map, axis=1, prepend=depth_map[:, [-1]])),
    ], 0)
    # filter_ds = map_coordinates(depth_map, [coory, coorx], order=1, mode='wrap')
    # tol_ds = map_coordinates(tol_map, [coory, coorx], order=1, mode='wrap')
    filter_ds = depth_map[quan_coory, quan_coorx]
    tol_ds = tol_map[quan_coory, quan_coorx]
    # Occluded = lies well behind the closest point at the same pixel.
    mask = ds > (filter_ds + 2 * tol_ds)
    return mask, idx
"""Module which creates mesh lines from a line set
Open3D relies upon using glLineWidth to set line width on a LineSet
However, this method is now deprecated and not fully supported in newer OpenGL versions
See:
Open3D Github Pull Request - https://github.com/intel-isl/Open3D/pull/738
Other Framework Issues - https://github.com/openframeworks/openFrameworks/issues/3460
This module aims to solve this by converting a line into a triangular mesh (which has thickness)
The basic idea is to create a cylinder for each line segment, translate it, and then rotate it.
License: MIT
"""
import numpy as np
import open3d as o3d
def align_vector_to_another(a=np.array([0, 0, 1]), b=np.array([1, 0, 0])):
    """Return the (axis, angle) rotation aligning unit vector ``a`` onto ``b``.

    Returns ``(None, None)`` when the vectors are already identical.
    Both inputs are assumed to be unit length.
    """
    if np.array_equal(a, b):
        # Already aligned: nothing to rotate.
        return None, None
    rotation_axis = np.cross(a, b)
    rotation_axis = rotation_axis / np.linalg.norm(rotation_axis)
    rotation_angle = np.arccos(np.dot(a, b))
    return rotation_axis, rotation_angle
def normalized(a, axis=-1, order=2):
    """Normalize ``a`` along ``axis`` with the given vector-norm order.

    Returns the normalized array and the norms. Zero-norm entries are left
    unchanged (their reported norm is replaced by 1 to avoid dividing by 0).
    """
    norms = np.atleast_1d(np.linalg.norm(a, order, axis))
    norms[norms == 0] = 1  # avoid 0/0 on degenerate vectors
    return a / np.expand_dims(norms, axis), norms
class LineMesh(object):
    """Thick 3D polyline rendered as one cylinder mesh per line segment.

    Works around glLineWidth being deprecated in modern OpenGL: instead of a
    LineSet, each segment becomes a TriangleMesh cylinder that is translated
    to the segment midpoint and rotated to align with the segment direction.
    """
    def __init__(self, points, lines=None, colors=[0, 1, 0], radius=0.15):
        """Creates a line represented as sequence of cylinder triangular meshes
        Arguments:
            points {ndarray} -- Numpy array of points Nx3.
        Keyword Arguments:
            lines {list[list] or None} -- List of point index pairs denoting line segments. If None, implicit lines from ordered pairwise points. (default: {None})
            colors {list} -- list of colors, or single color of the line (default: {[0, 1, 0]})
            radius {float} -- radius of cylinder (default: {0.15})
        """
        # NOTE(review): mutable default argument `colors`; safe here because
        # it is only converted to an array and never mutated.
        self.points = np.array(points)
        self.lines = np.array(
            lines) if lines is not None else self.lines_from_ordered_points(self.points)
        self.colors = np.array(colors)
        self.radius = radius
        # Populated by create_line_mesh(): one TriangleMesh per segment.
        self.cylinder_segments = []
        self.create_line_mesh()
    @staticmethod
    def lines_from_ordered_points(points):
        # Implicit connectivity: consecutive points form the segments.
        lines = [[i, i + 1] for i in range(0, points.shape[0] - 1, 1)]
        return np.array(lines)
    def create_line_mesh(self):
        """Build one colored cylinder mesh per line segment."""
        first_points = self.points[self.lines[:, 0], :]
        second_points = self.points[self.lines[:, 1], :]
        line_segments = second_points - first_points
        line_segments_unit, line_lengths = normalized(line_segments)
        # Cylinders are created along +z, then rotated onto each segment.
        z_axis = np.array([0, 0, 1])
        # Create triangular mesh cylinder segments of line
        for i in range(line_segments_unit.shape[0]):
            line_segment = line_segments_unit[i, :]
            line_length = line_lengths[i]
            # get axis angle rotation to allign cylinder with line segment
            axis, angle = align_vector_to_another(z_axis, line_segment)
            # Get translation vector
            translation = first_points[i, :] + line_segment * line_length * 0.5
            # create cylinder and apply transformations
            cylinder_segment = o3d.geometry.TriangleMesh.create_cylinder(
                self.radius, line_length)
            cylinder_segment = cylinder_segment.translate(
                translation, relative=False)
            # axis is None when the segment already points along +z.
            if axis is not None:
                axis_a = axis * angle
                cylinder_segment = cylinder_segment.rotate(
                    R=o3d.geometry.get_rotation_matrix_from_axis_angle(axis_a),
                    center=cylinder_segment.get_center())
            # cylinder_segment = cylinder_segment.rotate(
            #   axis_a, center=True, type=o3d.geometry.RotationType.AxisAngle)
            # color cylinder
            color = self.colors if self.colors.ndim == 1 else self.colors[i, :]
            cylinder_segment.paint_uniform_color(color)
            self.cylinder_segments.append(cylinder_segment)
    def add_line(self, vis):
        """Adds this line to the visualizer"""
        for cylinder in self.cylinder_segments:
            vis.add_geometry(cylinder)
    def remove_line(self, vis):
        """Removes this line from the visualizer"""
        for cylinder in self.cylinder_segments:
            vis.remove_geometry(cylinder)
if __name__ == '__main__':
    # Viewer entry point: load an equirect texture plus its layout corners,
    # warp walls/floor/ceiling into a colored point cloud, and display it
    # (optionally with a thick wireframe of the layout edges) in Open3D.
    import argparse
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--img',
                        required=True,
                        help='Image texture in equirectangular format')
    parser.add_argument('--layout',
                        required=True,
                        help='Txt file containing layout corners (cor_id)')
    parser.add_argument('--camera_height',
                        default=1.6,
                        type=float,
                        help='Camera height in meter (not the viewer camera)')
    parser.add_argument('--ppm', default=80, type=int, help='Points per meter')
    parser.add_argument('--point_size',
                        default=0.0025,
                        type=int,
                        help='Point size')
    parser.add_argument('--ignore_floor',
                        action='store_true',
                        help='Skip rendering floor')
    parser.add_argument('--ignore_ceiling',
                        action='store_true',
                        help='Skip rendering ceiling')
    parser.add_argument('--ignore_wireframe',
                        action='store_true',
                        help='Skip rendering wireframe')
    args = parser.parse_args()
    # Reading source (texture img, cor_id txt)
    equirect_texture = np.array(Image.open(args.img)) / 255.0
    H, W = equirect_texture.shape[:2]
    # Layout corners come as normalized uv in a JSON file; scale to pixels.
    with open(args.layout) as f:
        inferenced_result = json.load(f)
    cor_id = np.array(inferenced_result['uv'], np.float32)
    cor_id[:, 0] *= W
    cor_id[:, 1] *= H
    ceil_floor_mask = create_ceiling_floor_mask(cor_id, H, W)
    # Convert cor_id to 3d xyz
    N = len(cor_id) // 2
    floor_z = -args.camera_height
    floor_xy = np_coor2xy(cor_id[1::2], floor_z, W, H, floorW=1, floorH=1)
    # Ceiling height: mean of per-corner estimates from the ceiling rows.
    c = np.sqrt((floor_xy**2).sum(1))
    v = np_coory2v(cor_id[0::2, 1], H)
    ceil_z = (c * np.tan(v)).mean()
    # Prepare the wireframe: floor loop, ceiling loop, vertical edges.
    if not args.ignore_wireframe:
        assert N == len(floor_xy)
        wf_points = [[-x, y, floor_z] for x, y in floor_xy] +\
                    [[-x, y, ceil_z] for x, y in floor_xy]
        wf_lines = [[i, (i+1)%N] for i in range(N)] +\
                   [[i+N, (i+1)%N+N] for i in range(N)] +\
                   [[i, i+N] for i in range(N)]
        wf_colors = [[0, 1, 0] if i % 2 == 0 else [0, 0, 1] for i in range(N)] +\
                    [[0, 1, 0] if i % 2 == 0 else [0, 0, 1] for i in range(N)] +\
                    [[1, 0, 0] for i in range(N)]
        wf_line_set = open3d.geometry.LineSet()
        wf_line_set.points = open3d.utility.Vector3dVector(wf_points)
        wf_line_set.lines = open3d.utility.Vector2iVector(wf_lines)
        wf_line_set.colors = open3d.utility.Vector3dVector(wf_colors)
    # Warp each wall
    all_rgb, all_xyz = warp_walls(equirect_texture, floor_xy, floor_z, ceil_z,
                                  args.ppm)
    # Warp floor and ceiling
    if not args.ignore_floor or not args.ignore_ceiling:
        fi, fp, ci, cp = warp_floor_ceiling(equirect_texture,
                                            ceil_floor_mask,
                                            floor_xy,
                                            floor_z,
                                            ceil_z,
                                            ppm=args.ppm)
        if not args.ignore_floor:
            all_rgb.extend(fi)
            all_xyz.extend(fp)
        if not args.ignore_ceiling:
            all_rgb.extend(ci)
            all_xyz.extend(cp)
    all_xyz = np.array(all_xyz)
    all_rgb = np.array(all_rgb)
    # Mirror the x axis to match the wireframe's coordinate convention.
    all_xyz = all_xyz * np.array([-1,1,1])
    # Filter occluded points
    occlusion_mask, reord_idx = create_occlusion_mask(all_xyz)
    all_xyz = all_xyz[reord_idx][~occlusion_mask]
    all_rgb = all_rgb[reord_idx][~occlusion_mask]
    # Launch point cloud viewer
    print('Showing %d of points...' % len(all_rgb))
    pcd = open3d.geometry.PointCloud()
    pcd.points = open3d.utility.Vector3dVector(all_xyz)
    pcd.colors = open3d.utility.Vector3dVector(all_rgb)
    # Visualize result
    tobe_visualize = [pcd]
    if not args.ignore_wireframe:
        # tobe_visualize.append(wf_line_set)
        # LineMesh renders thick edges (glLineWidth is deprecated).
        line_mesh1 = LineMesh(wf_points, wf_lines, wf_colors, radius=0.04)
        line_mesh1_geoms = line_mesh1.cylinder_segments
        tobe_visualize.extend(line_mesh1_geoms)
    open3d.visualization.RenderOption.line_width = 10.0
    open3d.visualization.draw_geometries(tobe_visualize)
| 15,893 | 37.115108 | 163 | py |
DMH-Net | DMH-Net-main/eval_cuboid.py | import os
import json
import glob
import argparse
import warnings
import numpy as np
from tqdm import tqdm
from scipy.spatial import HalfspaceIntersection
from scipy.spatial import ConvexHull
from misc import post_proc, panostretch
def tri2halfspace(pa, pb, p):
    ''' Helper function for evaluating 3DIoU '''
    # Normal of the triangle (pa, pb, p).
    normal = np.cross(pa - p, pb - p)
    # Flip so the halfspace {x : normal @ x + d <= 0} contains the origin.
    if -normal @ p > 0:
        normal = -normal
    # [a, b, c, d] form expected by scipy.spatial.HalfspaceIntersection.
    return [*normal, -normal @ p]
def xyzlst2halfspaces(xyz_floor, xyz_ceil):
    '''
    Helper function for evaluating 3DIoU
    return halfspace enclose (0, 0, 0)
    '''
    n = xyz_floor.shape[0]
    halfspaces = []
    for i in range(n):
        prev_i = (i - 1 + n) % n
        next_i = (i + 1) % n
        floor_prev = xyz_floor[prev_i]
        floor_next = xyz_floor[next_i]
        floor_cur = xyz_floor[i]
        ceil_prev = xyz_ceil[prev_i]
        ceil_next = xyz_ceil[next_i]
        ceil_cur = xyz_ceil[i]
        # Six halfspaces per corner: floor fan, the two wall triangles
        # touching the floor vertex, ceiling fan, and the two wall triangles
        # touching the ceiling vertex.
        halfspaces.append(tri2halfspace(floor_prev, floor_next, floor_cur))
        halfspaces.append(tri2halfspace(floor_prev, ceil_cur, floor_cur))
        halfspaces.append(tri2halfspace(ceil_cur, floor_next, floor_cur))
        halfspaces.append(tri2halfspace(ceil_prev, ceil_next, ceil_cur))
        halfspaces.append(tri2halfspace(ceil_prev, floor_cur, ceil_cur))
        halfspaces.append(tri2halfspace(floor_cur, ceil_next, ceil_cur))
    return np.array(halfspaces)
def eval_3diou(dt_floor_coor, dt_ceil_coor, gt_floor_coor, gt_ceil_coor, ch=-1.6,
               coorW=1024, coorH=512, floorW=1024, floorH=512):
    ''' Evaluate 3D IoU using halfspace intersection '''
    # np.array(...) copies, so callers' inputs stay untouched.
    dt_floor_coor = np.array(dt_floor_coor)
    dt_ceil_coor = np.array(dt_ceil_coor)
    gt_floor_coor = np.array(gt_floor_coor)
    gt_ceil_coor = np.array(gt_ceil_coor)
    # Floor and ceiling corners must share the same image columns (x coords).
    assert (dt_floor_coor[:, 0] != dt_ceil_coor[:, 0]).sum() == 0
    assert (gt_floor_coor[:, 0] != gt_ceil_coor[:, 0]).sum() == 0
    N = len(dt_floor_coor)
    # Lift floor corners to 3D at camera height ch (floorW/H=1 keeps metric units).
    dt_floor_xyz = np.hstack([
        post_proc.np_coor2xy(dt_floor_coor, ch, coorW, coorH, floorW=1, floorH=1),
        np.zeros((N, 1)) + ch,
    ])
    gt_floor_xyz = np.hstack([
        post_proc.np_coor2xy(gt_floor_coor, ch, coorW, coorH, floorW=1, floorH=1),
        np.zeros((N, 1)) + ch,
    ])
    # Horizontal distance of each corner from the camera axis.
    dt_c = np.sqrt((dt_floor_xyz[:, :2] ** 2).sum(1))
    gt_c = np.sqrt((gt_floor_xyz[:, :2] ** 2).sum(1))
    # Infer ceiling height per corner from the ceiling boundary rows.
    dt_v2 = post_proc.np_coory2v(dt_ceil_coor[:, 1], coorH)
    gt_v2 = post_proc.np_coory2v(gt_ceil_coor[:, 1], coorH)
    dt_ceil_z = dt_c * np.tan(dt_v2)
    gt_ceil_z = gt_c * np.tan(gt_v2)
    dt_ceil_xyz = dt_floor_xyz.copy()
    dt_ceil_xyz[:, 2] = dt_ceil_z
    gt_ceil_xyz = gt_floor_xyz.copy()
    gt_ceil_xyz[:, 2] = gt_ceil_z
    # Represent each room as an intersection of halfspaces; their union of
    # constraints is the geometric intersection of the two rooms.
    dt_halfspaces = xyzlst2halfspaces(dt_floor_xyz, dt_ceil_xyz)
    gt_halfspaces = xyzlst2halfspaces(gt_floor_xyz, gt_ceil_xyz)
    in_halfspaces = HalfspaceIntersection(np.concatenate([dt_halfspaces, gt_halfspaces]), np.zeros(3))
    dt_halfspaces = HalfspaceIntersection(dt_halfspaces, np.zeros(3))
    gt_halfspaces = HalfspaceIntersection(gt_halfspaces, np.zeros(3))
    in_volume = ConvexHull(in_halfspaces.intersections).volume
    dt_volume = ConvexHull(dt_halfspaces.intersections).volume
    gt_volume = ConvexHull(gt_halfspaces.intersections).volume
    un_volume = dt_volume + gt_volume - in_volume
    # Return IoU as a percentage.
    return 100 * in_volume / un_volume
def gen_reg_from_xy(xy, w):
    '''Resample a polyline of (x, y) samples into a dense per-column y regression of width w.'''
    order = np.argsort(xy[:, 0])
    xs, ys = xy[order, 0], xy[order, 1]
    # period=w makes the interpolation wrap around the panorama seam
    return np.interp(np.arange(w), xs, ys, period=w)
def test(dt_cor_id, z0, z1, gt_cor_id, w, h, losses):
    """Evaluate one prediction/GT pair and ACCUMULATE the metrics into `losses`.

    :param dt_cor_id: (8, 2) predicted corners in pixel coords (ceil/floor interleaved)
    :param z0, z1: predicted ceiling/floor plane parameters from inference
    :param gt_cor_id: (8, 2) ground-truth corners in pixel coords
    :param w, h: panorama width/height in pixels
    :param losses: dict with 'CE', 'PE', '3DIoU' lists to append to

    Bug fix: the previous version assigned scalars (`losses['CE'] = ce_loss`),
    clobbering the accumulator lists built by __main__, so the reported means
    reflected only the LAST pair. Metrics are now appended.
    """
    # Eval corner error: mean corner distance, normalized by image diagonal (%)
    mse = np.sqrt(((gt_cor_id - dt_cor_id) ** 2).sum(1)).mean()
    ce_loss = 100 * mse / np.sqrt(w ** 2 + h ** 2)
    # Pixel surface error (3 labels: ceiling, wall, floor)
    y0_dt = []
    y0_gt = []
    y1_gt = []
    for j in range(4):
        # Trace each wall's ceiling boundary (dt and gt) and gt floor boundary
        coorxy = panostretch.pano_connect_points(dt_cor_id[j * 2],
                                                 dt_cor_id[(j * 2 + 2) % 8],
                                                 -z0)
        y0_dt.append(coorxy)
        coorxy = panostretch.pano_connect_points(gt_cor_id[j * 2],
                                                 gt_cor_id[(j * 2 + 2) % 8],
                                                 -z0)
        y0_gt.append(coorxy)
        coorxy = panostretch.pano_connect_points(gt_cor_id[j * 2 + 1],
                                                 gt_cor_id[(j * 2 + 3) % 8],
                                                 z0)
        y1_gt.append(coorxy)
    y0_dt = gen_reg_from_xy(np.concatenate(y0_dt, 0), w)
    # Predicted floor boundary is inferred from the ceiling one and the plane gap
    y1_dt = post_proc.infer_coory(y0_dt, z1 - z0, z0)
    y0_gt = gen_reg_from_xy(np.concatenate(y0_gt, 0), w)
    y1_gt = gen_reg_from_xy(np.concatenate(y1_gt, 0), w)
    # cumsum over rows turns the two boundary rows into a 0/1/2 surface labeling
    surface = np.zeros((h, w), dtype=np.int32)
    surface[np.round(y0_dt).astype(int), np.arange(w)] = 1
    surface[np.round(y1_dt).astype(int), np.arange(w)] = 1
    surface = np.cumsum(surface, axis=0)
    surface_gt = np.zeros((h, w), dtype=np.int32)
    surface_gt[np.round(y0_gt).astype(int), np.arange(w)] = 1
    surface_gt[np.round(y1_gt).astype(int), np.arange(w)] = 1
    surface_gt = np.cumsum(surface_gt, axis=0)
    pe_loss = 100 * (surface != surface_gt).sum() / (h * w)
    # Eval 3d IoU (halfspace intersection can fail on degenerate layouts)
    try:
        iou3d = eval_3diou(dt_cor_id[1::2], dt_cor_id[0::2], gt_cor_id[1::2], gt_cor_id[0::2])
    except Exception:
        warnings.warn("cannot give iou3d!")
        iou3d = 0.0
    losses['CE'].append(ce_loss)
    losses['PE'].append(pe_loss)
    losses['3DIoU'].append(iou3d)
def prepare_gtdt_pairs(gt_glob, dt_glob):
    '''Pair ground-truth files with prediction json files that share a basename stem.'''
    dt_by_key = {
        os.path.split(p)[-1].split('.')[0]: p
        for p in glob.glob(dt_glob) if p.endswith('json')
    }
    pairs = []
    for gt_path in sorted(glob.glob(gt_glob)):
        key = os.path.split(gt_path)[-1].split('.')[0]
        if key in dt_by_key:
            pairs.append((gt_path, dt_by_key[key]))
    return pairs
if __name__ == '__main__':
    # CLI entry point: match prediction jsons against GT txt files and report
    # mean corner error, pixel error and 3D IoU over the whole set.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dt_glob', required=True,
                        help='NOTE: Remeber to quote your glob path.'
                        'Files assumed to be json from inference.py')
    parser.add_argument('--gt_glob', default='data/test/label_cor/*txt',
                        help='NOTE: Remeber to quote your glob path.'
                        'Files assumed to be txt')
    parser.add_argument('--w', default=1024, type=int, help='GT images width')
    parser.add_argument('--h', default=512, type=int, help='GT images height')
    args = parser.parse_args()
    # Prepare (gt, dt) pairs
    gtdt_pairs = prepare_gtdt_pairs(args.gt_glob, args.dt_glob)
    # Testing
    losses = {
        'CE': [],
        'PE': [],
        '3DIoU': [],
    }
    for gt_path, dt_path in tqdm(gtdt_pairs, desc='Testing'):
        # GT: one corner per line as whitespace-separated pixel coords
        with open(gt_path) as f:
            gt_cor_id = np.array([l.split() for l in f], np.float32)
        with open(dt_path) as f:
            dt = json.load(f)
        dt_cor_id = np.array(dt['uv'], np.float32)
        # Predictions are normalized uv; scale to pixel coordinates
        dt_cor_id[:, 0] *= args.w
        dt_cor_id[:, 1] *= args.h
        test(dt_cor_id, dt['z0'], dt['z1'], gt_cor_id, args.w, args.h, losses)
    print(' Testing Result '.center(50, '='))
    print('Corner Error (%):', np.mean(losses['CE']))
    print('Pixel Error (%):', np.mean(losses['PE']))
    print('3DIoU (%):', np.mean(losses['3DIoU']))
    print('=' * 50)
| 7,422 | 35.033981 | 102 | py |
DMH-Net | DMH-Net-main/e2pconvert_torch/convertExUtils.py | from functools import reduce
import numpy as np
import torch
from py360convert import rotation_matrix
from e2pconvert_torch import torch360convert
def rotationMatrix(u, v, in_rot):
    '''Compose the e2p view rotation: pitch v about x, yaw u about y, then an in-plane roll.'''
    pitch = rotation_matrix(v, [1, 0, 0])
    yaw = rotation_matrix(u, [0, 1, 0])
    # The roll axis is the view direction after applying pitch then yaw
    roll = rotation_matrix(in_rot, np.array([0, 0, 1.0]).dot(pitch).dot(yaw))
    return pitch.dot(yaw).dot(roll)
def unitxyzToPerspectiveCoord(input, fov_deg, u_deg, v_deg, out_hw, in_rot_deg=0):
    """
    Convert unit-sphere xyz points to pixel coordinates on one perspective image.
    :param input: (n, 3) tensor, unit xyz coordinates of n points
    :param fov_deg: same as py360convert.e2p
    :param u_deg: same as py360convert.e2p
    :param v_deg: same as py360convert.e2p
    :param out_hw: same as py360convert.e2p
    :param in_rot_deg: same as py360convert.e2p
    :return: tuple: first element is (n, 2), each point's 2D coordinate on the image
        (in pixels); second element is (n, 3), each point's coordinate in the
        perspective camera frame.
    """
    assert len(input.shape) == 2 and input.shape[1] == 3
    h_fov, v_fov = fov_deg[0] * np.pi / 180, fov_deg[1] * np.pi / 180
    in_rot = in_rot_deg * np.pi / 180
    u = -u_deg * np.pi / 180
    v = v_deg * np.pi / 180
    rotMat = rotationMatrix(u, v, in_rot)
    rotMat = input.new_tensor(rotMat)
    imgXyz = input.matmul(rotMat.T)  # for a rotation matrix, the inverse equals the transpose
    imgXyz = imgXyz / torch.abs(imgXyz[:, 2:])  # normalize so |z| becomes 1
    # Compute the visible range on the z=1 plane from the fov
    x_max = np.tan(h_fov / 2)
    y_max = np.tan(v_fov / 2)
    # Linearly map the visible range onto the pixel range
    normed_pos = imgXyz[:, :2] / torch.tensor([x_max, -y_max], dtype=imgXyz.dtype, device=imgXyz.device) / 2 + 0.5
    pos = normed_pos * torch.tensor([out_hw[1] - 1, out_hw[0] - 1], dtype=imgXyz.dtype, device=imgXyz.device)
    return pos, imgXyz
def coordE2P(input_pts, img, fov_deg, u_deg, v_deg, out_hw, in_rot_deg=0, isInputUv=False):
    """
    Convert coordinates under the equirect panorama to perspective-view coordinates.
    :param input_pts: the n input points: pixel coordinates on the panorama, or (u, v)
        angles when isInputUv is True. NOTE(review): the original doc said "(n, 3)
        unitxyz", but the code converts via coor2uv/uv2unitxyz, so 2-column inputs
        are expected — confirm with callers.
    :param img: the equirect panorama image (only its shape is used)
    :param fov_deg: same as py360convert.e2p
    :param u_deg: same as py360convert.e2p
    :param v_deg: same as py360convert.e2p
    :param out_hw: same as py360convert.e2p
    :param in_rot_deg: same as py360convert.e2p
    :return: tuple: (n, 2) per-point 2D image coordinates; int8 (n) per-point type:
        0 = not within the 180-degree view (behind the camera, xy meaningless),
        1 = within the 180-degree view but outside the image, 2 = inside the image;
        plus the (n, 3) perspective-frame xyz.
    """
    if not isInputUv: uv = torch360convert.coor2uv(input_pts, img.shape[1], img.shape[2])
    else:
        uv = input_pts
    xyz = torch360convert.uv2unitxyz(uv)
    result, imgXyz = unitxyzToPerspectiveCoord(xyz, fov_deg, u_deg, v_deg, out_hw, in_rot_deg)
    # Points with positive z are in front of the camera
    type = (imgXyz[:, 2] > 0).to(torch.int8)
    inimage_mask = reduce(torch.logical_and, [type, result[:, 0] >= 0, result[:, 0] <= out_hw[1] - 1, result[:, 1] >= 0,
                                              result[:, 1] <= out_hw[0] - 1])
    type[inimage_mask] = 2
    return result, type, imgXyz
| 2,723 | 35.810811 | 120 | py |
DMH-Net | DMH-Net-main/e2pconvert_torch/e2plabelconvert.py | import numpy as np
import torch
from .convertExUtils import coordE2P
def linesPostProcess(lines, img_hw, is_updown_view):
    """
    Post-process lines: drop invisible lines and normalize/clip their endpoints.
    :param lines: (k, 7), the k lines in the image. Each line is seven numbers: the first
        two are endpoint indices into `points`, then the line type (0 vertical wall line,
        1 ceiling line, 2 floor line), then start x, y, then end x, y.
    :param img_hw: (2), image height and width
    :return: (m, 8), the m visible lines. The first seven numbers are as above; the 8th is
        the line's direction in this view: 0 vertical, 1 horizontal, 2 passing through center.
    """
    # !!!Per the Oct-12 derivation: for points produced by py360convert's e2p/e2c
    # transforms, the 3D range -1~1 corresponds to pixels 0~h-1 in the 2D image, not 0~h!
    # E.g. with h=512, the 2D coordinate 511.5 is meaningless (not visible in this plane)!
    # Hence the adjustment below. Although the difference is at most one pixel, fix it anyway.
    # The same fix was applied to unitxyzToPerspectiveCoord, coordE2P,
    # generateOnePerspectiveLabel and lineCoordToRatio.
    img_hw = [img_hw[0] - 1, img_hw[1] - 1]
    def processPoint(point, k):
        # Clip an endpoint along its line (slope k) onto the image rectangle;
        # return None when no visible clipped position exists.
        xInRange = False
        if point[0] < 0:
            y = k * (0 - point[0]) + point[1]
            if 0 <= y <= img_hw[0]:
                return torch.stack([y.new_tensor(0), y])
        elif point[0] > img_hw[1]:
            y = k * (img_hw[1] - point[0]) + point[1]
            if 0 <= y <= img_hw[0]:
                return torch.stack([y.new_tensor(img_hw[1]), y])
        else:
            xInRange = True
        if point[1] < 0:
            x = point[0]
            if not torch.isinf(k): x = x + (0 - point[1]) / k  # otherwise, when k is inf, forward works but backward yields grad=nan
            if 0 <= x <= img_hw[1]:
                return torch.stack([x, x.new_tensor(0)])
        elif point[1] > img_hw[0]:
            x = point[0]
            if not torch.isinf(k): x = x + (img_hw[0] - point[1]) / k
            if 0 <= x <= img_hw[1]:
                return torch.stack([x, x.new_tensor(img_hw[0])])
        else:
            if xInRange:
                return point
        return None
    result = []
    for line in lines:
        # Slope of the line in image coordinates
        k = (line[6] - line[4]) / (line[5] - line[3])
        p1Res = processPoint(line[3:5], k)
        p2Res = processPoint(line[5:7], k)
        if p1Res is not None and p2Res is not None:
            if line[2] == 0:
                direct = 2 if is_updown_view else 0
            else:
                if is_updown_view:
                    direct = 1 if -1 <= k <= 1 else 0
                else:
                    # Does the line cross the image's vertical center when extended
                    # to the left/right borders?
                    yLR = (k * (0 - p1Res[0]) + p1Res[1], k * (img_hw[1] - p1Res[0]) + p1Res[1])
                    if (yLR[0] < img_hw[0] / 2 and yLR[1] > img_hw[0] / 2) or (
                            yLR[1] < img_hw[0] / 2 and yLR[0] > img_hw[0] / 2):
                        direct = 2
                    else:
                        direct = 1
            result.append(torch.cat((line[0:3], p1Res, p2Res, line.new_tensor([direct]))))
    return result
def generateOnePerspectiveLabel(e_img, e_label, fov_deg, u_deg, v_deg, out_hw,
                                in_rot_deg=0, isInputUv=False):
    """
    Generate label information for the given perspective parameters.
    :param e_img: the equirect panorama image
    :param e_label: (n, 2) ground-truth corner coordinates in the equirect system
    :param fov_deg: same as py360convert.e2p
    :param u_deg: same as py360convert.e2p
    :param v_deg: same as py360convert.e2p
    :param out_hw: same as py360convert.e2p
    :param in_rot_deg: same as py360convert.e2p
    :return: dict describing the points and segments in the image, with fields
        "points", "point_types" and "lines".
        points: (n, 2), each point's x, y image coordinate (float)
        point_types: int (n): 0 = not within the 180-degree view (behind the camera,
            xy meaningless), 1 = within the view but outside the image, 2 = inside the image
        lines: (k, 8), the k lines in the image: endpoint indices, line type
            (0 vertical wall, 1 ceiling, 2 floor), start x, y, end x, y, and the
            view direction (0 vertical, 1 horizontal, 2 passing through center).
    """
    points, point_types, imgXyz = coordE2P(e_label, e_img, fov_deg, u_deg, v_deg, out_hw, in_rot_deg, isInputUv=isInputUv)
    lines = []
    corner_count = e_label.shape[0]
    # Build candidate edges. The first two entries of each triple index into the
    # ordered labelled corners; the third is the edge type:
    # 0 vertical wall line, 1 ceiling line, 2 floor line.
    LINES = []
    for i in range(0, corner_count, 2):
        LINES.append([i, i + 1, 0])
        LINES.append([i, (i + 2) % corner_count, 1])
        LINES.append([i + 1, (i + 3) % corner_count, 2])
    for l in LINES:
        if point_types[l[0]] > 0 and point_types[l[1]] > 0:
            # Both endpoints are within the camera's front 180-degree view
            lines.append(torch.cat([points.new_tensor(l), points[l[0]], points[l[1]]]))
        elif point_types[l[0]] == 2 or point_types[l[1]] == 2:
            # One endpoint is inside the image while the other is behind the camera.
            # Find a point on the connecting line whose projection lies on the border.
            line = torch.cat([points.new_tensor(l), points[l[0]], points[l[1]]])
            if point_types[l[0]] == 2:
                p1 = imgXyz[l[0]]
                p2 = imgXyz[l[1]]
                toFill = 1
            else:
                p1 = imgXyz[l[1]]
                p2 = imgXyz[l[0]]
                toFill = 0
            tantheta = np.tan((180 - fov_deg[0]) / 2 * np.pi / 180)
            # pc: intersection of segment p1-p2 with the FOV plane. k should stay in
            # 0.5~1; pc's x should end up very close to the border (1 or -1).
            k1 = (p2[0] * tantheta - p2[2]) / ((p1[2] - p2[2]) - ((p1[0] - p2[0]) * tantheta))
            k2 = (p2[0] * -tantheta - p2[2]) / ((p1[2] - p2[2]) - ((p1[0] - p2[0]) * -tantheta))
            k = k1 if 0.5 <= k1 <= 1 else k2
            pc = p1 * k + p2 * (1 - k)
            assert 0.5 <= k <= 1 and pc[2] > 0, "k error"
            pc = pc / pc[2]
            assert -0.01 <= (abs(pc[0]) - 1) <= 0.01, "pc error"
            # Map pc's coordinate onto the image
            h_fov, v_fov = fov_deg[0] * np.pi / 180, fov_deg[1] * np.pi / 180
            x_max = np.tan(h_fov / 2)
            y_max = np.tan(v_fov / 2)
            normed_pos = pc[:2] / torch.tensor([x_max, -y_max], dtype=imgXyz.dtype, device=imgXyz.device) / 2 + 0.5
            pos = normed_pos * torch.tensor([out_hw[1] - 1, out_hw[0] - 1], dtype=imgXyz.dtype, device=imgXyz.device)
            if toFill == 0:
                line = torch.cat([line[0:3], pos, line[5:7]])
            elif toFill == 1:
                line = torch.cat([line[0:5], pos])
            # line[2 * toFill + 3:2 * toFill + 5] = pos  # fill in pc's image coordinate
            lines.append(line)
    lines = linesPostProcess(lines, out_hw, v_deg != 0)  # remove invisible lines
    result = {
        "points": points,
        "point_types": point_types,
        "lines": lines
    }
    return result
| 6,202 | 41.197279 | 132 | py |
DMH-Net | DMH-Net-main/e2pconvert_torch/torch360convert.py | import numpy as np
import torch
def coor2uv(coorxy, h, w):
    '''Convert equirect pixel coordinates (x, y) of an (h, w) image to (u, v) sphere angles.'''
    col = coorxy[:, 0:1]
    row = coorxy[:, 1:2]
    lon = 2 * np.pi * ((col + 0.5) / w - 0.5)
    lat = -np.pi * ((row + 0.5) / h - 0.5)
    return torch.cat([lon, lat], -1)
def uv2unitxyz(uv):
    '''Lift (u, v) sphere angles to a unit 3D direction (x, y, z).'''
    lon, lat = uv[:, 0:1], uv[:, 1:2]
    cos_lat = torch.cos(lat)
    return torch.cat([cos_lat * torch.sin(lon),
                      torch.sin(lat),
                      cos_lat * torch.cos(lon)], -1)
def uv2coor(uv, h, w):
    '''
    Convert (u, v) sphere angles back to equirect pixel coordinates.
    uv: tensor in shape of [..., 2]
    h: int, height of the equirectangular image
    w: int, width of the equirectangular image
    '''
    lon, lat = uv[:, 0:1], uv[:, 1:2]
    col = w * (lon / (2 * np.pi) + 0.5) - 0.5
    row = h * (-lat / np.pi + 0.5) - 0.5
    return torch.cat([col, row], -1)
def xyz2uv(xyz):
    '''
    Project 3D points back to (u, v) sphere angles.
    xyz: tensor in shape of [..., 3]
    '''
    x, y, z = xyz[:, 0:1], xyz[:, 1:2], xyz[:, 2:3]
    lon = torch.atan2(x, z)
    # Latitude from the height y over the horizontal radius
    lat = torch.atan2(y, torch.sqrt(x ** 2 + z ** 2))
    return torch.cat([lon, lat], -1)
| 1,010 | 20.978261 | 51 | py |
DMH-Net | DMH-Net-main/postprocess/LayoutNet_post_proc2.py | import numpy as np
from scipy.ndimage import map_coordinates
from scipy.spatial.distance import pdist, squareform
from sklearn.decomposition import PCA
PI = float(np.pi)
def fuv2img(fuv, coorW=1024, floorW=1024, floorH=512):
    '''
    Project 1d signal in uv space to 2d floor plane image
    '''
    grid_x, grid_y = np.meshgrid(range(floorW), range(floorH))
    # Center the floor grid on the camera and convert each floor pixel to its
    # panorama column index via its azimuth angle
    px, py = -(grid_y - floorH / 2), grid_x - floorW / 2
    coridx = (np.arctan2(py, px) / (2 * PI) + 0.5) * coorW - 0.5
    sampled = map_coordinates(fuv, coridx.reshape(1, -1), order=1, mode='wrap')
    return sampled.reshape(floorH, floorW)
def np_coorx2u(coorx, coorW=1024):
    # Map image column index to longitude u in [-pi, pi)
    return 2 * PI * ((coorx + 0.5) / coorW - 0.5)
def np_coory2v(coory, coorH=512):
    # Map image row index to latitude v; top rows map to positive angles
    return -PI * ((coory + 0.5) / coorH - 0.5)
def np_coor2xy(coor, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512):
    '''
    Project panorama pixel coordinates onto the plane at height z.
    coor: N x 2, index of array in (col, row) format
    Returns N x 2 (x, y) positions in the floor-plane image frame.
    '''
    coor = np.array(coor)
    u = np_coorx2u(coor[:, 0], coorW)
    v = np_coory2v(coor[:, 1], coorH)
    # Horizontal distance from the camera axis at plane height z
    c = z / np.tan(v)
    x = c * np.sin(u) + floorW / 2 - 0.5
    y = -c * np.cos(u) + floorH / 2 - 0.5
    return np.hstack([x[:, None], y[:, None]])
def np_x_u_solve_y(x, u, floorW=1024, floorH=512):
    # Given a floor-plane x and a view angle u, solve for the matching floor-plane y
    dist = (x - floorW / 2 + 0.5) / np.sin(u)
    return floorH / 2 - 0.5 - dist * np.cos(u)
def np_y_u_solve_x(y, u, floorW=1024, floorH=512):
    # Given a floor-plane y and a view angle u, solve for the matching floor-plane x
    dist = (floorH / 2 - 0.5 - y) / np.cos(u)
    return floorW / 2 - 0.5 + dist * np.sin(u)
def np_xy2coor(xy, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512):
    '''
    Inverse of np_coor2xy: map floor-plane (x, y) at height z back to panorama pixels.
    xy: N x 2
    '''
    dx = xy[:, 0] - floorW / 2 + 0.5
    dy = xy[:, 1] - floorH / 2 + 0.5
    u = np.arctan2(dx, -dy)
    v = np.arctan(z / np.sqrt(dx ** 2 + dy ** 2))
    coorx = coorW * (u / (2 * PI) + 0.5) - 0.5
    coory = coorH * (0.5 - v / PI) - 0.5
    return np.hstack([coorx[:, None], coory[:, None]])
def mean_percentile(vec, p1=25, p2=75):
    '''Robust mean: average only the values between the p1-th and p2-th percentiles.'''
    lo, hi = np.percentile(vec, [p1, p2])
    return vec[(lo <= vec) & (vec <= hi)].mean()
def vote(vec, tol):
    '''Find the densest cluster of 1D values within distance `tol` and return
    (best_fit, p_score, l1_score): the cluster mean, the fraction of values in
    the cluster, and the mean L1 deviation of all values from best_fit.
    Falls back to the median (p_score 0) when no valid cluster exists.'''
    vec = np.sort(vec)
    # n[i, j] = number of sorted values in span i..j (for j > i)
    n = np.arange(len(vec))[::-1]
    n = n[:, None] - n[None, :] + 1.0
    # l[i, j] = |vec[i] - vec[j]| (pairwise span widths)
    l = squareform(pdist(vec[:, None], 'minkowski', p=1) + 1e-9)
    # A span is valid if it holds >= 40% of the values and is narrower than tol
    invalid = (n < len(vec) * 0.4) | (l > tol)
    # NOTE(review): `len(vec) < tol` compares a count against a distance
    # tolerance — looks suspicious, but behavior is kept as-is; confirm intent.
    if (~invalid).sum() == 0 or len(vec) < tol:
        best_fit = np.median(vec)
        p_score = 0
    else:
        l[invalid] = 1e5
        n[invalid] = -1
        score = n
        max_idx = score.argmax()
        max_row = max_idx // len(vec)
        max_col = max_idx % len(vec)
        assert max_col > max_row
        best_fit = vec[max_row:max_col+1].mean()
        p_score = (max_col - max_row + 1) / len(vec)
    l1_score = np.abs(vec - best_fit).mean()
    return best_fit, p_score, l1_score
def get_z1(coory0, coory1, z0=50, coorH=512):
    '''Infer the second plane height z1 from two boundary rows, assuming plane 0 is at z0.'''
    v0 = np_coory2v(coory0, coorH)
    v1 = np_coory2v(coory1, coorH)
    dist = z0 / np.tan(v0)  # horizontal distance to the wall
    return dist * np.tan(v1)
def np_refine_by_fix_z(coory0, coory1, z0=50, coorH=512):
    '''
    Refine coory1 by coory0
    coory0 are assumed on given plane z
    Returns the refined rows for boundary 1 plus the fitted plane height z1_mean.
    '''
    v0 = np_coory2v(coory0, coorH)
    v1 = np_coory2v(coory1, coorH)
    # Horizontal wall distance per column, implied by the fixed plane z0
    c0 = z0 / np.tan(v0)
    z1 = c0 * np.tan(v1)
    # Robust average plane height for boundary 1
    z1_mean = mean_percentile(z1)
    v1_refine = np.arctan2(z1_mean, c0)
    coory1_refine = (-v1_refine / PI + 0.5) * coorH - 0.5
    return coory1_refine, z1_mean
def infer_coory(coory0, h, z0=50, coorH=512):
    '''Given boundary rows on the z0 plane, infer the rows of the parallel plane at z0 + h.'''
    dist = z0 / np.tan(np_coory2v(coory0, coorH))
    v1 = np.arctan2(z0 + h, dist)
    return (-v1 / PI + 0.5) * coorH - 0.5
def get_gpid(coorx, coorW):
    '''Assign a wall-group id to every image column, splitting at the given corner columns.'''
    marks = np.zeros(coorW)
    marks[np.round(coorx).astype(int)] = 1
    gpid = np.cumsum(marks).astype(int)
    # Columns after the last corner wrap around to group 0
    gpid[gpid == gpid[-1]] = 0
    return gpid
def get_gpid_idx(gpid, j):
    '''Return the column indices of group j, rolled so a wrap-around group is contiguous.'''
    idx = np.where(gpid == j)[0]
    if idx[0] == 0 and idx[-1] != len(idx) - 1:
        # Group wraps past the right edge: rotate so the wrapped tail comes first
        first_gap = np.where(idx != np.arange(len(idx)))[0][0]
        idx = np.roll(idx, -first_gap)
    return idx
def gpid_two_split(xy, tpid_a, tpid_b):
    '''Split the point sequence into a prefix averaged on column tpid_a and a suffix
    averaged on column tpid_b, choosing the split that maximizes the combined score.'''
    m = np.arange(len(xy)) + 1
    cum_a = np.cumsum(xy[:, tpid_a])
    cum_b = np.cumsum(xy[::-1, tpid_b])
    score_a = cum_a / m - cum_a / (m * m)
    score_b = (cum_b / m - cum_b / (m * m))[::-1]
    # Best split index (at least one element on each side)
    split = (score_a[:-1] + score_b[1:]).argmax() + 1
    return xy[:split, tpid_a].mean(), xy[split:, tpid_b].mean()
def _get_rot_rad(px, py):
if px < 0:
px, py = -px, -py
rad = np.arctan2(py, px) * 180 / np.pi
if rad > 45:
return 90 - rad
if rad < -45:
return -90 - rad
return -rad
def get_rot_rad(init_coorx, coory, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512, tol=5):
    '''Estimate the global rotation that axis-aligns the layout.

    Each wall segment votes for a rotation via PCA of its floor-plane points;
    the largest run of mutually-close suggestions (within `tol` degrees) wins.

    :param init_coorx: corner columns splitting the panorama into wall segments
    :param coory: per-column boundary rows on the plane at height z
    :return: (dx, rot_rad) — equivalent pixel shift for a 1024-wide panorama,
        and the rotation in degrees

    Fix: removed the unused local `xy_cor = []` left over from a copy of gen_ww.
    '''
    gpid = get_gpid(init_coorx, coorW)
    coor = np.hstack([np.arange(coorW)[:, None], coory[:, None]])
    xy = np_coor2xy(coor, z, coorW, coorH, floorW, floorH)
    rot_rad_suggestions = []
    for j in range(len(init_coorx)):
        pca = PCA(n_components=1)
        pca.fit(xy[gpid == j])
        rot_rad_suggestions.append(_get_rot_rad(*pca.components_[0]))
    # The 1e9 sentinel terminates the cluster scan below
    rot_rad_suggestions = np.sort(rot_rad_suggestions + [1e9])
    rot_rad = np.mean(rot_rad_suggestions[:-1])
    best_rot_rad_sz = -1
    last_j = 0
    for j in range(1, len(rot_rad_suggestions)):
        if rot_rad_suggestions[j] - rot_rad_suggestions[j - 1] > tol:
            # Gap larger than tol starts a new cluster
            last_j = j
        elif j - last_j > best_rot_rad_sz:
            rot_rad = rot_rad_suggestions[last_j:j + 1].mean()
            best_rot_rad_sz = j - last_j
    dx = int(round(rot_rad * 1024 / 360))
    return dx, rot_rad
def gen_ww_cuboid(xy, gpid, tol):
    '''Fit exactly four walls (cuboid assumption) from grouped floor-plane points.
    Each wall is a dict {'type': 0 (x=const) or 1 (y=const), 'val', 'score'}.'''
    xy_cor = []
    assert len(np.unique(gpid)) == 4
    # For each part seperated by wall-wall peak, voting for a wall
    for j in range(4):
        now_x = xy[gpid == j, 0]
        now_y = xy[gpid == j, 1]
        new_x, x_score, x_l1 = vote(now_x, tol)
        new_y, y_score, y_l1 = vote(now_y, tol)
        # Pick the axis with the better (score, -l1) vote
        if (x_score, -x_l1) > (y_score, -y_l1):
            xy_cor.append({'type': 0, 'val': new_x, 'score': x_score})
        else:
            xy_cor.append({'type': 1, 'val': new_y, 'score': y_score})
    # Sanity fallback: a cuboid must alternate wall types; force the
    # alternation that has the stronger overall support.
    scores = [0, 0]
    for j in range(4):
        if xy_cor[j]['type'] == 0:
            scores[j % 2] += xy_cor[j]['score']
        else:
            scores[j % 2] -= xy_cor[j]['score']
    if scores[0] > scores[1]:
        xy_cor[0]['type'] = 0
        xy_cor[1]['type'] = 1
        xy_cor[2]['type'] = 0
        xy_cor[3]['type'] = 1
    else:
        xy_cor[0]['type'] = 1
        xy_cor[1]['type'] = 0
        xy_cor[2]['type'] = 1
        xy_cor[3]['type'] = 0
    return xy_cor
def gen_ww_general(init_coorx, xy, gpid, tol):
    '''Fit a general (non-cuboid) Manhattan wall sequence from grouped floor-plane
    points. Walls are greedily fixed from highest to lowest vote score; conflicting
    neighbors are resolved by deferring, forcing a type flip, or inserting inferred
    walls. Returns a list of wall dicts (see keys built below).'''
    xy_cor = []
    assert len(init_coorx) == len(np.unique(gpid))
    # Candidate for each part seperated by wall-wall boundary
    for j in range(len(init_coorx)):
        now_x = xy[gpid == j, 0]
        now_y = xy[gpid == j, 1]
        new_x, x_score, x_l1 = vote(now_x, tol)
        new_y, y_score, y_l1 = vote(now_y, tol)
        u0 = np_coorx2u(init_coorx[(j - 1 + len(init_coorx)) % len(init_coorx)])
        u1 = np_coorx2u(init_coorx[j])
        if (x_score, -x_l1) > (y_score, -y_l1):
            xy_cor.append({'type': 0, 'val': new_x, 'score': x_score, 'action': 'ori', 'gpid': j, 'u0': u0, 'u1': u1, 'tbd': True})
        else:
            xy_cor.append({'type': 1, 'val': new_y, 'score': y_score, 'action': 'ori', 'gpid': j, 'u0': u0, 'u1': u1, 'tbd': True})
    # Construct wall from highest score to lowest
    while True:
        # Finding undetermined wall with highest score
        tbd = -1
        for i in range(len(xy_cor)):
            if xy_cor[i]['tbd'] and (tbd == -1 or xy_cor[i]['score'] > xy_cor[tbd]['score']):
                tbd = i
        if tbd == -1:
            break
        # This wall is determined
        xy_cor[tbd]['tbd'] = False
        p_idx = (tbd - 1 + len(xy_cor)) % len(xy_cor)
        n_idx = (tbd + 1) % len(xy_cor)
        num_tbd_neighbor = xy_cor[p_idx]['tbd'] + xy_cor[n_idx]['tbd']
        # Two adjacency walls are not determined yet => not special case
        if num_tbd_neighbor == 2:
            continue
        # Only one of adjacency two walls is determine => add now or later special case
        if num_tbd_neighbor == 1:
            if (not xy_cor[p_idx]['tbd'] and xy_cor[p_idx]['type'] == xy_cor[tbd]['type']) or\
                    (not xy_cor[n_idx]['tbd'] and xy_cor[n_idx]['type'] == xy_cor[tbd]['type']):
                # Current wall is different from one determined adjacency wall
                if xy_cor[tbd]['score'] >= -1:
                    # Later special case, add current to tbd
                    xy_cor[tbd]['tbd'] = True
                    xy_cor[tbd]['score'] -= 100
                else:
                    # Fallback: forced change the current wall or infinite loop
                    if not xy_cor[p_idx]['tbd']:
                        insert_at = tbd
                        if xy_cor[p_idx]['type'] == 0:
                            new_val = np_x_u_solve_y(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                            new_type = 1
                        else:
                            new_val = np_y_u_solve_x(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                            new_type = 0
                    else:
                        insert_at = n_idx
                        if xy_cor[n_idx]['type'] == 0:
                            new_val = np_x_u_solve_y(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
                            new_type = 1
                        else:
                            new_val = np_y_u_solve_x(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
                            new_type = 0
                    new_add = {'type': new_type, 'val': new_val, 'score': 0, 'action': 'forced infer', 'gpid': -1, 'u0': -1, 'u1': -1, 'tbd': False}
                    xy_cor.insert(insert_at, new_add)
            continue
        # Below checking special case
        if xy_cor[p_idx]['type'] == xy_cor[n_idx]['type']:
            # Two adjacency walls are same type, current wall should be differen type
            if xy_cor[tbd]['type'] == xy_cor[p_idx]['type']:
                # Fallback: three walls with same type => forced change the middle wall
                xy_cor[tbd]['type'] = (xy_cor[tbd]['type'] + 1) % 2
                xy_cor[tbd]['action'] = 'forced change'
                xy_cor[tbd]['val'] = xy[gpid == xy_cor[tbd]['gpid'], xy_cor[tbd]['type']].mean()
        else:
            # Two adjacency walls are different type => add one
            tp0 = xy_cor[n_idx]['type']
            tp1 = xy_cor[p_idx]['type']
            if xy_cor[p_idx]['type'] == 0:
                val0 = np_x_u_solve_y(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                val1 = np_y_u_solve_x(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
            else:
                val0 = np_y_u_solve_x(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                val1 = np_x_u_solve_y(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
            new_add = [
                {'type': tp0, 'val': val0, 'score': 0, 'action': 'forced infer', 'gpid': -1, 'u0': -1, 'u1': -1, 'tbd': False},
                {'type': tp1, 'val': val1, 'score': 0, 'action': 'forced infer', 'gpid': -1, 'u0': -1, 'u1': -1, 'tbd': False},
            ]
            xy_cor = xy_cor[:tbd] + new_add + xy_cor[tbd+1:]
    return xy_cor
def gen_ww(init_coorx, coory, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512, tol=3, force_cuboid=True):
    '''Generate wall-wall corners: project the boundary onto the z plane, fit walls
    (cuboid or general), then convert wall intersections back to panorama coords.
    Returns (cor, xy_cor): corner pixel coords and the fitted wall list.'''
    gpid = get_gpid(init_coorx, coorW)
    coor = np.hstack([np.arange(coorW)[:, None], coory[:, None]])
    xy = np_coor2xy(coor, z, coorW, coorH, floorW, floorH)
    # Generate wall-wall
    if force_cuboid:
        xy_cor = gen_ww_cuboid(xy, gpid, tol)
    else:
        xy_cor = gen_ww_general(init_coorx, xy, gpid, tol)
    # Ceiling view to normal view: each corner is the intersection of two
    # consecutive axis-aligned walls.
    cor = []
    for j in range(len(xy_cor)):
        next_j = (j + 1) % len(xy_cor)
        if xy_cor[j]['type'] == 1:
            cor.append((xy_cor[next_j]['val'], xy_cor[j]['val']))
        else:
            cor.append((xy_cor[j]['val'], xy_cor[next_j]['val']))
    cor = np_xy2coor(np.array(cor), z, coorW, coorH, floorW, floorH)
    # Roll so the leftmost corner comes first
    cor = np.roll(cor, -2 * cor[::2, 0].argmin(), axis=0)
    return cor, xy_cor
| 12,474 | 33.652778 | 148 | py |
DMH-Net | DMH-Net-main/postprocess/LayoutNetv2.py | import numpy as np
import scipy.signal
import torch
from scipy.ndimage.filters import maximum_filter
from torch import optim
import postprocess.LayoutNet_post_proc2 as post_proc
from scipy.ndimage import convolve, map_coordinates
from shapely.geometry import Polygon
def LayoutNetv2PostProcessMain(cor_img: np.ndarray, edg_img: np.ndarray) -> np.ndarray:
    """
    Turn network corner/edge probability maps into refined layout corner coordinates.
    :param cor_img ndarray<512,1024> corner probability map
    :param edg_img ndarray<512,1024,3> edge probability map (channels: wall?, ceiling, floor)
    Returns an (n, 2) array of corner pixel coordinates (ceiling/floor interleaved).
    """
    # general layout, tp view
    cor_ = cor_img.sum(0)
    cor_ = (cor_ - np.amin(cor_)) / np.ptp(cor_)
    min_v = 0.25 # 0.05
    xs_ = find_N_peaks(cor_, r=26, min_v=min_v, N=None)[0]
    # spetial case for too less corner: progressively relax the threshold
    if xs_.shape[0] < 4:
        xs_ = find_N_peaks(cor_, r=26, min_v=0.05, N=None)[0]
        if xs_.shape[0] < 4:
            xs_ = find_N_peaks(cor_, r=26, min_v=0, N=None)[0]
    # get ceil and floor line (per-column argmax of the edge channels)
    ceil_img = edg_img[:, :, 1]
    floor_img = edg_img[:, :, 2]
    ceil_idx = np.argmax(ceil_img, axis=0)
    floor_idx = np.argmax(floor_img, axis=0)
    # Init floor/ceil plane
    z0 = 50
    force_cuboid = False
    _, z1 = post_proc.np_refine_by_fix_z(ceil_idx, floor_idx, z0)
    # Generate general wall-wall
    cor, xy_cor = post_proc.gen_ww(xs_, ceil_idx, z0, tol=abs(0.16 * z1 / 1.6), force_cuboid=force_cuboid)
    if not force_cuboid:
        # Check valid (for fear self-intersection)
        xy2d = np.zeros((len(xy_cor), 2), np.float32)
        for i in range(len(xy_cor)):
            xy2d[i, xy_cor[i]['type']] = xy_cor[i]['val']
            xy2d[i, xy_cor[i - 1]['type']] = xy_cor[i - 1]['val']
        if not Polygon(xy2d).is_valid:
            # actually it's not force cuboid, just assume all corners are visible, go back to original LayoutNet initialization
            # print(
            #     'Fail to generate valid general layout!! '
            #     'Generate cuboid as fallback.',
            #     file=sys.stderr)
            cor_id = get_ini_cor(cor_img, 21, 3)
            force_cuboid = True
    if not force_cuboid:
        # Expand with btn coory
        cor = np.hstack([cor, post_proc.infer_coory(cor[:, 1], z1 - z0, z0)[:, None]])
        # Collect corner position in equirectangular
        cor_id = np.zeros((len(cor) * 2, 2), np.float32)
        for j in range(len(cor)):
            cor_id[j * 2] = cor[j, 0], cor[j, 1]
            cor_id[j * 2 + 1] = cor[j, 0], cor[j, 2]
    # refinement: gradient-based optimization against the score maps
    cor_id = optimize_cor_id(cor_id, edg_img, cor_img, num_iters=100, verbose=False)
    return cor_id
def find_N_peaks(signal, r=29, min_v=0.05, N=None):
    '''Find local maxima of a 1D periodic signal within radius r, above min_v.

    If the peak count is odd, the weakest peak is dropped (corners come in pairs).
    When N is given, only the N strongest peaks are kept.
    Returns (peak_locations, peak_values), locations sorted ascending.
    '''
    local_max = maximum_filter(signal, size=r, mode='wrap')
    peaks = np.where(local_max == signal)[0]
    peaks = peaks[signal[peaks] > min_v]
    if len(peaks) % 2 != 0:
        # An odd number of corners cannot form wall pairs: drop the weakest one
        keep = np.argsort(-signal[peaks])[:-1]
        peaks = np.sort(peaks[keep])
    if N is not None:
        strongest = np.argsort(-signal[peaks])[:N]
        peaks = np.sort(peaks[strongest])
    return peaks, signal[peaks]
def get_ini_cor(cor_img, d1=21, d2=3):
    '''Initial corner estimate (LayoutNet-style): find corner columns from the
    column-summed corner map, then take the two strongest row peaks per column.
    :param cor_img: (512, 1024) corner probability map
    :param d1: box-filter size for smoothing the corner map
    :param d2: half-width of the column strip used for the vertical peak search
    Returns an (2k, 2) float64 array of (x, y) corner coordinates.'''
    cor = convolve(cor_img, np.ones((d1, d1)), mode='constant', cval=0.0)
    cor_id = []
    cor_ = cor_img.sum(0)
    cor_ = (cor_ - np.amin(cor_)) / np.ptp(cor_)
    min_v = 0.25 # 0.05
    xs_ = find_N_peaks(cor_, r=26, min_v=min_v, N=None)[0]
    # spetial case for too less corner: progressively relax the threshold
    if xs_.shape[0] < 4:
        xs_ = find_N_peaks(cor_, r=26, min_v=0.05, N=None)[0]
        if xs_.shape[0] < 4:
            xs_ = find_N_peaks(cor_, r=26, min_v=0, N=None)[0]
    X_loc = xs_
    for x in X_loc:
        x_ = int(np.round(x))
        # Vertical profile of the smoothed corner map around this column
        V_signal = cor[:, max(0, x_ - d2):x_ + d2 + 1].sum(1)
        y1, y2 = find_N_peaks_conv(V_signal, prominence=None,
                                   distance=20, N=2)[0]
        cor_id.append((x, y1))
        cor_id.append((x, y2))
    cor_id = np.array(cor_id, np.float64)
    return cor_id
def find_N_peaks_conv(signal, prominence, distance, N=4):
    '''Detect up to N strongest peaks via scipy.signal.find_peaks, returned in index order.'''
    locs, _ = scipy.signal.find_peaks(signal,
                                      prominence=prominence,
                                      distance=distance)
    # Keep the N highest peaks, then restore ascending index order
    strongest = np.argsort(-signal[locs])[:min(N, len(locs))]
    pk_loc = np.sort(locs[strongest])
    return pk_loc, signal[pk_loc]
def optimize_cor_id(cor_id, scoreedg, scorecor, num_iters=100, verbose=False):
    """Refine corner coordinates by SGD on a layout parameterization
    (center pc, corner vector pc_vec, angles pc_theta, height pc_height),
    maximizing the corner/edge score maps via project2sphere_score.
    Returns the refined (2k, 2) corner array (ceiling/floor interleaved)."""
    assert scoreedg.shape == (512, 1024, 3)
    assert scorecor.shape == (512, 1024)
    Z = -1
    ceil_cor_id = cor_id[0::2]
    floor_cor_id = cor_id[1::2]
    # Snap ceiling corners onto the common plane z = Z
    ceil_cor_id, ceil_cor_id_xy = constraint_cor_id_same_z(ceil_cor_id, scorecor, Z)
    # ceil_cor_id_xyz = np.hstack([ceil_cor_id_xy, np.zeros(4).reshape(-1, 1) + Z])
    ceil_cor_id_xyz = np.hstack([ceil_cor_id_xy, np.zeros(ceil_cor_id.shape[0]).reshape(-1, 1) + Z])
    # TODO: revise here to general layout
    # pc = (ceil_cor_id_xy[0] + ceil_cor_id_xy[2]) / 2
    # print(ceil_cor_id_xy)
    if abs(ceil_cor_id_xy[0, 0] - ceil_cor_id_xy[1, 0]) > abs(ceil_cor_id_xy[0, 1] - ceil_cor_id_xy[1, 1]):
        ceil_cor_id_xy = np.concatenate((ceil_cor_id_xy[1:, :], ceil_cor_id_xy[:1, :]), axis=0)
    # print(cor_id)
    # print(ceil_cor_id_xy)
    pc = np.mean(ceil_cor_id_xy, axis=0)
    pc_vec = ceil_cor_id_xy[0] - pc
    pc_theta = vecang(pc_vec, ceil_cor_id_xy[1] - pc)
    pc_height = fit_avg_z(floor_cor_id, ceil_cor_id_xy, scorecor)
    # General (>4 corner) layouts parameterize extra corners by alternating x/y coords
    if ceil_cor_id_xy.shape[0] > 4:
        pc_theta = np.array([ceil_cor_id_xy[1, 1]])
        for c_num in range(2, ceil_cor_id_xy.shape[0] - 1):
            if (c_num % 2) == 0:
                pc_theta = np.append(pc_theta, ceil_cor_id_xy[c_num, 0])
            else:
                pc_theta = np.append(pc_theta, ceil_cor_id_xy[c_num, 1])
    with torch.enable_grad():
        scoreedg = torch.FloatTensor(scoreedg)
        scorecor = torch.FloatTensor(scorecor)
        pc = torch.FloatTensor(pc)
        pc_vec = torch.FloatTensor(pc_vec)
        pc_theta = torch.FloatTensor([pc_theta])
        pc_height = torch.FloatTensor([pc_height])
        pc.requires_grad = True
        pc_vec.requires_grad = True
        pc_theta.requires_grad = True
        pc_height.requires_grad = True
        # print(pc_theta)
        # time.sleep(2)
        # return cor_id
        optimizer = optim.SGD([
            pc, pc_vec, pc_theta, pc_height
        ], lr=1e-3, momentum=0.9)
        # Keep the best-scoring parameters seen during optimization
        best = {'score': 1e9}
        for i_step in range(num_iters):
            i = i_step if verbose else None
            optimizer.zero_grad()
            score = project2sphere_score(pc, pc_vec, pc_theta, pc_height, scoreedg, scorecor, i)
            if score.item() < best['score']:
                best['score'] = score.item()
                best['pc'] = pc.clone()
                best['pc_vec'] = pc_vec.clone()
                best['pc_theta'] = pc_theta.clone()
                best['pc_height'] = pc_height.clone()
            score.backward()
            optimizer.step()
    pc = best['pc']
    pc_vec = best['pc_vec']
    pc_theta = best['pc_theta']
    pc_height = best['pc_height']
    opt_cor_id = pc2cor_id(pc, pc_vec, pc_theta, pc_height).detach().numpy()
    # Interleave the ceiling half and floor half back into (2k, 2)
    split_num = int(opt_cor_id.shape[0] // 2)
    opt_cor_id = np.stack([opt_cor_id[:split_num], opt_cor_id[split_num:]], axis=1).reshape(split_num * 2, 2)
    # print(opt_cor_id)
    # print(cor_id)
    # time.sleep(500)
    return opt_cor_id
def constraint_cor_id_same_z(cor_id, cor_img, z=-1):
    '''Re-project corner pixel coordinates so they all lie on the same plane z.

    Returns the snapped (col, row) image coordinates and the corners' (x, y)
    positions on the z plane.
    '''
    h, w = cor_img.shape[0], cor_img.shape[1]
    # Pixel -> spherical angles
    u = ((cor_id[:, 0] + 0.5) / w - 0.5) * 2 * np.pi
    v = ((cor_id[:, 1] + 0.5) / h - 0.5) * np.pi
    # Spherical -> plane z: radial distance from the camera axis
    radius = z / np.tan(v)
    xy = np.stack([radius * np.cos(u), radius * np.sin(u)], axis=0).T
    # Plane -> spherical -> pixel again
    u_back = np.arctan2(xy[:, 1], xy[:, 0])
    v_back = np.arctan2(z, np.sqrt((xy ** 2).sum(1)))
    col = (u_back / (2 * np.pi) + 0.5) * w - 0.5
    row = (v_back / np.pi + 0.5) * h - 0.5
    return np.stack([col, row], axis=0).T, xy
def fit_avg_z(cor_id, cor_id_xy, cor_img):
    '''Corner-score-weighted average plane height implied by the floor corners.'''
    weight = map_coordinates(cor_img, [cor_id[:, 1], cor_id[:, 0]])
    radius = np.sqrt((cor_id_xy ** 2).sum(1))
    v = ((cor_id[:, 1] + 0.5) / cor_img.shape[0] - 0.5) * np.pi
    z = radius * np.tan(v)
    return (z * weight).sum() / weight.sum()
def map_coordinates_Pytorch(input, coordinates):
    ''' PyTorch version of scipy.ndimage.interpolation.map_coordinates
    input: (H, W)
    coordinates: (2, ...)
    Bilinearly samples ``input`` at fractional (row, col) positions,
    wrapping integer indices around the borders (periodic padding).
    '''
    height, width = input.shape[0], input.shape[1]

    def _wrap(idx):
        # Periodic padding: indices wrap around both image borders.
        idx[0] = idx[0] % height
        idx[1] = idx[1] % width
        return idx

    lo = torch.floor(coordinates).long()
    hi = torch.ceil(coordinates).long()
    # Fractional weights are taken before wrapping, from the raw floor.
    frac_col = coordinates[1] - lo[1].float()
    frac_row = coordinates[0] - lo[0].float()
    lo = _wrap(lo)
    hi = _wrap(hi)
    top_left = input[lo[0], lo[1]]
    top_right = input[lo[0], hi[1]]
    bot_left = input[hi[0], lo[1]]
    bot_right = input[hi[0], hi[1]]
    top = top_left + frac_col * (top_right - top_left)
    bottom = bot_left + frac_col * (bot_right - bot_left)
    return top + frac_row * (bottom - top)
def project2sphere_score(pc, pc_vec, pc_theta, pc_height, scoreedg, scorecor, i_step=None):
    """Negative alignment score of a parameterized room layout against the
    predicted corner/edge probability maps (lower is better).

    The layout is sampled as corner points plus ceiling-wall and floor-wall
    boundary segments, projected to the 1024x512 panorama, and scored by
    bilinear lookup into ``scorecor`` / ``scoreedg``.

    Args:
        pc, pc_vec: room center and first-corner offset on the ceiling plane.
        pc_theta: scalar angle (cuboid case) or (1, k) extra corner
            coordinates (general case) -- see the two branches below.
        pc_height: floor plane height.
        scoreedg: (H, W, 3) edge score maps; channel 1 is ceiling-wall,
            channel 2 is floor-wall.
        scorecor: (H, W) corner score map.
        i_step: unused (kept for the commented-out debug print).
    """
    # Sample corner loss
    corid = pc2cor_id(pc, pc_vec, pc_theta, pc_height)
    corid_coordinates = torch.stack([corid[:, 1], corid[:, 0]])
    loss_cor = -map_coordinates_Pytorch(scorecor, corid_coordinates).mean()
    # Sample boundary loss
    if pc_theta.numel()==1:
        # Cuboid: four corners from pc_vec rotated by pc_theta.
        p1 = pc + pc_vec
        p2 = pc + rotatevec(pc_vec, pc_theta)
        p3 = pc - pc_vec
        p4 = pc + rotatevec(pc_vec, pc_theta - np.pi)
        segs = [
            pts_linspace(p1, p2),
            pts_linspace(p2, p3),
            pts_linspace(p3, p4),
            pts_linspace(p4, p1),
        ]
    else:
        # General Manhattan layout: corners are built incrementally, each new
        # corner copying the previous one and overwriting x or y alternately.
        ps = pc + pc_vec
        ps = ps.view(-1,2)
        for c_num in range(pc_theta.shape[1]):
            ps = torch.cat((ps, ps[c_num:,:]),0)
            if (c_num % 2) == 0:
                ps[-1,1] = pc_theta[0,c_num]
            else:
                ps[-1,0] = pc_theta[0,c_num]
        ps = torch.cat((ps, ps[-1:,:]),0)
        ps[-1,1] = ps[0,1]
        segs = []
        for c_num in range(ps.shape[0]-1):
            segs.append(pts_linspace(ps[c_num,:], ps[c_num+1,:]))
        segs.append(pts_linspace(ps[-1,:], ps[0,:]))
    # ceil-wall
    loss_ceilwall = 0
    for seg in segs:
        ceil_uv = xyz2uv(seg, z=-1)
        ceil_idx = uv2idx(ceil_uv, 1024, 512)
        ceil_coordinates = torch.stack([ceil_idx[:, 1], ceil_idx[:, 0]])
        loss_ceilwall -= map_coordinates_Pytorch(scoreedg[..., 1], ceil_coordinates).mean() / len(segs)
    # floor-wall
    loss_floorwall = 0
    for seg in segs:
        floor_uv = xyz2uv(seg, z=pc_height)
        floor_idx = uv2idx(floor_uv, 1024, 512)
        floor_coordinates = torch.stack([floor_idx[:, 1], floor_idx[:, 0]])
        loss_floorwall -= map_coordinates_Pytorch(scoreedg[..., 2], floor_coordinates).mean() / len(segs)
    #losses = 1.0 * loss_cor + 0.1 * loss_wallwall + 0.5 * loss_ceilwall + 1.0 * loss_floorwall
    losses = 1.0 * loss_cor + 1.0 * loss_ceilwall + 1.0 * loss_floorwall
    # if i_step is not None:
    #     with torch.no_grad():
    #         print('step %d: %.3f (cor %.3f, wall %.3f, ceil %.3f, floor %.3f)' % (
    #             i_step, losses,
    #             loss_cor, loss_wallwall,
    #             loss_ceilwall, loss_floorwall))
    return losses
def vecang(vec1, vec2):
    """Return the angle (radians) between two vectors."""
    unit_a = vec1 / np.linalg.norm(vec1)
    unit_b = vec2 / np.linalg.norm(vec2)
    return np.arccos(np.dot(unit_a, unit_b))
def rotatevec(vec, theta):
    """Rotate a 2D vector counter-clockwise by angle ``theta`` (radians)."""
    cos_t, sin_t = torch.cos(theta), torch.sin(theta)
    rotated_x = vec[0] * cos_t - vec[1] * sin_t
    rotated_y = vec[0] * sin_t + vec[1] * cos_t
    return torch.cat([rotated_x, rotated_y])
def pts_linspace(pa, pb, pts=300):
    """Return ``pts + 1`` points evenly interpolated from ``pa`` to ``pb``
    (both endpoints included), shape (pts + 1, 2)."""
    start = pa.view(1, 2)
    end = pb.view(1, 2)
    steps = torch.arange(0, pts + 1, dtype=pa.dtype).view(-1, 1)
    return (start * (pts - steps) + end * steps) / pts
def xyz2uv(xy, z=-1):
    """Project planar points ``xy`` at height ``z`` to equirectangular
    (u, v) angles; u is azimuth, v is elevation of the point."""
    dist = (xy ** 2).sum(1).sqrt()
    u = torch.atan2(xy[:, 1], xy[:, 0])
    v = torch.atan2(torch.zeros_like(dist) + z, dist)
    return torch.stack([u, v], dim=1)
def uv2idx(uv, w, h):
    """Convert (u, v) angles to (col, row) pixel coordinates of a
    ``w`` x ``h`` equirectangular panorama."""
    col = (uv[:, 0] / (2 * np.pi) + 0.5) * w - 0.5
    row = (uv[:, 1] / np.pi + 0.5) * h - 0.5
    return torch.stack([col, row], dim=1)
def pc2cor_id(pc, pc_vec, pc_theta, pc_height):
    """Project the layout corner points to panorama pixel coordinates.

    Builds the ceiling-plane corner list from (pc, pc_vec, pc_theta) --
    cuboid case when pc_theta is a scalar, incremental Manhattan corners
    otherwise -- then projects each corner at both the ceiling (z=-1) and
    the floor (z=pc_height), returning the concatenated (col, row) ids on
    a 1024x512 panorama.
    """
    if pc_theta.numel() == 1:
        # Cuboid: four corners from pc_vec rotated by pc_theta.
        ps = torch.stack([
            (pc + pc_vec),
            (pc + rotatevec(pc_vec, pc_theta)),
            (pc - pc_vec),
            (pc + rotatevec(pc_vec, pc_theta - np.pi))
        ])
    else:
        # General layout: each new corner copies the previous one and
        # overwrites x or y alternately with the next pc_theta value.
        ps = pc + pc_vec
        ps = ps.view(-1, 2)
        for c_num in range(pc_theta.shape[1]):
            ps = torch.cat((ps, ps[c_num:, :]), 0)
            if (c_num % 2) == 0:
                ps[-1, 1] = pc_theta[0, c_num]
            else:
                ps[-1, 0] = pc_theta[0, c_num]
        ps = torch.cat((ps, ps[-1:, :]), 0)
        ps[-1, 1] = ps[0, 1]
    return torch.cat([
        uv2idx(xyz2uv(ps, z=-1), 1024, 512),
        uv2idx(xyz2uv(ps, z=pc_height), 1024, 512),
    ], dim=0)
| 14,463 | 34.364303 | 127 | py |
DMH-Net | DMH-Net-main/postprocess/postprocess2.py | import argparse
import math
import warnings
from typing import List, Optional, Tuple, Dict
import numpy as np
import py360convert
import scipy.signal
import torch
from matplotlib import pyplot as plt
from torch import nn
from torch.utils.data import DataLoader
from tqdm import trange
from config import cfg_from_yaml_file, cfg, cfg_from_list
from e2plabel.e2plabelconvert import VIEW_NAME, VIEW_ARGS
from eval_cuboid import test
from eval_general import test_general
from layers import PerspectiveE2PP2E
from misc.utils import pipeload
from model import DMHNet, ENCODER_RESNET, ENCODER_DENSENET, ENCODER_HOUGH
from perspective_dataset import PerspectiveDataset
from postprocess.GDSolver import solve
from postprocess.LayoutNetv2 import LayoutNetv2PostProcessMain
def line3DConvertCore(cfg, line: List[torch.Tensor], fov, img_hw, view_idx, dis_d=None, dis_u=None, dis_f=None,
                      yline_mode="ud") -> torch.Tensor:
    """
    Line record format: (n, 8) where n is the total number of lines (equal to
    the summed sizes of the tensors in lines[view_idx]). The 8 fields are:
    valid flag, type, mark, x, y, z, original in-view type, view index.
    When the valid flag is 1:
        type: 0 - line whose x value varies, 1 - y varies, 2 - z varies.
        mark: 1 means the line is part of the pred result, 0 an ordinary line TODO
        original type: line type inside the perspective view.
            0:xleft 1:xright 2:yup 3:ydown 4:cupleft 5:cupright 6:cdownleft 7:cdownright
    Coordinate convention: camera at the origin, forward (F) is +y,
    right (R) is +x, up (U) is +z.
    When a required distance argument (dis_d / dis_u / dis_f) is None the
    line cannot be resolved; it is kept with valid flag 0.
    """
    ratio = lineCoordToRatio(cfg, line, img_hw)
    fov_hori, fov_vert = fov
    # Express every line in the coordinate frame of this view
    result = []
    # Lines along the x direction (vertical image columns)
    for r, v in zip(ratio[0], line[0]):
        isXright = v >= img_hw[1] / 2
        try:
            if isXright:
                # xright
                x = dis_f * calLineAngleTan(r, fov_hori)
                result.append([1, 2, 0, x, dis_f, math.nan, 1, view_idx])
            else:
                # xleft
                x = dis_f * calLineAngleTan(r, fov_hori)
                result.append([1, 2, 0, -x, dis_f, math.nan, 0, view_idx])
        except TypeError as e:
            if len(e.args) > 0 and e.args[0].find("NoneType") != -1:
                # Distance argument was None: keep the line but flag it invalid
                result.append([0, 2, 0, r, v, math.nan, 1 if isXright else 0, view_idx])
            else:
                raise e
    # Lines along the y direction (horizontal image rows)
    for r, v in zip(ratio[1], line[1]):
        isYdown = v >= img_hw[0] / 2
        try:
            if isYdown:
                if yline_mode[1] == "d":
                    y = dis_d / calLineAngleTan(r, fov_vert)
                    result.append([1, 0, 0, math.nan, y, -dis_d, 3, view_idx])
                elif yline_mode[1] == "f":
                    z = dis_f * calLineAngleTan(r, fov_vert)
                    result.append([1, 0, 0, math.nan, dis_f, -z, 3, view_idx])
                else:
                    assert False
            else:
                # yup
                if yline_mode[0] == "u":
                    y = dis_u / calLineAngleTan(r, fov_vert)
                    result.append([1, 0, 0, math.nan, y, dis_u, 2, view_idx])
                elif yline_mode[0] == "f":
                    z = dis_f * calLineAngleTan(r, fov_vert)
                    result.append([1, 0, 0, math.nan, dis_f, z, 2, view_idx])
                else:
                    assert False
        except TypeError as e:
            if len(e.args) > 0 and e.args[0].find("NoneType") != -1:
                # Distance argument was None: keep the line but flag it invalid
                result.append([0, 0, 0, r, v, math.nan, 3 if isYdown else 2, view_idx])
            else:
                raise e
    # cup (ceiling corner) lines
    for r, v in zip(ratio[2], line[2]):
        isRight = r >= 0
        try:
            x = dis_u * r
            result.append([1, 1, 0, x, math.nan, dis_u, 5 if isRight else 4, view_idx])
        except TypeError as e:
            if len(e.args) > 0 and e.args[0].find("NoneType") != -1:
                # Distance argument was None: keep the line but flag it invalid
                result.append([0, 1, 0, r, v, math.nan, 5 if isRight else 4, view_idx])
            else:
                raise e
    # cdown (floor corner) lines
    for r, v in zip(ratio[3], line[3]):
        isRight = r >= 0
        try:
            x = dis_d * r
            result.append([1, 1, 0, x, math.nan, -dis_d, 5 if isRight else 4, view_idx])
        except TypeError as e:
            if len(e.args) > 0 and e.args[0].find("NoneType") != -1:
                # Distance argument was None: keep the line but flag it invalid
                result.append([0, 1, 0, r, v, math.nan, 5 if isRight else 4, view_idx])
            else:
                raise e
    result = torch.tensor(result, dtype=torch.float32, device=line[0].device)
    if len(result) == 0:
        result = result.new_zeros((0, 8))
    return result
def line3DConvert(cfg, line: List[torch.Tensor], fov, img_hw, view_idx, dis_d=None, dis_u=None, dis_f=None,
                  yline_mode="ud") -> torch.Tensor:
    """Convert one view's detected lines to world-frame 3D line records.

    Runs :func:`line3DConvertCore` in the view's local frame, then rotates
    the coordinates (and the per-axis line-type codes) into the global
    frame according to which cube face ``view_idx`` is.
    """
    r = line3DConvertCore(cfg, line, fov, img_hw, view_idx, dis_d=dis_d, dis_u=dis_u, dis_f=dis_f,
                          yline_mode=yline_mode)
    if VIEW_NAME[view_idx] == "F":
        pass
    elif VIEW_NAME[view_idx] == "B":
        r[:, 3:5] *= -1
    elif VIEW_NAME[view_idx] == "L":
        oriy = r[:, 4].clone()
        r[:, 4] = r[:, 3]  # y <- x
        r[:, 3] = -oriy  # x <- -y
        yLineMask = r[:, 1] == 1  # swap the x-line and y-line categories
        r[r[:, 1] == 0, 1] = 1
        r[yLineMask, 1] = 0
    elif VIEW_NAME[view_idx] == "R":
        oriy = r[:, 4].clone()
        r[:, 4] = -r[:, 3]  # y <- -x
        r[:, 3] = oriy  # x <- y
        yLineMask = r[:, 1] == 1  # swap the x-line and y-line categories
        r[r[:, 1] == 0, 1] = 1
        r[yLineMask, 1] = 0
    elif VIEW_NAME[view_idx] == "D":
        oriy = r[:, 4].clone()
        r[:, 4] = r[:, 5]  # y <- z
        r[:, 5] = -oriy  # z <- -y
        yLineMask = r[:, 1] == 1  # swap the z-line and y-line categories
        r[r[:, 1] == 2, 1] = 1
        r[yLineMask, 1] = 2
    elif VIEW_NAME[view_idx] == "U":
        oriy = r[:, 4].clone()
        r[:, 4] = -r[:, 5]  # y <- -z
        r[:, 5] = oriy  # z <- y
        yLineMask = r[:, 1] == 1  # swap the z-line and y-line categories
        r[r[:, 1] == 2, 1] = 1
        r[yLineMask, 1] = 2
    else:
        assert False
    return r
def allLinesConvert(cfg, lines: List[List[torch.Tensor]], img_hw, camera_height, dis_u, dis_box, view_args,
                    extra: Optional[List[List[torch.Tensor]]] = None):
    """Convert detected lines of all six cube views into one tensor of
    world-frame 3D line records.

    Args:
        lines: per-view, per-type line positions (6 views x 4 types).
        camera_height: distance from camera to the floor.
        dis_u: distance from camera to the ceiling.
        dis_box: the four wall distances, or None while still unknown.
        extra: optional per-line values (e.g. probabilities) appended as an
            extra column.
    """
    line_result = []
    # The four side views
    for view_idx in range(4):
        r = line3DConvert(cfg, lines[view_idx], view_args[view_idx][0], img_hw, view_idx, dis_d=camera_height,
                          dis_u=dis_u, dis_f=dis_box[view_idx] if dis_box is not None else None, yline_mode="ud")
        line_result.append(r)
    # Top view
    view_idx = 4
    r = line3DConvert(cfg, lines[view_idx], view_args[view_idx][0], img_hw, view_idx, dis_f=dis_u,
                      dis_u=dis_box[2] if dis_box is not None else None,
                      dis_d=dis_box[0] if dis_box is not None else None, yline_mode="ff")
    line_result.append(r)
    # Bottom view
    view_idx = 5
    r = line3DConvert(cfg, lines[view_idx], view_args[view_idx][0], img_hw, view_idx, dis_f=camera_height,
                      dis_u=dis_box[0] if dis_box is not None else None,
                      dis_d=dis_box[2] if dis_box is not None else None, yline_mode="ff")
    line_result.append(r)
    line_result = torch.cat(line_result, 0)
    if extra is not None:  # append the per-line extra values as a column
        line_result = torch.cat([line_result, torch.cat([torch.cat(a) for a in extra]).unsqueeze(1)], 1)
    return line_result
def classifyLine(line) -> Tuple[torch.Tensor, List[torch.Tensor]]:
    """
    Partition lines into 12 classes from the signs of their two fixed
    coordinates (the axis a line runs along contributes no sign):
        classes 0-3: x-lines, keyed by (sign of y, sign of z)
        classes 4-7: y-lines, keyed by (sign of x, sign of z)
        classes 8-11: z-lines, keyed by (sign of x, sign of y)
    The class id is appended as a new last column.
    :param line: (n, k) line records (column 1 is the axis type, columns
        3..5 are x, y, z)
    :return Tuple[(n, k+1) lines with the class appended,
                  list of 12 per-class line tensors]
    """
    categories = []
    for row in line:
        if row[1] == 0:
            cat = (row[4] < 0) * 2 + (row[5] < 0) + 0
        elif row[1] == 1:
            cat = (row[3] < 0) * 2 + (row[5] < 0) + 4
        elif row[1] == 2:
            cat = (row[3] < 0) * 2 + (row[4] < 0) + 8
        categories.append(cat)
    tagged = torch.cat([line, line.new_tensor(categories).unsqueeze(1)], 1)
    buckets = [tagged[tagged[:, -1] == c] for c in range(12)]
    return tagged, buckets
def lwhToPeaks(cfg, lwh: torch.Tensor, img_hw) -> List[List[Tuple[torch.Tensor, torch.Tensor]]]:
    """
    Render an LWH room-box hypothesis back into per-view line peaks.
    :return For each of the 6 views and each of the 4 line types, a pair of
        tensors: the first holds Hough-domain peak values, the second the
        visible-length fraction of each line.
    """
    lwh = torch.abs(lwh)
    result = [[[] for _ in range(4)] for _ in range(6)]
    # Per view: [index of the facing-wall distance, indices of the four
    # neighbouring face distances (left, right, up, down)].
    TABLE = [
        [3, [0, 1, 5, 4]],
        [1, [3, 2, 5, 4]],
        [2, [1, 0, 5, 4]],
        [0, [2, 3, 5, 4]],
        [5, [0, 1, 2, 3]],
        [4, [0, 1, 3, 2]]
    ]
    for view_idx in range(6):
        ratio = 1 - (lwh[TABLE[view_idx][1]] / lwh[TABLE[view_idx][0]])
        with torch.set_grad_enabled(True):
            length = ratio.new_ones(4)
        pointPlace = [(img_hw[1] - 1) / 2 * ratio[0], (img_hw[1] - 1) / 2 * (2 - ratio[1]),
                      (img_hw[0] - 1) / 2 * ratio[2], (img_hw[0] - 1) / 2 * (2 - ratio[3])]
        # Find intersections, refine the horizontal/vertical line lengths,
        # and derive the corner (diagonal) lines
        for i in range(2):
            for j in range(2, 4):
                if not (ratio[i] > 0 and ratio[j] > 0):
                    continue
                with torch.set_grad_enabled(True):
                    length[i] = length[i] - (ratio[j] / 2)
                    length[j] = length[j] - (ratio[i] / 2)
                    lenCLine = min(ratio[i], ratio[j]).clone()
                houghParamCLine, isCDown = PerspectiveDataset.coord2AngleValue(pointPlace[i], pointPlace[j], img_hw)
                result[view_idx][3 if isCDown == 1 else 2].append((houghParamCLine, lenCLine))
        # Add the horizontal and vertical peaks
        for i in range(4):
            if ratio[i] > 0:
                result[view_idx][0 if i < 2 else 1].append((pointPlace[i], length[i]))
        for t in range(4):
            l = result[view_idx][t]
            l2 = (torch.stack([tup[0] for tup in l]) if len(l) > 0 else lwh.new_zeros(0),
                  torch.stack([tup[1] for tup in l]) if len(l) > 0 else lwh.new_zeros(0))
            result[view_idx][t] = l2
    return result
def solveActualHeightByIOU(d: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
    """
    :param d: shape (n, 4); the four entries are (y+, x+, y-, x-). ``u``
        uses the same format.
    :return: shape (n); the ratio of the actual camera-to-ceiling distance
        to the currently assumed one, found by scaling the upper box until
        its IoU with the lower box is maximal.
    """
    # Reorder to (x-, x+, y-, y+) and negate the lower bounds so each axis
    # runs min-before-max, as calculateIOU requires.
    changeToNegative = d.new_tensor([-1, 1, -1, 1])
    d, u = (d[[3, 1, 2, 0]] * changeToNegative).unsqueeze(0), (u[[3, 1, 2, 0]] * changeToNegative).unsqueeze(0)

    class UpperIOUModule(nn.Module):
        # Single learnable scale w applied to the upper box; the loss is
        # (1 - IoU) so gradient descent maximises the overlap.
        def __init__(self):
            super().__init__()
            self.w = nn.Parameter(torch.ones(d.shape[0], requires_grad=True))

        def forward(self):
            return (1 - calculateIOU(d, u * self.w).diagonal()).mean()

    module = UpperIOUModule().to(d.device)
    module = solve(module, max_iter=100, lr=1e-2, stop_tol=1e-3, stop_range=5)
    return module.w.data
def interpolate(vector: torch.Tensor, position: torch.Tensor):
    """Linearly interpolate 1D ``vector`` at fractional ``position`` indices.

    Integer indices are clamped into range before the fractional weight is
    taken, so out-of-range positions saturate at the border values.
    """
    base = torch.floor(position).to(torch.int64)
    nxt = torch.clamp(base + 1, 0, len(vector) - 1)
    base = torch.clamp(base, 0, len(vector) - 1)
    weight = position - base
    return weight * vector[nxt] + (1 - weight) * vector[base]
def lossFunction(output, img_idx, peakss):
    """Score how well the line peaks implied by an LWH hypothesis line up
    with the network's per-view probability curves.

    For every view and every line type, each hypothesised peak position is
    bilinearly sampled from the sigmoid of the prediction curve and weighted
    by the line's visible length; the loss is 20 minus the total matched
    score, so better alignment gives a lower loss.
    """
    preds_xy = torch.sigmoid(output["p_preds_xy"][img_idx]).to(peakss[0][0][0].device)
    preds_cud = torch.sigmoid(output["p_preds_cud"][img_idx]).to(peakss[0][0][0].device)
    views_result = []
    for view_idx, ((xPeaks, xLength), (yPeaks, yLength), (cupPeaks, cupLength),
                   (cDownPeaks, cDownLength)) in enumerate(peakss):
        fourPreds = [preds_xy[view_idx, :, 0], preds_xy[view_idx, :, 1],
                     preds_cud[view_idx, :, 0], preds_cud[view_idx, :, 1]]
        fourPeaks = [xPeaks, yPeaks, cupPeaks, cDownPeaks]
        fourLength = [xLength, yLength, cupLength, cDownLength]
        view_result = []
        for preds, peaks, length in zip(fourPreds, fourPeaks, fourLength):
            scores = interpolate(preds, peaks)
            scores = scores * length
            view_result.append(scores)
        view_result = torch.cat(view_result)
        views_result.append(view_result)
    views_result = torch.cat(views_result)
    final_result = views_result.sum()
    final_result.requires_grad_(True)
    return 20 - final_result
def solveLwh(cfg, output, img_idx, start_lwh, img_hw):
    """Refine an initial room-box estimate by gradient descent on
    :func:`lossFunction`.

    Five of the six face distances are optimized; index 4 (the floor, i.e.
    the camera height) stays pinned at -1.6.

    Returns (refined 6-value lwh tensor, absolute final loss value).
    """
    class LWHSolveModule(nn.Module):
        def __init__(self):
            super().__init__()
            # Optimize every face except index 4 (floor / camera height).
            startParam = start_lwh[[0, 1, 2, 3, 5]]
            self.param = nn.Parameter(startParam.clone().detach().requires_grad_(True), requires_grad=True)
        def lwh(self):
            # Re-insert the fixed floor distance (-1.6) into the 6-vector.
            return torch.cat([self.param[0:4], self.param.new_tensor([-1.6]), self.param[4:5]])
        def forward(self):
            lwh = self.lwh()
            peaks = lwhToPeaks(cfg, lwh, img_hw)
            return lossFunction(output, img_idx, peaks)
    module = LWHSolveModule().to("cpu")
    module = solve(module, max_iter=100, lr=1e-2, stop_tol=1e-3, stop_range=5)
    return module.lwh().data, torch.abs(module.forward())
def linesGTProcess_PretendDisUThenOptimIOU(cfg, lines: List[List[torch.Tensor]], img_hw, camera_height=1.6,
                                           view_args=VIEW_ARGS,
                                           preset_dis_u=1.6):
    """
    Assume a camera-to-ceiling distance, fully estimate the size of the
    upper (ceiling) box, then recover the true height by maximising the IoU
    between the upper and lower boxes.

    Returns (3D line records, layout tensor (-x, x, -y, y, -z, z)).
    """
    with torch.no_grad():
        line_result = allLinesConvert(cfg, lines, img_hw, camera_height, preset_dis_u, None, view_args)
        line_result, classified = classifyLine(line_result)
        # Note: at this point the z (vertical) lines have no coordinates yet.
        # Assertion: for GT, each class should contain exactly one line that
        # was an xy-line in the original perspective view.
        # Apart from the unresolved z lines, the remaining eight lines form
        # the lower and upper boxes.
        # Maximising their IoU yields the height scale, which then gives the
        # true ceiling distance.
        dis_u = preset_dis_u
        downbox, upbox, _ = calculateUDBox(cfg, classified, useCLine=False, useZLine=False,
                                           require_onlyone=cfg.DATA.TYPE == "cuboid")
        w = solveActualHeightByIOU(downbox, upbox).item()
        dis_u = dis_u * w
        dis_box = (downbox + upbox * w) / 2
        # Recompute line_result once more so the z lines can now be resolved
        line_result = allLinesConvert(cfg, lines, img_hw, camera_height, dis_u, dis_box, view_args)
        return line_result, dis_box.new_tensor(
            [-dis_box[3], dis_box[1], -dis_box[2], dis_box[0], -camera_height, dis_u])
def updateProb(line: torch.Tensor, amount: float, mask: Optional[torch.BoolTensor] = None) -> torch.Tensor:
    """Return a copy of ``line`` with ``amount`` added to the probability
    column (index 8); when ``mask`` is given only the selected rows change."""
    delta = torch.zeros_like(line)
    rows = slice(None) if mask is None else mask
    delta[rows, 8] = amount
    return line + delta
def calculateUDBox(cfg, classified: List[torch.Tensor], useCLine=True, useZLine=True, c_line_prob_punish=None,
                   z_line_prob_punish=None, require_onlyone=False) -> Tuple[
    torch.Tensor, torch.Tensor, Optional[dict]]:
    """
    Recompute the upper and lower boxes from the classified 3D line results.
    Strategy: each box edge corresponds to three groups of lines - two
    groups of vertical (z) lines and the group of its own ceiling/floor
    lines. The vertical lines have a constant subtracted from their
    probability, the groups are merged, and the highest-probability line
    wins.
    :return Tuple[downbox, upbox, extra]; each box is (y+, x+, y-, x-),
        ``extra`` is a dict (e.g. listing edges that had no line) or None.
    """
    c_line_prob_punish = c_line_prob_punish if c_line_prob_punish is not None else cfg.POST_PROCESS.CPP
    z_line_prob_punish = z_line_prob_punish if z_line_prob_punish is not None else cfg.POST_PROCESS.ZPP
    z_class_seq = [[8, 10], [8, 9], [9, 11], [10, 11]] if useZLine else [[]] * 4
    result = []
    extra = None
    for mainClass, zClasses, name in zip([1, 5, 3, 7, 0, 4, 2, 6], z_class_seq * 2,
                                         ["_↑", "_→", "_↓", "_←", "¯↑", "¯→", "¯↓", "¯←"]):
        if not useZLine:
            t = classified[mainClass]
            notCLine_mask = torch.logical_and(0 <= t[:, 6], t[:, 6] <= 3)
            valid_mask = t[:, 0] == 1 if useCLine else torch.logical_and(t[:, 0] == 1, notCLine_mask)
            t = t[valid_mask]
            if useCLine and c_line_prob_punish is not None:  # corner-line (cline) penalty
                t = updateProb(t, -c_line_prob_punish, ~notCLine_mask)  # subtract the penalty from the probability
            r = t
        else:
            r = []
            t = classified[mainClass]
            notCLine_mask = torch.logical_and(0 <= t[:, 6], t[:, 6] <= 3)
            valid_mask = t[:, 0] == 1 if useCLine else torch.logical_and(t[:, 0] == 1, notCLine_mask)
            t = t[valid_mask]
            if useCLine and c_line_prob_punish is not None:  # corner-line (cline) penalty
                t = updateProb(t, -c_line_prob_punish, ~notCLine_mask)  # subtract the penalty from the probability
            r.append(t)
            for zClass in zClasses:
                t = classified[zClass]
                notCLine_mask = torch.logical_and(0 <= t[:, 6], t[:, 6] <= 3)
                valid_mask = t[:, 0] == 1 if useCLine else torch.logical_and(t[:, 0] == 1, notCLine_mask)
                t = t[valid_mask]
                if useCLine and c_line_prob_punish is not None:  # corner-line (cline) penalty
                    t = updateProb(t, -c_line_prob_punish, ~notCLine_mask)  # subtract the penalty from the probability
                if z_line_prob_punish is not None:  # z-line penalty (applies to every zClass entry)
                    t = updateProb(t, -z_line_prob_punish)  # subtract the penalty from the probability
                r.append(t)
            r = torch.cat(r, 0)
        pickWhichAxis = (1 if mainClass <= 3 else 0) + 3
        if require_onlyone:
            if len(r) != 1:
                warnings.warn(
                    "calculateUDBox assertion require_onlyone fail! for mainClass {:d}, has {:} lines".format(mainClass,
                                                                                                              len(r)))
            else:
                r[:, 2] = 1
            result.append(r[:, pickWhichAxis].mean().abs())
        else:
            if len(r) > 0:
                idx = r[:, 8].argmax()
                r[idx, 2] = 1
                result.append(r[idx, pickWhichAxis].abs())
            else:
                warnings.warn("calculateUDBox: no line for mainClass {:s}({:d})!".format(name, mainClass))
                result.append(cfg.POST_PROCESS.DEFAULT_DISTANCE)
                if extra is None: extra = {}
                if "noline" not in extra: extra["noline"] = []
                extra["noline"].append(name)
    if extra is not None and "noline" in extra: extra["noline"] = " ".join(extra["noline"])
    return classified[0].new_tensor(result[0:4]), classified[0].new_tensor(result[4:8]), extra
def linesPredProcess_PretendDisUThenOptimIOU(cfg, lines: List[List[torch.Tensor]], probs: List[List[torch.Tensor]],
                                             img_hw, camera_height=1.6,
                                             view_args=VIEW_ARGS,
                                             preset_dis_u=1.6):
    """
    Assume a camera-to-ceiling distance, fully estimate the size of the
    upper box, then recover the true height by maximising the IoU between
    the upper and lower boxes; refine iteratively for predictions.

    Returns (3D line records, layout tensor (-x, x, -y, y, -z, z), extra).
    """
    with torch.no_grad():
        line_result = allLinesConvert(cfg, lines, img_hw, camera_height, preset_dis_u, None, view_args, extra=probs)
        line_result, classified = classifyLine(line_result)
        # Note: at this point the z (vertical) lines have no coordinates yet.
        # For predictions: apart from the unresolved z lines, the highest-
        # probability line of each of the 8 box classes forms the lower and
        # upper boxes.
        # Maximising their IoU gives the height, and hence the box size.
        dis_u = preset_dis_u
        downbox, upbox, extra = calculateUDBox(cfg, classified, useZLine=False)
        w = solveActualHeightByIOU(downbox, upbox).item()
        dis_u = dis_u * w
        dis_box = (downbox + upbox * w) / 2
        extra_nz = extra["noline"] if extra is not None and "noline" in extra else None
        for i in range(cfg.POST_PROCESS.ITER if extra_nz is None else cfg.POST_PROCESS.ITER_NZ):
            # Inputs: distance to the ceiling, lower box, upper box.
            # Step 1: solve the scale factor from the two boxes and update
            #   the ceiling distance;
            # Step 2: re-solve all 3D lines using the new ceiling distance
            #   and the average of the boxes (upper box rescaled by w);
            # Step 3: update both boxes from the complete line results
            #   (which now include the vertical lines).
            # Iterating this rule progressively refines the box edges.
            olddownbox, oldupbox = downbox, upbox
            line_result = allLinesConvert(cfg, lines, img_hw, camera_height, dis_u, dis_box, view_args, extra=probs)
            line_result, classified = classifyLine(line_result)
            downbox, upbox, extra = calculateUDBox(cfg, classified)
            w = solveActualHeightByIOU(downbox, upbox).item()
            dis_u = dis_u * w
            dis_box = (downbox + upbox * w) / 2
            if torch.mean(torch.abs(downbox - olddownbox)) < 1e-3 and torch.mean(torch.abs(upbox - oldupbox)) < 1e-3:
                break
        line_result = allLinesConvert(cfg, lines, img_hw, camera_height, dis_u, dis_box, view_args, extra=probs)
        if extra_nz is not None:
            if extra is None: extra = {}
            extra["nz"] = extra_nz
        return line_result, dis_box.new_tensor(
            [-dis_box[3], dis_box[1], -dis_box[2], dis_box[0], -camera_height, dis_u]), extra
def findPeaks(cfg, vector: torch.Tensor) -> torch.Tensor:
    """Locate peak positions in a 1D response curve.

    Detection is delegated to ``scipy.signal.find_peaks``; the minimum peak
    distance, height and prominence thresholds come from the POST_PROCESS
    config (distance defaults to 60 when unset).

    Returns a tensor of peak indices with the dtype/device of ``vector``.
    """
    min_distance = cfg.POST_PROCESS.get("PEAK_DISTANCE", 60)
    peak_positions, _ = scipy.signal.find_peaks(vector.cpu().numpy(),
                                                distance=min_distance,
                                                height=cfg.POST_PROCESS.PEAK_HEIGHT,
                                                prominence=cfg.POST_PROCESS.PEAK_PROMINENCE)
    return vector.new_tensor(peak_positions)
def predProbMap_PretendDisUThenOptimIOU(cfg, output, img_idx, img_hw, camera_height=1.6, view_args=VIEW_ARGS,
                                        preset_dis_u=1.6):
    """Extract line peaks from the network output for one image and run the
    pretend-ceiling-distance / IoU-optimization post-process on them.

    Returns (3D line records, layout tensor, extra dict or None).
    """
    with torch.no_grad():
        lines, probs = extractPredPeaks(cfg, output, img_idx)
        return linesPredProcess_PretendDisUThenOptimIOU(cfg, lines, probs, img_hw, camera_height, view_args,
                                                        preset_dis_u)
def extractPredPeaks(cfg, output, img_idx):
    """Extract peak positions and their probabilities from the predicted
    per-view line response curves of one image.

    Returns (lines, probs): for each of the 6 views, four tensors of peak
    indices (x, y, cup, cdown) and the matching sigmoid probabilities.
    """
    lines, probs = [], []
    for view_idx in range(6):
        view_line, view_prob = [], []
        for signal in [output["p_preds_xy"][img_idx, view_idx, :, 0],
                       output["p_preds_xy"][img_idx, view_idx, :, 1],
                       output["p_preds_cud"][img_idx, view_idx, :, 0],
                       output["p_preds_cud"][img_idx, view_idx, :, 1]]:
            signal = torch.sigmoid(signal)
            # Search for peaks separately in the first and second halves
            mid = len(signal) // 2
            peak = torch.cat([findPeaks(cfg, signal[0:mid]), mid + findPeaks(cfg, signal[mid:])])
            prob = signal[peak.to(torch.int64)]
            view_line.append(peak)
            view_prob.append(prob)
        lines.append(view_line)
        probs.append(view_prob)
    return lines, probs
def findPeaks2D8Points(cfg, matrix: torch.Tensor) -> torch.Tensor:
    """Find 8 corner points (4 columns x 2 rows) in a 2D score map.

    First collapses the map into a per-column score, picks the best column
    peak in each horizontal quarter, then finds the best row peak in the
    upper and lower halves of each chosen column.

    Returns a (8, 2) tensor of (col, row) coordinates.
    """
    # For every column, average the top-n values of the upper and lower halves
    h2f = matrix.shape[0] // 2
    column_vec = torch.cat([matrix[0:h2f].topk(cfg.POST_PROCESS.EMASK.ROW_CHOOSE_N, dim=0)[0],
                            matrix[h2f:].topk(cfg.POST_PROCESS.EMASK.ROW_CHOOSE_N, dim=0)[0]], 0).mean(0)
    result = []
    columns = []
    # Extract one peak per horizontal quarter of the column response
    for i in range(4):
        begin, end = matrix.shape[1] * i // 4, matrix.shape[1] * (i + 1) // 4
        seq = column_vec[begin:end]
        peaks1D = findPeaks(cfg, seq).to(torch.int64)
        if len(peaks1D) > 0:
            best_peak = peaks1D[seq[peaks1D].argmax()].item() + begin
        else:
            warnings.warn("when find 2d peaks for emask, len(peaks1D) == 0 when calculating column!")
            best_peak = (begin + end) // 2
        columns.append(best_peak)
    for column in columns:
        # For each chosen column, pick the best row peak in the lower and
        # then the upper half (reversed order).
        for i in reversed(range(2)):
            begin, end = matrix.shape[0] * i // 2, matrix.shape[0] * (i + 1) // 2
            seq = matrix[begin:end, column]
            peaks1D = findPeaks(cfg, seq).to(torch.int64)
            if len(peaks1D) > 0:
                best_peak = peaks1D[seq[peaks1D].argmax()].item() + begin
            else:
                warnings.warn("when find 2d peaks for emask, len(peaks1D) == 0 when calculating column!")
                best_peak = (begin + end) // 2
            result.extend([(column, best_peak)])
    return matrix.new_tensor(result)
def calPredCorIdByEMask(cfg, emask_img, z0=50):
    """Derive cuboid corner coordinates from the predicted equirectangular
    edge mask.

    Channels of ``emask_img`` are mixed with the configured TYPE_WEIGHT,
    8 corner points are extracted from the combined score, sorted by the
    column of the ceiling corners, and the floor height ``z1`` is estimated
    from the ceiling/floor v-ratio relative to the assumed ceiling height
    ``z0``.

    Returns (coords as (8, 2) numpy array, z0, z1).
    """
    type_weight = emask_img.new_tensor(cfg.POST_PROCESS.EMASK.TYPE_WEIGHT)
    type_weight /= type_weight.sum()
    emask_score = (type_weight.unsqueeze(-1).unsqueeze(-1) * emask_img).sum(dim=0)
    coords = findPeaks2D8Points(cfg, emask_score).cpu().numpy()
    # Reorder the corner pairs by the column of the even (ceiling) entries
    indices = np.repeat(np.argsort(coords[1::2, 0]) * 2, 2)
    indices[0::2] += 1
    coords = coords[indices]
    xyz = py360convert.uv2unitxyz(py360convert.coor2uv(coords, *emask_img.shape[1:3]))
    z1 = (xyz[1::2, 1] / xyz[0::2, 1]).mean() * z0
    return coords, z0, z1.item()
def calMetrics_PretendDisUThenOptimIOU(cfg, input, output, img_idx, optimization, camera_height=1.6,
                                       view_args=VIEW_ARGS, preset_dis_u=1.6) -> Tuple[
    Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Dict[
        str, torch.Tensor]]:
    """Full post-process for one image: recover GT and predicted layouts via
    the pretend-ceiling-distance / IoU method, optionally refine the
    prediction with gradient descent, and compute HorizonNet metrics.

    Returns ((gt_lines, gt_lwh, gt_cor_id),
             (pred_lines, pred_lwh, pred_cor_id), metrics dict).
    """
    with torch.no_grad():
        img_hw = input["p_imgs"].shape[-2:]
        e_img_hw = input["e_img"].shape[-2:]
        gt_lines, gt_lwh = linesGTProcess_PretendDisUThenOptimIOU(cfg, input["peaks"][img_idx], img_hw, camera_height,
                                                                  view_args, preset_dis_u)
        gt_cor_id = input["cor"][img_idx]
        pred_lines, pred_lwh, pred_extra = predProbMap_PretendDisUThenOptimIOU(cfg, output, img_idx, img_hw,
                                                                               camera_height,
                                                                               view_args, preset_dis_u)
        if optimization:
            # Refine the box with gradient descent, then rebuild the lines.
            pred_lwh, _ = solveLwh(cfg, output, img_idx, pred_lwh, img_hw)
            pred_peaks, pred_probs = extractPredPeaks(cfg, output, img_idx)
            pred_lines = allLinesConvert(cfg, pred_peaks, img_hw, -pred_lwh[4], pred_lwh[5], pred_lwh[[3, 1, 2, 0]],
                                         view_args, pred_probs)
        corner_method = cfg.POST_PROCESS.get("CORNER_METHOD", "lwh")
        if corner_method == "lwh":
            pred_cor_id_np, z0, z1 = cvtPredLwhToEquirecCornerCoords(pred_lwh, e_img_hw)
        elif corner_method == "emask":
            pred_cor_id_np, z0, z1 = calPredCorIdByEMask(cfg, output["p_preds_emask"][img_idx])
        pred_cor_id = pred_lwh.new_tensor(pred_cor_id_np)
        # Move predictions onto the GT device before computing metrics
        pred_lines, pred_lwh = pred_lines.to(gt_lines.device), pred_lwh.to(gt_lwh.device)
        metrics = {}
        # Compute the metrics with HorizonNet's evaluation code
        t = {}
        if cfg.POST_PROCESS.get("TEST_WITH_BOTH", False):
            test_general(pred_cor_id_np, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
            t["3DIoU-general"] = t["3DIoU"]
            test(pred_cor_id_np, z0, z1, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
        elif cfg.DATA.TYPE == "cuboid":
            test(pred_cor_id_np, z0, z1, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
        elif cfg.DATA.TYPE == "general":
            test_general(pred_cor_id_np, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
        else:
            assert False
        for k in t:
            metrics[k] = pred_lwh.new_tensor(t[k]) if not isinstance(t[k], str) else t[k]
        if pred_extra is not None:
            metrics.update(pred_extra)
        return (gt_lines, gt_lwh, gt_cor_id), (pred_lines, pred_lwh, pred_cor_id), metrics
def calMetrics_PretendDisUThenOptimIOUForV2(cfg, input, output, img_idx, optimization, camera_height=1.6,
                                            view_args=VIEW_ARGS, preset_dis_u=1.6) -> Tuple[
    Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Dict[
        str, torch.Tensor], torch.Tensor]:
    """Variant of :func:`calMetrics_PretendDisUThenOptimIOU` that also
    returns the LWH-optimization error score.

    Bug fix: ``err_score`` used to be assigned only inside the
    ``if optimization:`` branch, so calling with ``optimization=False``
    raised NameError at the return; it now defaults to None in that case.

    Returns ((gt_lines, gt_lwh, gt_cor_id),
             (pred_lines, pred_lwh, pred_cor_id), metrics dict,
             err_score tensor or None).
    """
    with torch.no_grad():
        img_hw = input["p_imgs"].shape[-2:]
        e_img_hw = input["e_img"].shape[-2:]
        gt_lines, gt_lwh = linesGTProcess_PretendDisUThenOptimIOU(cfg, input["peaks"][img_idx], img_hw, camera_height,
                                                                  view_args, preset_dis_u)
        gt_cor_id = input["cor"][img_idx]
        pred_lines, pred_lwh, pred_extra = predProbMap_PretendDisUThenOptimIOU(cfg, output, img_idx, img_hw,
                                                                               camera_height,
                                                                               view_args, preset_dis_u)
        err_score = None  # only available when the LWH optimization is run
        if optimization:
            # Refine the box with gradient descent, then rebuild the lines.
            pred_lwh, err_score = solveLwh(cfg, output, img_idx, pred_lwh, img_hw)
            pred_peaks, pred_probs = extractPredPeaks(cfg, output, img_idx)
            pred_lines = allLinesConvert(cfg, pred_peaks, img_hw, -pred_lwh[4], pred_lwh[5], pred_lwh[[3, 1, 2, 0]],
                                         view_args, pred_probs)
        corner_method = cfg.POST_PROCESS.get("CORNER_METHOD", "lwh")
        if corner_method == "lwh":
            pred_cor_id_np, z0, z1 = cvtPredLwhToEquirecCornerCoords(pred_lwh, e_img_hw)
        elif corner_method == "emask":
            pred_cor_id_np, z0, z1 = calPredCorIdByEMask(cfg, output["p_preds_emask"][img_idx])
        pred_cor_id = pred_lwh.new_tensor(pred_cor_id_np)
        # Move predictions onto the GT device before computing metrics
        pred_lines, pred_lwh = pred_lines.to(gt_lines.device), pred_lwh.to(gt_lwh.device)
        metrics = {}
        # Compute the metrics with HorizonNet's evaluation code
        t = {}
        if cfg.POST_PROCESS.get("TEST_WITH_BOTH", False):
            test_general(pred_cor_id_np, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
            t["3DIoU-general"] = t["3DIoU"]
            test(pred_cor_id_np, z0, z1, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
        elif cfg.DATA.TYPE == "cuboid":
            test(pred_cor_id_np, z0, z1, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
        elif cfg.DATA.TYPE == "general":
            test_general(pred_cor_id_np, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
        else:
            assert False
        for k in t:
            metrics[k] = pred_lwh.new_tensor(t[k]) if not isinstance(t[k], str) else t[k]
        if pred_extra is not None:
            metrics.update(pred_extra)
        return (gt_lines, gt_lwh, gt_cor_id), (pred_lines, pred_lwh, pred_cor_id), metrics, err_score
def LayoutNetv2PostProcessWrapper(cfg, input, output, img_idx) -> Tuple[
    Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Dict[
        str, torch.Tensor]]:
    """
    Run the LayoutNetv2 post-process on the predicted edge mask and compute
    metrics against GT.
    :return Tensor<n,2> corner coordinates, in the format that can be fed
        straight into the test_general function.
    """
    e_img_hw = input["e_img"].shape[-2:]
    gt_cor_id = input["cor"][img_idx]
    generatePred2DMask(cfg, input, output, img_idx)
    pred_cor_id_np = LayoutNetv2PostProcessMain(output["p_preds_emask"][img_idx].mean(0).cpu().numpy(),
                                                output["p_preds_emask"][img_idx].permute(1, 2, 0).cpu().numpy())
    pred_cor_id = input["cor"][img_idx].new_tensor(pred_cor_id_np)
    metrics = {}
    # Compute the metrics with HorizonNet's evaluation code
    t = {}
    if cfg.DATA.TYPE == "cuboid":
        # NOTE(review): this branch is disabled by the assert; if enabled,
        # z0 and z1 below are undefined here -- needs fixing first.
        assert False
        test(pred_cor_id_np, z0, z1, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
    elif cfg.DATA.TYPE == "general":
        test_general(pred_cor_id_np, gt_cor_id.cpu().numpy(), e_img_hw[1], e_img_hw[0], t)
    else:
        assert False
    for k in t:
        metrics[k] = output["p_preds_emask"][img_idx].new_tensor(t[k]) if not isinstance(t[k], str) else t[k]
    return (None, None, gt_cor_id), (None, None, pred_cor_id), metrics
def postProcess(cfg, input, output, img_idx, is_valid_mode=False, camera_height=1.6, view_args=VIEW_ARGS,
                preset_dis_u=1.6) -> Tuple[
    Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Dict[
        str, torch.Tensor]]:
    """
    Dispatch to the post-processing method selected by
    cfg.POST_PROCESS.METHOD (or METHOD_WHEN_VALID during validation).
    :return (gt_lines, gt_lwh, gt_cor_id), (pred_lines, pred_lwh, pred_cor_id), metrics
    """
    method = cfg.POST_PROCESS.METHOD if not (is_valid_mode and ("METHOD_WHEN_VALID" in cfg.POST_PROCESS)) \
        else cfg.POST_PROCESS.METHOD_WHEN_VALID
    if method == "None" or method is None:
        return (None, None, input["cor"][img_idx]), (None, None, None), {}
    elif method == "geometry" or method == "optimization":
        return calMetrics_PretendDisUThenOptimIOU(cfg, input, output, img_idx, method == "optimization", camera_height,
                                                  view_args, preset_dis_u)
    elif method == "LayoutNetv2":
        return LayoutNetv2PostProcessWrapper(cfg, input, output, img_idx)
    elif method == "develop" or method == "noncuboid":
        from postprocess.noncuboid import nonCuboidPostProcess
        return nonCuboidPostProcess(cfg, input, output, img_idx)
    else:
        assert False, "不支持的POST_PROCESS.METHOD"
def calculateIOU(boxes1: torch.Tensor, boxes2: torch.Tensor):
    """
    Pairwise IoU between axis-aligned boxes.
    Box format: (x-, x+, y-, y+[, z-, z+]). Within each axis the lower
    bound must come before the upper bound, otherwise the result is wrong
    (enforced by the assert below).

    Bug fix: the old version repeated the tensors using ``len()`` of the
    already-unsqueezed operands, so for len(boxes2) > 1 the shapes fed to
    ``torch.stack`` mismatched and the call raised; plain broadcasting is
    used instead.

    :param boxes1: (n1, 2k) boxes.
    :param boxes2: (n2, 2k) boxes.
    :return: (n1, n2) tensor where entry (i, j) is IoU(boxes1[i], boxes2[j]).
    """
    assert torch.all(boxes1[:, ::2] <= boxes1[:, 1::2]) and torch.all(boxes2[:, ::2] <= boxes2[:, 1::2])
    b1 = boxes1.unsqueeze(1)  # (n1, 1, 2k)
    b2 = boxes2.unsqueeze(0)  # (1, n2, 2k)
    boxes1_volume = torch.clamp(b1[:, :, 1::2] - b1[:, :, 0::2], min=0).prod(dim=2)  # (n1, 1)
    boxes2_volume = torch.clamp(b2[:, :, 1::2] - b2[:, :, 0::2], min=0).prod(dim=2)  # (1, n2)
    # Per-axis overlap: min of upper bounds minus max of lower bounds,
    # broadcast to (n1, n2, k).
    distance = torch.min(b1[:, :, 1::2], b2[:, :, 1::2]) - torch.max(b1[:, :, 0::2], b2[:, :, 0::2])
    intersect = torch.clamp(distance, min=0).prod(dim=2)  # (n1, n2)
    iou = intersect / (boxes1_volume + boxes2_volume - intersect)
    return iou
def calLineAngleTan(x, fov: int):
    """Map a normalized ratio ``x`` to the tangent of the corresponding view
    angle for a perspective view with the given FOV in degrees; the
    90-degree case uses exactly 1.0 to avoid trig rounding."""
    half_fov_tan = 1.0 if fov == 90 else np.tan(np.deg2rad(fov / 2))
    return (1 - x) * half_fov_tan
def lineCoordToRatio(cfg, line: List[torch.Tensor], img_hw):
    """
    Convert raw pixel line coordinates into normalized ratios / tan values.

    :param line: [x-lines, y-lines, c-up lines, c-down lines] in pixel coordinates
    :param img_hw: (height, width) of the perspective image
    :return: list of tensors in the same order; x/y lines are normalized to [0, 1],
             center lines become tan values (see below)
    """
    result = [t.clone() for t in line]
    # 10.12: for the same reason as in linesPostProcess, the x/y lines need the
    # extra -1. The center lines were already computed correctly and need no fix.
    # y-constant lines: fold the lower half onto the upper half, then normalize.
    yline = result[1]
    yDownMask = yline >= (img_hw[0] - 1) / 2
    yline[yDownMask] = (img_hw[0] - 1) - yline[yDownMask]
    result[1] = yline / ((img_hw[0] - 1) / 2)
    # x-constant lines: same folding trick on the horizontal axis.
    xline = result[0]
    xRightMask = xline >= (img_hw[1] - 1) / 2
    xline[xRightMask] = (img_hw[1] - 1) - xline[xRightMask]
    result[0] = xline / ((img_hw[1] - 1) / 2)
    # Angle (center) lines:
    if cfg.MODEL.HOUGH.CLINE_TYPE == "NEW":
        # The returned value is the tan of the angle to the vertical. Multiplying
        # it by the known camera height / distance-to-ceiling directly yields the
        # x coordinate.
        h2, w2 = (img_hw[0] - 1) / 2, (img_hw[1] - 1) / 2
        h2f = img_hw[0] // 2
        # c-up lines: three index ranges map to the left edge / top edge / right edge.
        cupline = result[2]
        mask1 = cupline < h2f - 1
        mask3 = cupline > h2f + img_hw[1] - 2
        mask2 = torch.logical_not(torch.logical_or(mask1, mask3))
        cupline[mask1] = w2 / (((h2f - 1) - cupline[mask1]) - h2)
        cupline[mask2] = ((cupline[mask2] - (h2f - 1)) - w2) / h2
        cupline[mask3] = -w2 / ((cupline[mask3] - (h2f + img_hw[1] - 2)) - h2)
        result[2] = cupline
        # c-down lines: mirrored version of the c-up mapping.
        cdownline = result[3]
        mask1 = cdownline < h2f - 1
        mask3 = cdownline > h2f + img_hw[1] - 2
        mask2 = torch.logical_not(torch.logical_or(mask1, mask3))
        cdownline[mask1] = w2 / (h2 - ((h2f - 1) - cdownline[mask1]))
        cdownline[mask2] = (w2 - (cdownline[mask2] - (h2f - 1))) / h2
        cdownline[mask3] = -w2 / (h2 - (cdownline[mask3] - (h2f + img_hw[1] - 2)))
        result[3] = cdownline
    else:
        raise NotImplementedError()
    return result
def gtVisualize(cfg, lines: List[List[torch.Tensor]], img_hw, camera_height=1.6):
    """
    Visualize the ground-truth lines with Open3D.

    In Open3D's built-in axes, red is x, green is y and blue is z.
    For the drawn lines: red marks x-constant lines, green marks y-constant
    lines, and blue marks the lines through the image center.
    """
    processed_lines, layout = linesGTProcess_PretendDisUThenOptimIOU(cfg, lines, img_hw)
    print(layout)
    from visualization import o3dRunVis, o3dDrawLines, o3dInitVis
    viewer = o3dInitVis()
    o3dDrawLines(viewer, processed_lines, layout)
    o3dRunVis(viewer)
def cvtPredLwhToEquirecCornerCoords(lwh, e_img_hw, z0=50):
    """
    Convert an estimated room in lwh format into the corner format accepted by
    the `test` function in HorizonNet's eval_cuboid.py.

    :param lwh: <6> tensor: -x, x, -y, y, -z, z
    :param e_img_hw: (height, width) of the equirectangular image
    :return: dt_cor_id, z0, z1
    """
    # Scale the floor distance so the ceiling distance becomes z0.
    z1 = lwh[4] / lwh[5] * z0
    # Build the 8 uv coordinates, one per box corner.
    def lwhToUv(xyz):
        u = torch.atan2(xyz[0], xyz[1])
        v = torch.atan(xyz[2] / torch.norm(xyz[0:2]))
        return torch.stack((u, v))
    uvs = []
    for i in range(8):
        # Bit pattern of i selects one of the two extents on each axis.
        uv = lwhToUv(lwh[[(i // 4) % 2 + 0, (i // 2) % 2 + 2, (i // 1) % 2 + 4]].cpu())
        uvs.append(uv)
    uvs = torch.stack(uvs)
    coords = py360convert.uv2coor(uvs.cpu().numpy(), *e_img_hw)
    # Reorder the 8 corners into pairs sorted left-to-right by the odd rows' x,
    # with the odd row first within each pair — presumably this matches
    # HorizonNet's expected (ceiling, floor) corner layout; TODO confirm.
    indices = np.repeat(np.argsort(coords[1::2, 0]) * 2, 2)
    indices[0::2] += 1
    coords = coords[indices]
    return coords, z0, z1.item()
# Lazily-built module-level cache for the concatenated (c-up, c-down) vote mask;
# it depends only on CLINE_TYPE and the image size, so one shared copy suffices.
vote_mask_c_up_down = None
def get_vote_mask_c_up_down(cfg, p_img):
    """Return the concatenated (c-up, c-down) vote mask, building and caching it on first use."""
    global vote_mask_c_up_down
    if vote_mask_c_up_down is None:
        u, d = PerspectiveE2PP2E.makeVoteMaskStatic(cfg.MODEL.HOUGH.CLINE_TYPE, p_img.shape[-2:], p_img.device)
        vote_mask_c_up_down = torch.cat([u, d], -1)
    return vote_mask_c_up_down
def _cal_p_pred_2d_mask(cfg, input, img_idx, p_pred_xy_oneimage, p_pred_cud_oneimage):
    """
    Build per-view 2-D probability masks from the predicted line distributions.

    :param p_pred_xy_oneimage: per-view x/y line probabilities
        (assumes shape <view, pos, 2> with channel 0 = x-lines — TODO confirm)
    :param p_pred_cud_oneimage: per-view c-up/c-down Hough-domain probabilities
    :return: <view, 3, H, W> tensor stacking the x / y / center-line masks
    """
    p_imgs = input["p_imgs"][img_idx]
    # Build one mask per cube face.
    result_2dmask = []
    for view_idx, p_img in enumerate(p_imgs):
        # Mask from x-lines: a column probability broadcast over all rows.
        prob = p_pred_xy_oneimage[view_idx, :, 0]
        x_mat = prob.unsqueeze(0).expand(p_img.shape[1], -1)
        # Mask from y-lines: a row probability broadcast over all columns.
        prob = p_pred_xy_oneimage[view_idx, :, 1]
        y_mat = prob.unsqueeze(1).expand(-1, p_img.shape[2])
        # Mask from center lines.
        def _genProb(probs):
            """
            Flatten the <angle_num, 2> tensor — whose last dim holds the c-up and
            c-down Hough-domain vectors — into one vector with c-down appended
            after c-up.
            """
            # return torch.cat([probs[:, i] for i in range(probs.shape[1])])  # the original definition; fully equivalent to the line below but more verbose
            return probs.T.reshape(-1)
        prob = _genProb(p_pred_cud_oneimage[view_idx])
        vote_mask_c_up_down = get_vote_mask_c_up_down(cfg, p_img)
        # Weighted vote: per-pixel mean of the line probabilities that vote there.
        c_mat = (prob * vote_mask_c_up_down).sum(-1) / vote_mask_c_up_down.sum(-1)
        result3 = torch.stack([x_mat, y_mat, c_mat])
        result_2dmask.append(result3)
    result_2dmask = torch.stack(result_2dmask)
    return result_2dmask
def _cvt_xyc_p_pred_2d_mask_to_wallceilfloor(cfg, result_2dmask, img_hw):
    """Re-map the per-view (x, y, center) channel masks into (wall, ceiling, floor) channels."""
    blank = result_2dmask.new_zeros(*img_hw)

    def _remap(view_name, mask):
        if view_name == "U":
            # Top view: center lines are walls; x/y lines average into the ceiling channel.
            return torch.stack([mask[2], (mask[0] + mask[1]) / 2, blank], 0)
        if view_name == "D":
            # Bottom view: center lines are walls; x/y lines average into the floor channel.
            return torch.stack([mask[2], blank, (mask[0] + mask[1]) / 2], 0)
        # Side views: y/center lines merge; the upper half of the merged mask is
        # the ceiling boundary and the lower half is the floor boundary.
        merged = (mask[1] + mask[2]) / 2
        half = merged.shape[0] // 2
        ceiling = torch.cat([merged[0:half], blank[half:]], 0)
        floor = torch.cat([blank[0:half], merged[half:]], 0)
        return torch.stack([mask[0], ceiling, floor], 0)

    converted = [_remap(VIEW_NAME[i], mask) for i, mask in enumerate(result_2dmask)]
    return torch.stack(converted, 0)
def _cal_p_pred_emask(cfg, result_2dmask, img_hw, e_img_hw):
    """
    Project the six per-view masks onto one equirectangular mask.

    :param result_2dmask: <6, C, H, W> per-face masks (cube order per the indexing below)
    :param e_img_hw: (height, width) of the output equirectangular mask
    :return: <C, eH, eW> CPU tensor
    """
    zeros = result_2dmask.new_zeros(result_2dmask.shape[1], *img_hw)
    # Arrange the six face masks into the 3x4 cube "dice" layout expected by c2e.
    cube_mask = torch.cat(
        [torch.cat([zeros, result_2dmask[4], zeros, zeros], dim=2),
         torch.cat([*result_2dmask[[3, 0, 1, 2]]], dim=2),
         torch.cat([zeros, result_2dmask[5], zeros, zeros], dim=2)], dim=1)
    equal_mask = py360convert.c2e(cube_mask.permute(1, 2, 0).cpu().numpy(), *e_img_hw)
    equal_mask = torch.tensor(equal_mask, device="cpu").permute(2, 0, 1)  # after c2e the result stays on CPU (for visualization etc.) instead of going back to the GPU
    return equal_mask
def generatePred2DMask(cfg, input, output, img_idx):
    """Compute and cache the per-view 2-D masks and the panoramic mask for one image in `output` (idempotent)."""
    batch_size = len(input["p_imgs"])
    for key in ("p_preds_2dmask", "p_preds_emask"):
        if key not in output:
            output[key] = [None] * batch_size
    # Already computed for this image — nothing to do.
    if output["p_preds_2dmask"][img_idx] is not None:
        return
    img_hw = input["p_imgs"][img_idx].shape[2:4]
    e_img_hw = input["e_img"][img_idx].shape[1:3]
    xy_prob = torch.sigmoid(output["p_preds_xy"][img_idx])
    cud_prob = torch.sigmoid(output["p_preds_cud"][img_idx])
    mask_2d = _cal_p_pred_2d_mask(cfg, input, img_idx, xy_prob, cud_prob)
    wallceilfloor = _cvt_xyc_p_pred_2d_mask_to_wallceilfloor(cfg, mask_2d, img_hw)
    emask = _cal_p_pred_emask(cfg, wallceilfloor, img_hw, e_img_hw)
    output["p_preds_2dmask"][img_idx] = mask_2d
    output["p_preds_emask"][img_idx] = emask
if __name__ == '__main__':
    # Standalone entry point: run the network on the test split and visualize
    # the post-processing result for a few batches.
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--cfg_file', type=str, required=True, help='specify the config for training')
    parser.add_argument('--ckpt', required=True, help='checkpoint for evaluation')
    parser.add_argument('--visu_count', default=20, type=int, help='visualize how many batches')
    parser.add_argument('--batch_size', default=1, type=int, help='mini-batch size')
    # Model related
    parser.add_argument('--backbone',
                        default='drn38',
                        choices=ENCODER_RESNET + ENCODER_DENSENET + ENCODER_HOUGH,
                        help='backbone of the network')
    parser.add_argument('--no_rnn', action='store_true', help='whether to remove rnn or not')
    parser.add_argument('--no_multigpus', action='store_true', help='disable data parallel')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')
    args = parser.parse_args()
    cfg_from_yaml_file(args.cfg_file, cfg)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)
    device = torch.device('cuda')
    dataset_valid = PerspectiveDataset(cfg, "test")
    loader_valid = DataLoader(dataset_valid,
                              args.batch_size,
                              collate_fn=dataset_valid.collate,
                              shuffle=False,
                              drop_last=False,
                              num_workers=0,
                              pin_memory=True)
    iterator_valid = iter(loader_valid)
    net = DMHNet(cfg, args.backbone, not args.no_rnn).to(device)
    if not args.no_multigpus:
        net = nn.DataParallel(net)  # multi-GPU
    print(str(cfg.POST_PROCESS))
    state_dict = pipeload(args.ckpt, map_location='cpu')["state_dict"]
    net.load_state_dict(state_dict, strict=True)
    net.eval()
    count = 0
    # Debug aid: samples with index <= DBG_START are skipped.
    DBG_START = 0
    for valid_idx in trange(args.visu_count, desc='PostProcess Visualization', position=2):
        input = next(iterator_valid)
        with torch.no_grad():
            for k in input:
                if isinstance(input[k], torch.Tensor):
                    input[k] = input[k].to(device)
            _, results_dict = net(input)
        for i in range(len(input["filename"])):
            count += 1
            if count <= DBG_START:
                continue
            (gt_lines, gt_lwh, gt_cor_id), (pred_lines, pred_lwh, pred_cor_id), metric = postProcess(cfg, input,
                                                                                                     results_dict,
                                                                                                     i)
            print("{:s} pred{:s} gt{:s}".format(str(metric), str(pred_lwh), str(gt_lwh)))
            # Draw the layout boxes (ground truth in red, prediction in default color).
            from visualization import o3dRunVis, o3dDrawLines, o3dInitVis
            vis = o3dInitVis()
            o3dDrawLines(vis, gt_lines, gt_lwh, [1.0, 0.0, 0.0])
            o3dDrawLines(vis, pred_lines, pred_lwh)
            o3dRunVis(vis)
            # Draw the corner points on the equirectangular image.
            from visualization import drawEqualRectCorners
            drawEqualRectCorners(plt, input["e_img"][i], gt_cor_id, pred_cor_id)
            plt.show()
| 44,265 | 42.060311 | 120 | py |
DMH-Net | DMH-Net-main/postprocess/GDSolver.py | import math
import torch
from torch import nn, optim
def solve(module: nn.Module, *inputs, lr=1e-2, tol=1e-4, max_iter=10000, optimizer=None, stop_tol=None, stop_range=None,
          return_best=True, **kwargs):
    """
    Gradient-descend the parameters of `module` so that module(*inputs) -> 0.

    The optimization target is always |module(...)| (i.e. "equation == 0").
    Stops when the loss drops below `tol`, when `max_iter` steps have run, or —
    if `stop_range`/`stop_tol` are given — when the raw output has moved less
    than `stop_tol` over the last `stop_range` steps (plateau detection).
    If `return_best`, the parameters with the lowest observed loss are loaded
    back into the module before returning it.
    """
    if stop_range is not None:
        assert stop_tol is not None
    recent = None  # ring buffer over the last `stop_range` raw outputs
    best_loss = None
    best_params = None
    with torch.enable_grad():
        opt = optimizer if optimizer is not None else optim.SGD(module.parameters(), lr=lr)
        for step in range(max_iter):
            value = module(*inputs, **kwargs)
            loss = torch.abs(value)  # the objective is fixed: drive the equation to 0
            if return_best and (best_loss is None or loss < best_loss):
                best_loss = loss.clone()
                sd = module.state_dict()
                best_params = {name: sd[name].clone() for name in sd}
            opt.zero_grad()
            loss.backward()
            opt.step()
            if loss < tol:
                break
            if stop_range is not None:
                if recent is None:
                    recent = torch.ones(stop_range, device=value.device, dtype=torch.float32) * math.inf
                recent[step % stop_range] = value.item()
                # Plateau: the output barely moved over the last `stop_range` steps.
                if recent.max() - recent.min() < stop_tol:
                    break
    if return_best:
        # Load the best state_dict back into the module before returning it.
        module.load_state_dict(best_params)
    return module
| 1,452 | 32.022727 | 120 | py |
DMH-Net | DMH-Net-main/postprocess/noncuboid.py | import traceback
import warnings
from typing import List, Tuple
import numpy as np
import torch
from easydict import EasyDict
from torch import nn
from e2pconvert_torch.e2plabelconvert import generateOnePerspectiveLabel
from e2pconvert_torch.torch360convert import coor2uv, xyz2uv, uv2unitxyz, uv2coor
from e2plabel.e2plabelconvert import VIEW_NAME, VIEW_ARGS, VIEW_SIZE
from eval_general import test_general
from perspective_dataset import PerspectiveDataset
from postprocess.GDSolver import solve
from postprocess.LayoutNetv2 import map_coordinates_Pytorch
from postprocess.postprocess2 import generatePred2DMask, _cal_p_pred_emask, findPeaks, \
lossFunction, calMetrics_PretendDisUThenOptimIOUForV2
# Hand-picked sample panoramas used for debugging / qualitative inspection.
# NOTE(review): the first entry appears twice — looks intentional (e.g. to
# repeat one hard case), but worth confirming.
FILE_LIST = [
    "7y3sRwLe3Va_1679b5de39e548d38ba240f2fd99cae9.png",
    "7y3sRwLe3Va_1679b5de39e548d38ba240f2fd99cae9.png",
    "7y3sRwLe3Va_b564162b2c7d4033bfe6ef3dfb959c9e.png",
    "7y3sRwLe3Va_fdab6422162e49db822a37178ab70481.png",
    "B6ByNegPMKs_0e5ba44387774783903fea2a1b8f53dd.png",
    "B6ByNegPMKs_4c769d1a658d41eb995deb5b40af57a4.png",
    "7y3sRwLe3Va_5c39473b25b74307858764d1a2045b9e.png",
    "7y3sRwLe3Va_6376b741b50a4418b3dc3fde791c3c09.png",
]
def nonCuboidPostProcess(cfg, input, output, img_idx):
    """
    Non-cuboid (general Manhattan) post-processing: propose corner-column sets
    of several sizes, optimize each candidate layout, and keep the one with the
    lowest error score.

    :return (None, None, gt_cor_id), (None, None, pred_cor_id), metrics
    """
    if cfg.get("VISUALIZATION", {}).get("TYPE") is None:
        cfg.VISUALIZATION = EasyDict()
        cfg.VISUALIZATION.TYPE = [["c", "y", "x", "e_rm", "gtlines", "text"]]
    generatePred2DMask(cfg, input, output, img_idx)
    mask2ds = output["p_preds_2dmask"][img_idx]
    gt_cor_id = input["cor"][img_idx]
    gt_cor_id_np = gt_cor_id.cpu().numpy()
    # Corner evidence: product of each pair of the x / y / center channels.
    cor_mask2ds = torch.stack([
        mask2ds[:, 0] * mask2ds[:, 1],
        mask2ds[:, 1] * mask2ds[:, 2],
        mask2ds[:, 0] * mask2ds[:, 2],
    ], 1)
    if cfg.POST_PROCESS.get("COR_IMG_CAL") is None:
        cor_mask2ds = cor_mask2ds.mean(1)
    elif cfg.POST_PROCESS.get("COR_IMG_CAL") == "max":
        cor_mask2ds = cor_mask2ds.max(1)[0]
    elif cfg.POST_PROCESS.get("COR_IMG_CAL") == "merge":
        cor_mask2ds = (cor_mask2ds.sum(1) + cor_mask2ds.max(1)[0] * 2) / 5
    img_hw = input["p_imgs"][img_idx].shape[2:4]
    e_img_hw = input["e_img"][img_idx].shape[1:3]
    cor_img = _cal_p_pred_emask(cfg, cor_mask2ds.unsqueeze(1), img_hw, e_img_hw).squeeze(0)
    column_img = cor_img.max(0)[0]
    columns = []  # length-n list; its first 4, 6, 8, ... entries are the initializations for 4-, 6-, 8-corner layouts
    ranges = [(e_img_hw[1] // 4 * i, e_img_hw[1] // 4 * (i + 1)) for i in range(4)]
    scores = [column_img[r[0]:r[1]] for r in ranges]
    peakss = [findPeaks(cfg, r).to(torch.int64) for r in scores]
    peakss = [r[s[r].argsort(descending=True)] for r, s in zip(peakss, scores)]
    # First pick one peak from each quarter of the panorama.
    columns.extend([(r[0] if len(r) > 0 else r.new_tensor(e_img_hw[1] // 4 // 4)) + begin for r, (begin, _) in
                    zip(peakss, ranges)])
    peakss_2 = torch.cat([r[1:] + begin for r, (begin, _) in zip(peakss, ranges)])
    # The remaining peaks are sorted by confidence.
    peakss_2 = peakss_2[column_img[peakss_2].argsort(descending=True)]
    columns.extend(peakss_2)
    columns = torch.stack(columns)
    res_v2 = None
    all_results = []
    # for cor_num in [6]:
    METHODS = []
    METHODS.extend([(n, "v2") for n in cfg.POST_PROCESS.get("COMBINE", {}).get("V2", [4,6,8,10,12])])
    METHODS.extend([(n, "v1") for n in cfg.POST_PROCESS.get("COMBINE", {}).get("V1", [4,6,8,10,12])])
    for cor_num, method in METHODS:
        # TODO: try fixing length to 1 to see whether full lines or segments work better
        # TODO: pick a loss aggregation method — sum? mean?
        if method == "v0":
            if len(columns) < cor_num: continue
            columns_one = columns[0:cor_num].sort()[0]  # the sort is mandatory!
            cor_img_columns = cor_img[:, columns_one]
            upper_y = cor_img_columns[:img_hw[0] // 2].argmax(0)
            lower_y = cor_img_columns[img_hw[0] // 2:].argmax(0) + (img_hw[0] // 2)
            init_cors = torch.cat(
                [cor_img.new_tensor([[c, u], [c, l]]) for c, u, l in zip(columns_one, upper_y, lower_y)], 0)
            # cornersToPeaks(cfg, input, img_idx, init_cors)
            # pred_cor_id=init_cors
            # metrics = {}
            pred_cor_id, err_score = solveCorners(cfg, input, output, img_idx, init_cors, e_img_hw)
        else:
            if method == "v2":
                if res_v2 is None:
                    res_v2 = generate2DFrameFromColumnPeaksV2(cfg, columns, cor_img, ranges)
                res_v2_0, init_cors_all = res_v2  # init_cors is now returned by the V2 main function
                res_frame = generate2DFrameFromColumnPeaksV2_ChooseCorNum(res_v2_0, columns, cor_num, ranges)
                if res_frame is None:
                    continue
                choice_idx, beginFromZ = res_frame
                choice_corner_idx = torch.cat([torch.stack([2 * v, 2 * v + 1]) for v in choice_idx])
                init_cors = init_cors_all[choice_corner_idx]
            elif method == "v1":
                res_frame = generate2DFrameFromColumnPeaks(columns, cor_img, cor_num)
                if res_frame is None:
                    continue
                choice_idx, beginFromZ = res_frame
                columns_one = columns[choice_idx]
                cor_img_columns = cor_img[:, columns_one]
                upper_y = cor_img_columns[:img_hw[0] // 2].argmax(0)
                lower_y = cor_img_columns[img_hw[0] // 2:].argmax(0) + (img_hw[0] // 2)
                init_cors = torch.cat(
                    [cor_img.new_tensor([[c, u], [c, l]]) for c, u, l in zip(columns_one, upper_y, lower_y)], 0)
            # pred_cor_id = init_cors
            if not cfg.POST_PROCESS.get("COMBINE", {}).get("SOLVE3", False):
                pred_cor_id, err_score = solveCorners2(cfg, input, output, img_idx, init_cors, cor_img, beginFromZ,
                                                       e_img_hw)
            else:
                pred_cor_id, err_score = solveCorners3(cfg, input, output, img_idx, init_cors, cor_img, beginFromZ,
                                                       e_img_hw)
        # cornersToPeaks(cfg, input, img_idx, pred_cor_id)
        # metrics = {}
        pred_cor_id_np = pred_cor_id.cpu().numpy()
        one_result = {"cor_num": cor_num, "err_score": err_score, "pred_cor": pred_cor_id, "method": method}
        metric = {}
        # Compute metrics with HorizonNet's evaluation code.
        t = {}
        if cfg.DATA.TYPE == "cuboid":
            assert False
            test(pred_cor_id_np, z0, z1, gt_cor_id_np, e_img_hw[1], e_img_hw[0], t)
        elif cfg.DATA.TYPE == "general":
            test_general(pred_cor_id_np, gt_cor_id_np, e_img_hw[1], e_img_hw[0], t)
        else:
            assert False
        for k in t:
            metric[k] = torch.tensor(t[k]) if not isinstance(t[k], str) else t[k]
        one_result["metrics"] = metric
        all_results.append(one_result)
    if cfg.POST_PROCESS.get("COMBINE", {}).get("OPTIM", True):
        _, (_, _, pred_cor_id_optim), metric_optim, err_score_optim = \
            calMetrics_PretendDisUThenOptimIOUForV2(cfg, input, output, img_idx, True)
        all_results.append({"cor_num": 4, "err_score": err_score_optim, "pred_cor": pred_cor_id_optim, "method": "optim", "metrics": metric_optim})
    METRIC_KEYS=["2DIoU", "3DIoU", "rmse", "delta_1"]
    err_scores = torch.tensor([one_result["err_score"] for one_result in all_results])
    best_result_idx = err_scores.argmin()
    best_result_cor_num = all_results[best_result_idx]["cor_num"]
    pred_cor_id = all_results[best_result_idx]["pred_cor"]
    metrics = {}
    metrics.update(all_results[best_result_idx]["metrics"])
    # Also report the best metric value across all candidates in all_results.
    metrics_all = {k: torch.stack([one_result["metrics"][k] for one_result in all_results]) for k in METRIC_KEYS}
    metrics_best = {"best/" + k: v.max() for k, v in metrics_all.items()}
    metrics.update(metrics_best)
    metrics["pred_cor_num"] = torch.tensor(float(best_result_cor_num))
    # Still print every candidate's result.
    additional_metrics = {}
    for one_result in all_results:
        additional_metrics.update(
            {one_result["method"] + "/" + str(one_result["cor_num"]) + "/" + k: one_result["metrics"][k] for k in one_result["metrics"]})
        additional_metrics.update({one_result["method"] + "/" + str(one_result["cor_num"]) + "/err_score": one_result["err_score"]})
    print(additional_metrics)
    return (None, None, gt_cor_id), (None, None, pred_cor_id), metrics
def cornersToPeaks(cfg, input, img_idx, cors, isInputUv=False) -> List[List[Tuple[torch.Tensor, torch.Tensor]]]:
    """
    Project the corner coordinates into every perspective view and extract the
    per-view line peaks.

    :return: for each view, four (peaks, lengths) pairs in the order
             x / y / c-up / c-down.
    """
    def _stack(tensors):
        # linesToPeaksNewCore yields plain lists; empty lists become empty tensors.
        return torch.stack(tensors) if len(tensors) > 0 else cors.new_zeros(0)

    per_view = []
    for view_idx, (view_name, view) in enumerate(zip(VIEW_NAME, VIEW_ARGS)):
        label = generateOnePerspectiveLabel(input["e_img"][img_idx], cors, *view, VIEW_SIZE, isInputUv=isInputUv)
        # input["lines"][img_idx][view_idx] = label["lines"] # TODO
        peakss, lengths = PerspectiveDataset.linesToPeaksNewCore([line[3:8] for line in label["lines"]], VIEW_SIZE)
        stacked_peaks = [_stack(p) for p in peakss]
        stacked_lengths = [_stack(p) for p in lengths]
        per_view.append(list(zip(stacked_peaks, stacked_lengths)))
    return per_view
def solveCorners(cfg, input, output, img_idx, start_corners, e_img_hw):
    """
    Refine corner positions by gradient descent on the line-peak loss.

    Parameter definition: (n*2+1) parameters in total, where n is 4, 6, 8, ...
    (len(start_corners) = 2*n): the corners' x values, the ceiling tan values,
    and the ratio of the floor tan to the ceiling tan.

    :return: (refined corner coords, |final loss|)
    """
    uv = coor2uv(start_corners, *e_img_hw)
    xs = start_corners[::2, 0]
    tanceil = torch.tan(uv[::2, 1])
    # One shared floor/ceiling tan ratio keeps all corners on the same two planes.
    tanratio = (torch.tan(uv[1::2, 1]) / torch.tan(uv[::2, 1])).mean()
    class CornersSolveModule(nn.Module):
        def __init__(self):
            super().__init__()
            self.xs = nn.Parameter(xs.clone().detach().requires_grad_(True))
            self.tanceil = nn.Parameter(tanceil.clone().detach().requires_grad_(True))
            self.tanratio = nn.Parameter(tanratio.clone().detach().requires_grad_(True))
        def toCors(self):
            # Reconstruct interleaved (ceiling, floor) corner rows from the parameters.
            tanfloor = self.tanceil * self.tanratio
            tanceilfloor = []
            for c, f in zip(self.tanceil, tanfloor):
                tanceilfloor.extend((c, f))
            vs = torch.atan(torch.stack(tanceilfloor))
            coor_y = (-vs / np.pi + 0.5) * e_img_hw[0] - 0.5
            cors = torch.stack([self.xs.repeat_interleave(2), coor_y], 1)
            return cors
        def forward(self):
            cors = self.toCors()
            try:
                peaks = cornersToPeaks(cfg, input, img_idx, cors)
                return lossFunction(output, img_idx, peaks)
            except:
                traceback.print_exc()
                warnings.warn("CornersSolveModule forward时抛出异常")
                # Fall back to a large constant loss so the optimizer keeps running.
                return torch.tensor(20., device=cors.device, requires_grad=True)
    module = CornersSolveModule().to("cpu")
    module = solve(module, max_iter=100, lr=1e-2, stop_tol=1e-3, stop_range=5)
    return module.toCors(), torch.abs(module.forward())
def generate2DFrameFromColumnPeaksV2(cfg, column_peaks, cor_img, ranges):
    """
    :param column_peaks: (x) variable-length vector of all column peaks, sorted
        by confidence in descending order; the first four elements must lie in
        the four panorama quarters.
    :return: ((Tensor<cor_num>, Tensor<cor_num>, bool), coor) — the first
        Tensor holds the **indices** of the chosen elements of column_peaks,
        the second their probabilities, and the bool says the edge from
        chosen[0] to chosen[1] runs along z (front-back); `coor` are the
        initial corner coordinates for all peaks.
    """
    # Use only the first 4 peaks to estimate a hypothetical height, then project
    # every peak into xz space.
    column_peaks, sortedIdxs = column_peaks.sort()
    oldIdxToNewIdx = torch.cat([torch.where(sortedIdxs == v)[0] for v in range(len(sortedIdxs))])
    def getXz():
        img_hw = cor_img.shape
        columns_one = column_peaks
        cor_img_columns = cor_img[:, columns_one]
        upper_y = cor_img_columns[:img_hw[0] // 2].argmax(0)
        lower_y = cor_img_columns[img_hw[0] // 2:].argmax(0) + (img_hw[0] // 2)
        init_cors = torch.cat(
            [cor_img.new_tensor([[c, u], [c, l]]) for c, u, l in zip(columns_one, upper_y, lower_y)], 0)
        uv = coor2uv(init_cors, *img_hw)
        tanv = torch.tan(uv[:, 1])
        # xs = start_corners[::2, 0]
        # tanceil = torch.tan(uv[::2, 1])
        # Step 1: compute the height and move every point (v coordinate only)
        # onto the same height.
        tanratio = tanv[0::2] / tanv[1::2]
        # !!! Only the first 4 incoming peaks act as support points; remember to
        # convert indices via oldIdxToNewIdx.
        if cfg.POST_PROCESS.V2.get("REMOVE_BAD_GROUND_POINT") == "both":
            prob_ground = map_coordinates_Pytorch(cor_img, init_cors[1::2][oldIdxToNewIdx[0:4]].T.flip(0))
            the_mask = chooseByProb(cfg, prob_ground)
            aim_tanratio = tanratio[oldIdxToNewIdx[0:4][the_mask]].mean()
        else:
            aim_tanratio = tanratio[oldIdxToNewIdx[0:4]].mean()
        v_adjust_factor = torch.sqrt(tanratio / aim_tanratio)
        v_adjust_vector = torch.cat([torch.stack([1 / v, v]) for v in v_adjust_factor])
        tanv = tanv * v_adjust_vector
        uv[:, 1] = torch.atan(tanv)
        # Step 2: build the initial frame.
        # With all uv known, project to xyz, fix the y value to height or -1.6,
        # and obtain x and z.
        # Refine each point's x and z according to beginFromZ and the confidences.
        xyz = uv2unitxyz(uv)
        # xyz[0::2] = xyz[0::2] / xyz[0::2, 1] * height
        # xyz[1::2] = xyz[1::2] / xyz[1::2, 1] * -1.6
        xz = (xyz[1::2] / xyz[1::2, 1:2] * -1.6)[:, [0, 2]]
        e_coor = uv2coor(uv, *cor_img.shape)
        prob = map_coordinates_Pytorch(cor_img, e_coor.T.flip(0))
        prob = (prob[0::2] + prob[1::2]) / 2
        # TODO question: which coordinates feed the next stage — the once-refined
        # ones or the raw ones?
        coor = e_coor if cfg.POST_PROCESS.V2.REFINE_V_TWICE else init_cors
        oldIdxToNewCornerIdx = torch.cat([torch.stack([2 * v, 2 * v + 1]) for v in oldIdxToNewIdx])
        coor = coor[oldIdxToNewCornerIdx]
        return xz, prob, coor
    xz, prob, coor = getXz()
    # TODO question: which probability to use — the new per-point one or the
    # per-column one?
    if cfg.POST_PROCESS.V2.FIRST_COLUMN_PROB:
        column_img = cor_img.max(0)[0]
        prob = column_img[column_peaks]
    l = len(xz)
    # Connect consecutive points and compute each edge's direction.
    delta = xz - torch.cat([xz[1:], xz[0:1]])
    absdelta = torch.abs(delta)
    lineIsZ: torch.Tensor = (absdelta[:, 0] < absdelta[:, 1])  # whether the edge from i to i+1 runs along z
    # Start from the highest-probability point whose two incident edges run
    # along x and z respectively.
    probIdx = prob.argsort(descending=True)
    resultIdxs = None
    for v in probIdx:
        if lineIsZ[(v - 1 + l) % l] == lineIsZ[v]: continue  # both incident edges share a direction: invalid point
        beginIdx = v.item()
        resultIdxs = [beginIdx]
        # Starting from beginIdx the traversal result is direction-independent,
        # so always walk forward.
        cur = (beginIdx + 1) % l
        beginFromZ = lineIsZ[beginIdx]
        curDirect = beginFromZ
        while cur != beginIdx:
            if lineIsZ[cur] != curDirect:
                resultIdxs.append(cur)
                curDirect = lineIsZ[cur]
            cur = (cur + 1) % l
        # Validate: the choice is only accepted if every panorama quarter still
        # contains a peak afterwards.
        new_choice_peaks = column_peaks[sortedIdxs[resultIdxs]]
        success = True
        for r in ranges:
            if torch.logical_and(r[0] <= new_choice_peaks, new_choice_peaks < r[1]).sum() <= 0:
                success = False
                break
        if success:
            break
        else:
            resultIdxs = None
    if resultIdxs is None:
        # No feasible solution: fall back to rerunning with only the first four peaks.
        if len(column_peaks) > 4:
            res_v2_0, _ = generate2DFrameFromColumnPeaksV2(cfg, column_peaks[oldIdxToNewIdx][0:4], cor_img, ranges)
            return res_v2_0, coor
        else:
            # Still failing after recursion — just return as-is.
            resultIdxs = [0, 1, 2, 3]
            beginFromZ = lineIsZ[0]
    return (sortedIdxs[resultIdxs], prob[resultIdxs], beginFromZ), coor
def generate2DFrameFromColumnPeaksV2_ChooseCorNum(res_v2_0, column_peaks, cor_num, ranges):
    """
    Shrink a V2 frame proposal down to exactly `cor_num` corners by repeatedly
    removing the lowest-probability edge (two points at a time).

    :param res_v2_0: (choice_idx, choice_probs, beginFromZ) from the V2 proposal
    :return: (choice_idx, beginFromZ) with len == cor_num, or None if impossible
    """
    choice_idx, choice_probs, beginFromZ = res_v2_0
    assert len(choice_idx) % 2 == 0 and 4 <= cor_num <= 12 and cor_num % 2 == 0
    if len(choice_idx) == cor_num:
        return choice_idx, beginFromZ
    elif len(choice_idx) < cor_num:
        return None
    else:
        # Remove only two points at a time, then recurse.
        # Find the lowest-probability edge and remove it.
        line_prob = choice_probs + torch.cat([choice_probs[1:], choice_probs[0:1]])
        idxSeq = line_prob.argsort()
        new_choice_idx, new_choice_probs, new_beginFromZ = None, None, None
        # Choose the edge to delete:
        for toRemoveIdx in idxSeq:
            if toRemoveIdx < len(choice_idx) - 1:
                # Simply splice the two sides together.
                new_choice_idx = torch.cat([choice_idx[0:toRemoveIdx], choice_idx[toRemoveIdx + 2:]])
                new_choice_probs = torch.cat([choice_probs[0:toRemoveIdx], choice_probs[toRemoveIdx + 2:]])
                new_beginFromZ = beginFromZ
            else:
                # Keep indices 1 .. l-1, and beginFromZ must flip.
                new_choice_idx = choice_idx[1:len(choice_idx) - 1]
                new_choice_probs = choice_probs[1:len(choice_idx) - 1]
                new_beginFromZ = not beginFromZ
            # Validate: only accept if every panorama quarter still contains a peak.
            new_choice_peaks = column_peaks[new_choice_idx]
            success = True
            for r in ranges:
                if torch.logical_and(r[0] <= new_choice_peaks, new_choice_peaks < r[1]).sum() <= 0:
                    success = False
                    break
            if success:
                # Validation passed: two points were successfully removed.
                # Recurse to keep removing; a non-None return means success,
                # None means try the next edge at this level.
                new_res_v2_0 = (new_choice_idx, new_choice_probs, new_beginFromZ)
                res = generate2DFrameFromColumnPeaksV2_ChooseCorNum(new_res_v2_0, column_peaks, cor_num, ranges)
                if res is not None:
                    return res
        # Reaching this point means no valid result could be derived.
        return None
def generate2DFrameFromColumnPeaks(column_peaks, cor_img, cor_num):
    """
    :param column_peaks: (x) variable-length vector of all column peaks, sorted
        by confidence in descending order; the first four elements must lie in
        the four panorama quarters.
    :return: None, or Tuple[Tensor<cor_num>, bool] where the Tensor holds the
        **indices** of the chosen elements of column_peaks and the bool says
        the edge from chosen[0] to chosen[1] runs along z (front-back).
    """
    assert 4 <= cor_num <= 12 and cor_num % 2 == 0
    toChooseIdxs = list(range(4, len(column_peaks)))
    chosen = list(column_peaks[0:4].sort()[0])
    beginFromZ = True  # the edge from chosen[0] to chosen[1] should run along z (front-back)
    while len(chosen) < cor_num:
        if len(toChooseIdxs) < 2:
            return None
        beforeChosenCount = len(chosen)
        idx0 = toChooseIdxs[0]
        for i in range(1, len(toChooseIdxs)):
            idx1 = toChooseIdxs[i]
            insert_pos = torch.searchsorted(column_peaks.new_tensor(chosen), column_peaks[[idx0, idx1]])
            insert_pos_dis = torch.abs(insert_pos[1] - insert_pos[0])
            # Only adjacent (or wrap-around) insertion positions keep the frame valid.
            if insert_pos_dis <= 1 or insert_pos_dis == len(chosen):
                if insert_pos_dis == len(chosen):
                    # Wrap-around insertion: beginFromZ must flip.
                    beginFromZ = not beginFromZ
                if column_peaks[idx1] >= column_peaks[idx0]:
                    # The element at idx1 inserts after the one at idx0, so
                    # insert idx1's element first.
                    chosen.insert(insert_pos[1], column_peaks[idx1])
                    chosen.insert(insert_pos[0], column_peaks[idx0])
                else:
                    chosen.insert(insert_pos[0], column_peaks[idx0])
                    chosen.insert(insert_pos[1], column_peaks[idx1])
                toChooseIdxs.remove(idx0)
                toChooseIdxs.remove(idx1)
                break
        if len(chosen) - beforeChosenCount < 2:
            # The insertion attempt failed: return None.
            return None
    return torch.cat([torch.where(column_peaks == v)[0] for v in chosen]), beginFromZ
def chooseByProb(cfg, prob):
    """Boolean mask over `prob`: entries above a cfg-dependent threshold, with at least the top two always selected."""
    MIN_COUNT = 2
    cor_img_cal = cfg.POST_PROCESS.get("COR_IMG_CAL")
    # Threshold depends on how the corner image was aggregated.
    if cor_img_cal is None:
        threshold = 0.4
    elif cor_img_cal == "max":
        threshold = 0.7
    elif cor_img_cal == "merge":
        threshold = 0.6
    mask = prob > threshold
    # Guarantee a minimum number of picks regardless of the threshold.
    top_idx = prob.argsort(descending=True)[:MIN_COUNT]
    mask[top_idx] = True
    return mask
def solveCorners2(cfg, input, output, img_idx, start_corners: torch.Tensor, prob_map: torch.Tensor,
                  beginFromZ: bool, e_img_hw):
    """
    Refine a Manhattan layout parameterized by per-edge wall distances plus one
    ceiling height, starting from the given corner coordinates.

    :param start_corners: interleaved (ceiling, floor) corner pixel coords
    :param prob_map: equirectangular corner-probability image
    :param beginFromZ: whether the edge from corner 0 to corner 1 runs along z
    :return: (refined corner coords, |final loss|)
    """
    uv = coor2uv(start_corners, *e_img_hw)
    tanv = torch.tan(uv[:, 1])
    # xs = start_corners[::2, 0]
    # tanceil = torch.tan(uv[::2, 1])
    # Step 1: compute the height and move every point (v coordinate only) onto
    # the same height.
    tanratio = tanv[0::2] / tanv[1::2]
    if cfg.POST_PROCESS.V2.get("REMOVE_BAD_GROUND_POINT") == "second" or cfg.POST_PROCESS.V2.get("REMOVE_BAD_GROUND_POINT") == "both":
        prob_ground = map_coordinates_Pytorch(prob_map, start_corners[1::2].T.flip(0))
        the_mask = chooseByProb(cfg, prob_ground)
        aim_tanratio = tanratio[the_mask].mean()
    else:
        aim_tanratio = tanratio.mean()
    height = -1.6 * aim_tanratio  # initial height from the mean of (ceiling tan / floor tan)
    v_adjust_factor = torch.sqrt(tanratio / aim_tanratio)
    v_adjust_vector = torch.cat([torch.stack([1 / v, v]) for v in v_adjust_factor])
    tanv = tanv * v_adjust_vector
    uv[:, 1] = torch.atan(tanv)
    # Step 2: build the initial frame.
    # With all uv known, project to xyz, fix the y value to height or -1.6, and
    # obtain x and z.
    # Refine each point's x and z according to beginFromZ and the confidences.
    xyz = uv2unitxyz(uv)
    # xyz[0::2] = xyz[0::2] / xyz[0::2, 1] * height
    # xyz[1::2] = xyz[1::2] / xyz[1::2, 1] * -1.6
    xz = (xyz[1::2] / xyz[1::2, 1:2] * -1.6)[:, [0, 2]]
    e_coor = uv2coor(uv, *prob_map.shape)
    if cfg.POST_PROCESS.V2.SECOND_START_PROB:
        prob_full = map_coordinates_Pytorch(prob_map, start_corners.T.flip(0))
    else:
        prob_full = map_coordinates_Pytorch(prob_map, e_coor.T.flip(0))
    prob = (prob_full[0::2] + prob_full[1::2]) / 2
    dis = torch.zeros_like(xz[:, 0])
    l = len(dis)
    for i in range(l):
        if cfg.POST_PROCESS.V2.STRAIGHTEN_BY_PROB:
            # Weight the two endpoints of each edge by their confidences.
            factor = torch.stack([prob[i], prob[(i + 1) % l]])
            factor = factor / factor.sum()
            if cfg.POST_PROCESS.V2.STRAIGHTEN_WHEN_BETTER:
                # Snap fully to the clearly more confident endpoint.
                if factor[0] > 0.75:
                    factor = factor.new_tensor([1.0, 0.0])
                elif factor[1] > 0.75:
                    factor = factor.new_tensor([0.0, 1.0])
        else:
            factor = prob.new_tensor([0.5, 0.5])
        if beginFromZ:
            # xz[0] -> xz[1] runs along z, so at i=0 the x values must be forced
            # equal and stored at dis[0].
            if i % 2 == 0:
                dis[i] = (torch.stack([xz[i, 0], xz[(i + 1) % l, 0]]) * factor).sum()
            else:
                dis[i] = (torch.stack([xz[i, 1], xz[(i + 1) % l, 1]]) * factor).sum()
        else:
            # Otherwise xz[0] -> xz[1] runs along x, so at i=0 the z values must
            # be forced equal and stored at index 1.
            if i % 2 == 0:
                dis[(i + 1) % l] = (torch.stack([xz[i, 1], xz[(i + 1) % l, 1]]) * factor).sum()
            else:
                dis[(i + 1) % l] = (torch.stack([xz[i, 0], xz[(i + 1) % l, 0]]) * factor).sum()
    class CornersSolveModule(nn.Module):
        def __init__(self):
            super().__init__()
            self.height = nn.Parameter(height.clone().detach().requires_grad_(True))
            self.dis = nn.Parameter(dis.clone().detach().requires_grad_(True))
        def toUvs(self):
            cor_num = len(self.dis)
            idxs = torch.arange(cor_num, device=self.dis.device)
            idxs = torch.stack([idxs, (idxs + 1) % cor_num], 1)
            idxs[1::2] = idxs[1::2].flip(1)
            xyz = torch.cat([
                torch.stack([self.dis[idxs[:, 0]], self.height.repeat(len(idxs)),
                             self.dis[idxs[:, 1]]], 1),
                torch.stack([self.dis[idxs[:, 0]], self.dis.new_tensor(-1.6).repeat(len(idxs)),
                             self.dis[idxs[:, 1]]], 1),
            ], 0)
            uv = xyz2uv(xyz)
            # Sort by u — this step is mandatory!
            seq = uv[:cor_num, 0].argsort()
            seq = seq.repeat_interleave(2)
            seq[1::2] += cor_num
            uv = uv[seq]
            return uv
        def forward(self):
            # return torch.tensor(20., device=self.dis.device, requires_grad=True)
            uv = self.toUvs()
            try:
                peaks = cornersToPeaks(cfg, input, img_idx, uv, isInputUv=True)
                return lossFunction(output, img_idx, peaks)
            except:
                traceback.print_exc()
                warnings.warn("CornersSolveModule forward时抛出异常")
                # Fall back to a large constant loss so the optimizer keeps running.
                return torch.tensor(20., device=uv.device, requires_grad=True)
    module = CornersSolveModule().to("cpu")
    module = solve(module, max_iter=100, lr=1e-2, stop_tol=1e-3, stop_range=5)
    uvs = module.toUvs()
    return uv2coor(uvs, *e_img_hw), torch.abs(module.forward())
def solveCorners3(cfg, input, output, img_idx, start_corners: torch.Tensor, prob_map: torch.Tensor,
                  beginFromZ: bool, e_img_hw):
    """
    Variant of solveCorners2 that, inside toUvs, keeps the walls' angular order
    fixed instead of re-sorting corners by u on every step.

    NOTE(review): the initialization code below duplicates solveCorners2 almost
    line-for-line; consider extracting a shared helper.

    :return: (refined corner coords, |final loss|)
    """
    uv = coor2uv(start_corners, *e_img_hw)
    tanv = torch.tan(uv[:, 1])
    # xs = start_corners[::2, 0]
    # tanceil = torch.tan(uv[::2, 1])
    # Step 1: compute the height and move every point (v coordinate only) onto
    # the same height.
    tanratio = tanv[0::2] / tanv[1::2]
    if cfg.POST_PROCESS.V2.get("REMOVE_BAD_GROUND_POINT") == "second" or cfg.POST_PROCESS.V2.get("REMOVE_BAD_GROUND_POINT") == "both":
        prob_ground = map_coordinates_Pytorch(prob_map, start_corners[1::2].T.flip(0))
        the_mask = chooseByProb(cfg, prob_ground)
        aim_tanratio = tanratio[the_mask].mean()
    else:
        aim_tanratio = tanratio.mean()
    height = -1.6 * aim_tanratio  # initial height from the mean of (ceiling tan / floor tan)
    v_adjust_factor = torch.sqrt(tanratio / aim_tanratio)
    v_adjust_vector = torch.cat([torch.stack([1 / v, v]) for v in v_adjust_factor])
    tanv = tanv * v_adjust_vector
    uv[:, 1] = torch.atan(tanv)
    # Step 2: build the initial frame.
    # With all uv known, project to xyz, fix the y value to height or -1.6, and
    # obtain x and z.
    # Refine each point's x and z according to beginFromZ and the confidences.
    xyz = uv2unitxyz(uv)
    # xyz[0::2] = xyz[0::2] / xyz[0::2, 1] * height
    # xyz[1::2] = xyz[1::2] / xyz[1::2, 1] * -1.6
    xz = (xyz[1::2] / xyz[1::2, 1:2] * -1.6)[:, [0, 2]]
    e_coor = uv2coor(uv, *prob_map.shape)
    if cfg.POST_PROCESS.V2.SECOND_START_PROB:
        prob_full = map_coordinates_Pytorch(prob_map, start_corners.T.flip(0))
    else:
        prob_full = map_coordinates_Pytorch(prob_map, e_coor.T.flip(0))
    prob = (prob_full[0::2] + prob_full[1::2]) / 2
    dis = torch.zeros_like(xz[:, 0])
    l = len(dis)
    for i in range(l):
        if cfg.POST_PROCESS.V2.STRAIGHTEN_BY_PROB:
            # Weight the two endpoints of each edge by their confidences.
            factor = torch.stack([prob[i], prob[(i + 1) % l]])
            factor = factor / factor.sum()
            if cfg.POST_PROCESS.V2.STRAIGHTEN_WHEN_BETTER:
                # Snap fully to the clearly more confident endpoint.
                if factor[0] > 0.75:
                    factor = factor.new_tensor([1.0, 0.0])
                elif factor[1] > 0.75:
                    factor = factor.new_tensor([0.0, 1.0])
        else:
            factor = prob.new_tensor([0.5, 0.5])
        if beginFromZ:
            # xz[0] -> xz[1] runs along z, so at i=0 the x values must be forced
            # equal and stored at dis[0].
            if i % 2 == 0:
                dis[i] = (torch.stack([xz[i, 0], xz[(i + 1) % l, 0]]) * factor).sum()
            else:
                dis[i] = (torch.stack([xz[i, 1], xz[(i + 1) % l, 1]]) * factor).sum()
        else:
            # Otherwise xz[0] -> xz[1] runs along x, so at i=0 the z values must
            # be forced equal and stored at index 1.
            if i % 2 == 0:
                dis[(i + 1) % l] = (torch.stack([xz[i, 1], xz[(i + 1) % l, 1]]) * factor).sum()
            else:
                dis[(i + 1) % l] = (torch.stack([xz[i, 0], xz[(i + 1) % l, 0]]) * factor).sum()
    class CornersSolveModule(nn.Module):
        def __init__(self):
            super().__init__()
            self.height = nn.Parameter(height.clone().detach().requires_grad_(True))
            self.dis = nn.Parameter(dis.clone().detach().requires_grad_(True))
        def toUvs(self):
            cor_num = len(self.dis)
            idxs = torch.arange(cor_num, device=self.dis.device)
            idxs = torch.stack([idxs, (idxs + 1) % cor_num], 1)
            idxs[1::2] = idxs[1::2].flip(1)
            dis = self.dis
            disU = torch.atan2(dis[0::2], dis[1::2])  # atan2(x/z)
            seq = disU.argsort()
            seq = (seq * 2).repeat_interleave(2)
            seq[1::2] += 1
            dis = dis[seq]
            xyz = torch.cat([
                torch.stack([dis[idxs[:, 0]], self.height.repeat(len(idxs)),
                             dis[idxs[:, 1]]], 1),
                torch.stack([dis[idxs[:, 0]], dis.new_tensor(-1.6).repeat(len(idxs)),
                             dis[idxs[:, 1]]], 1),
            ], 0)
            uv = xyz2uv(xyz)
            # No full sort any more; keep the existing uv order and only locate
            # the point with the smallest u to start from.
            # NOTE(review): uv[0:cor_num] is <cor_num, 2>, so argmin() flattens
            # over both columns — likely intended uv[0:cor_num, 0].argmin()
            # (solveCorners2 uses column 0 explicitly); verify.
            startPlace = uv[0:cor_num].argmin()
            seq = torch.arange(startPlace, startPlace + cor_num, device=uv.device) % cor_num
            seq = seq.repeat_interleave(2)
            seq[1::2] += cor_num
            uv = uv[seq]
            return uv
        def forward(self):
            # return torch.tensor(20., device=self.dis.device, requires_grad=True)
            uv = self.toUvs()
            try:
                peaks = cornersToPeaks(cfg, input, img_idx, uv, isInputUv=True)
                return lossFunction(output, img_idx, peaks)
            except:
                traceback.print_exc()
                warnings.warn("CornersSolveModule forward时抛出异常")
                # Fall back to a large constant loss so the optimizer keeps running.
                return torch.tensor(20., device=uv.device, requires_grad=True)
    module = CornersSolveModule().to("cpu")
    module = solve(module, max_iter=100, lr=1e-2, stop_tol=1e-3, stop_range=5)
    uvs = module.toUvs()
    return uv2coor(uvs, *e_img_hw), torch.abs(module.forward())
| 29,019 | 43.509202 | 147 | py |
DMH-Net | DMH-Net-main/misc/grab_data.py | '''
Test docstring
'''
# pylint: disable=cell-var-from-loop
# pylint --extension-pkg-whitelist=cv2
import glob
import urllib.request
import json
import math
import numpy as np
import cv2
from tqdm import trange
import argparse
import os
def dump_to_txt(m_path, m_list):
    """Write an (N, 2) integer coordinate array to *m_path*, one "x y" per line.

    Note: the previous docstring claimed *m_list* was a list of strings; it is
    actually a 2-D numpy array (or nested sequence) of integer pixel pairs.
    """
    with open(m_path, 'w') as f:
        for row in m_list:
            f.write("%d %d\n" % (row[0], row[1]))
def _mark_point_to_pixels(mark_point, rows, cols):
    """Project one MarkPoint (3-D up/down positions) to panorama pixel coords.

    Returns (col, row_up, row_down) for a rows x cols equirectangular image.
    """
    ux = mark_point['UpPosition']['x']
    uz = mark_point['UpPosition']['z']
    uy = mark_point['UpPosition']['y']
    dy = mark_point['DownPosition']['y']
    azimuth = math.atan2(ux, -uz)
    horiz = np.sqrt(uz * uz + ux * ux)  # horizontal distance from the camera
    elevation_u = math.atan(uy / horiz)
    elevation_d = math.atan(dy / horiz)
    col = int(cols - (azimuth * 0.5 / np.pi + 0.5) * cols)
    row_u = int(rows / 2 - elevation_u / np.pi * rows)
    row_d = int(rows / 2 - elevation_d / np.pi * rows)
    return col, row_u, row_d


def process_mark_points(pmp_obj, pmp_rows, pmp_cols):
    '''Process obj (such as door, wall, etc.) to give their panorama pixel coords.

    Returns (col0, col1, row0_u, row0_d, row1_u, row1_d) — the column and the
    upper/lower rows of the object's start and end mark points.  The start and
    end points previously went through two duplicated code paths; both now use
    the shared `_mark_point_to_pixels` helper.
    '''
    col0, row0_u, row0_d = _mark_point_to_pixels(pmp_obj['StartMarkPoint'], pmp_rows, pmp_cols)
    col1, row1_u, row1_d = _mark_point_to_pixels(pmp_obj['EndMarkPoint'], pmp_rows, pmp_cols)
    return col0, col1, row0_u, row0_d, row1_u, row1_d
# file_path = '/data00/xuezhou/datasets/grabData/woaiwojia_vr.txt'
# f = open(file_path, 'r')
# for line in f:
# file_name = os.path.basename(line)
# save_as = '/data00/xuezhou/datasets/grabData/jsonList/%s.json'%(file_name[:-1])
# print(save_as)
# urllib.request.urlretrieve(line, save_as)
def grab_pano_urls():
    """Collect panorama URLs referenced by the hotspot json files and dump
    the first ~1000 of them to ./test_urls.json."""
    json_list = glob.glob('/data00/xuezhou/datasets/grabData/jsonList/*.json')
    test_urls = []
    for json_path in json_list:
        with open(json_path, 'r') as f:
            json_data = json.load(f)
        hotspots = json_data['HotSpots']
        for floor in json_data['Floors']:
            for room in floor['Rooms']:
                for room_id in room['HotSpotIds']:
                    # find the hotspot record whose ID matches this room id
                    matching = next((hs for hs in hotspots if hs['ID'] == room_id), None)
                    if not matching:
                        print('no matching for room %s from' % room_id, 'in', json_path)
                        continue
                    tos_url = matching['tos_origin_url']
                    if not tos_url:
                        print('no valid tos_origin_url for room %s in' % room_id, 'in', json_path)
                        continue
                    test_urls.append(tos_url)
        if (len(test_urls) > 1000):
            break
    with open('./test_urls.json', 'w') as f:
        json.dump(test_urls, f)
if __name__ == '__main__':
    # Set all parameters for grab data
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--start_idx', default=-1, type=int, help='starting index of the sorted json list')
    parser.add_argument('--end_idx', default=-1, type=int, help='ending index of the sorted json list')
    args = parser.parse_args()

    SAVE_DATA_FOLDER = '/mnt/cephfs_new_wj/uslabcv/wenchao/datasets/wawj_ori_datasets/wen_grab'
    JSON_LIST = sorted(
        glob.glob('/mnt/cephfs_new_wj/uslabcv/wenchao/datasets/wawj_ori_datasets/grabData/jsonListMultiFloor/*.json'))
    SUB_FOLDER = 'test'
    ROWS, COLS = 512, 1024  # output panorama resolution
    # Negative CLI indices mean "process from the start / to the end".
    START_INDEX = 0 if (args.start_idx < 0) else args.start_idx
    END_INDEX = len(JSON_LIST) if (args.end_idx < 0) else args.end_idx

    label_dir = os.path.join(SAVE_DATA_FOLDER, SUB_FOLDER, 'label_cor')
    img_dir = os.path.join(SAVE_DATA_FOLDER, SUB_FOLDER, 'img')
    if not os.path.exists(label_dir):
        os.makedirs(label_dir)
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)

    # Grab data
    for ith_json in trange(START_INDEX, END_INDEX, desc='Grabbing', unit='file'):
        # Load and parse json file
        json_path = JSON_LIST[ith_json]
        with open(json_path, 'r') as f:
            json_data = json.load(f)
        HotSpots = json_data['HotSpots']
        hotspots_ids_map = {HotSpots[k]['ID']: HotSpots[k] for k in range(len(HotSpots))}
        for floor in json_data['Floors']:
            Rooms = floor['Rooms']
            for room in Rooms:
                walls = room['Walls']
                # skip degenerate rooms with fewer than four walls
                if (len(walls) < 4):
                    continue
                doors = room['Doors']
                windows = room['Windows']
                room_ids = room['HotSpotIds']
                # import ipdb; ipdb.set_trace()
                for room_id in room_ids:  # TODO: why list?
                    # check if room_id is valid
                    if room_id not in hotspots_ids_map:
                        print('no matching for room %s from' % room_id, ith_json, 'json file')
                        continue
                    matchingHotSpot = hotspots_ids_map[room_id]
                    tos_url = matchingHotSpot['tos_origin_url']
                    if not tos_url:
                        print('no valid tos_origin_url for room %s in' % room_id, ith_json, 'json file')
                        continue
                    url_name = os.path.basename(tos_url)
                    file_name = '%05d_%s' % (ith_json, url_name)
                    save_as = os.path.join(img_dir, '%s.png' % file_name)
                    # processing wall: collect corner (col, row) pairs and make
                    # sure consecutive walls share their junction column
                    gt_point = []
                    pre_col1 = -1
                    for wall in walls:
                        col0, col1, row0_u, row0_d, row1_u, row1_d = process_mark_points(wall, ROWS, COLS)
                        if (pre_col1 < 0):
                            pre_col1 = col1
                        else:
                            if (pre_col1 == col0):
                                pre_col1 = col1
                            else:
                                # walls are not chained head-to-tail: reject room
                                pre_col1 = -1
                                break
                    gt_point.append([col0, row0_u])
                    gt_point.append([col0, row0_d])
                    # reject when the chain broke, or the last wall does not
                    # close the loop back to the first corner column
                    if ((pre_col1 == -1) or (col1 != gt_point[0][0])):
                        continue
                    gt_point = np.array(gt_point)
                    # processing door
                    door_seg_point = []
                    for door in doors:
                        col0, col1, row0_u, row0_d, row1_u, row1_d = process_mark_points(door, ROWS, COLS)
                        door_seg_point.append([col0, row0_u])
                        door_seg_point.append([col0, row0_d])
                        door_seg_point.append([col1, row1_u])
                        door_seg_point.append([col1, row1_d])
                    door_seg_point = np.array(door_seg_point)
                    # processing window
                    window_seg_point = []
                    for window in windows:
                        col0, col1, row0_u, row0_d, row1_u, row1_d = process_mark_points(window, ROWS, COLS)
                        window_seg_point.append([col0, row0_u])
                        window_seg_point.append([col0, row0_d])
                        window_seg_point.append([col1, row1_u])
                        window_seg_point.append([col1, row1_d])
                    window_seg_point = np.array(window_seg_point)
                    # dump labels, then download and resize the panorama
                    wall_label_path = os.path.join(label_dir, '%s.txt' % file_name)
                    dump_to_txt(wall_label_path, gt_point)
                    door_label_path = os.path.join(label_dir, '%s_ds.txt' % file_name)
                    dump_to_txt(door_label_path, door_seg_point)
                    window_label_path = os.path.join(label_dir, '%s_ws.txt' % file_name)
                    dump_to_txt(window_label_path, window_seg_point)
                    urllib.request.urlretrieve(tos_url, save_as)
                    pano_image = cv2.imread(save_as)
                    cv2.imwrite(save_as, cv2.resize(pano_image, (COLS, ROWS)))
                    # only the first usable hotspot per room is kept
                    break
| 8,623 | 42.77665 | 118 | py |
DMH-Net | DMH-Net-main/misc/panostretch.py | import functools
import numpy as np
from misc.post_proc import np_coor2xy, np_coorx2u, np_coory2v
from scipy.ndimage import map_coordinates
def uv_meshgrid(w, h):
    """Per-pixel spherical angles of a w x h equirectangular image.

    Returns an (h, w, 2) float64 array whose last axis is (u, v), with
    u in (-pi, pi) across columns and v in (-pi/2, pi/2) down rows,
    evaluated at pixel centres.
    """
    xs, ys = np.meshgrid(range(w), range(h))
    u = ((xs + 0.5) / w - 0.5) * 2 * np.pi
    v = ((ys + 0.5) / h - 0.5) * np.pi
    return np.stack([u, v], axis=-1).astype(np.float64)
@functools.lru_cache()
def _uv_tri(w, h):
    """Cached per-pixel sin(u), cos(u), tan(v) for a w x h panorama."""
    angles = uv_meshgrid(w, h)
    u, v = angles[..., 0], angles[..., 1]
    return np.sin(u), np.cos(u), np.tan(v)
def uv_tri(w, h):
    """Like `_uv_tri` but hands back writable copies so the cache stays pristine."""
    return tuple(arr.copy() for arr in _uv_tri(w, h))
def coorx2u(x, w=1024):
    """Image x coordinate (pixel-centred) -> azimuth angle u."""
    frac = (x + 0.5) / w - 0.5
    return frac * 2 * np.pi
def coory2v(y, h=512):
    """Image y coordinate (pixel-centred) -> elevation angle v."""
    frac = (y + 0.5) / h - 0.5
    return frac * np.pi
def u2coorx(u, w=1024):
    """Azimuth angle u -> image x coordinate (inverse of coorx2u)."""
    turn = u / (2 * np.pi)
    return (turn + 0.5) * w - 0.5
def v2coory(v, h=512):
    """Elevation angle v -> image y coordinate (inverse of coory2v)."""
    frac = v / np.pi
    return (frac + 0.5) * h - 0.5
def uv2xy(u, v, z=-50):
    """Intersect the ray with direction (u, v) against the horizontal plane
    at height z; returns its (x, y) position on that plane."""
    radius = z / np.tan(v)
    return radius * np.cos(u), radius * np.sin(u)
def pano_connect_points(p1, p2, z=-50, w=1024, h=512):
    """Sample the projected 3-D segment between two panorama points.

    One sample is produced per integer image column between the endpoints
    (wrapping across the seam when that is the shorter way round).

    p1, p2: (x, y) image coordinates
    z     : height of the plane both points are assumed to lie on
    w, h  : panorama width / height in pixels
    Returns an (N, 2) array of (x, y) image coordinates.
    """
    if p1[0] == p2[0]:
        # Same column: the "curve" degenerates to a vertical segment.
        return np.array([p1, p2], np.float32)

    u1 = coorx2u(p1[0], w)
    v1 = coory2v(p1[1], h)
    u2 = coorx2u(p2[0], w)
    v2 = coory2v(p2[1], h)

    x1, y1 = uv2xy(u1, v1, z)
    x2, y2 = uv2xy(u2, v2, z)

    if abs(p1[0] - p2[0]) < w / 2:
        pstart = np.ceil(min(p1[0], p2[0]))
        pend = np.floor(max(p1[0], p2[0]))
    else:
        # shorter to go the other way around the seam
        pstart = np.ceil(max(p1[0], p2[0]))
        pend = np.floor(min(p1[0], p2[0]) + w)

    coorxs = (np.arange(pstart, pend + 1) % w).astype(np.float64)
    vx = x2 - x1
    vy = y2 - y1
    us = coorx2u(coorxs, w)
    ps = (np.tan(us) * x1 - y1) / (1e-8 + vy - np.tan(us) * vx)  # fix divide zero warning
    cs = np.sqrt((x1 + ps * vx)**2 + (y1 + ps * vy)**2)
    vs = np.arctan2(z, cs)
    # Bug fix: propagate the caller-supplied panorama height instead of
    # silently falling back to v2coory's default h=512.
    coorys = v2coory(vs, h)
    return np.stack([coorxs, coorys], axis=-1)
def pano_stretch(img, corners, kx, ky, order=1, edgemap=None, return_edge=False):
    '''
    Stretch a panorama (and its corner annotations) along the two floor axes.

    img: [H, W, C]
    corners: [N, 2] in image coordinate (x, y) format
    kx: Stretching along front-back direction
    ky: Stretching along left-right direction
    order: Interpolation order. 0 for nearest-neighbor. 1 for bilinear.
    edgemap: optional [H, W, C] map warped together with the image
    return_edge: when True, the third returned element is the stretched
                 edgemap (None if no edgemap was given); otherwise None.

    Returns (stretched_img, stretched_corners, stretched_edgemap_or_None).
    '''
    # Process image: for every output pixel, find its source (refy, refx).
    sin_u, cos_u, tan_v = uv_tri(img.shape[1], img.shape[0])
    u0 = np.arctan2(sin_u * kx / ky, cos_u)
    v0 = np.arctan(tan_v * np.sin(u0) / sin_u * ky)
    refx = (u0 / (2 * np.pi) + 0.5) * img.shape[1] - 0.5
    refy = (v0 / np.pi + 0.5) * img.shape[0] - 0.5

    # [TODO]: using opencv remap could probably speedup the process a little
    stretched_img = np.stack(
        [map_coordinates(img[..., i], [refy, refx], order=order, mode='wrap')
         for i in range(img.shape[-1])], axis=-1)

    # Bug fix: `stretched_edgemap` was previously unbound (UnboundLocalError)
    # when return_edge=True but edgemap is None.
    stretched_edgemap = None
    if edgemap is not None:
        stretched_edgemap = np.stack(
            [map_coordinates(edgemap[..., i], [refy, refx], order=order, mode='wrap')
             for i in range(edgemap.shape[-1])], axis=-1)

    # Process corners with the inverse angular mapping.
    corners_u0 = coorx2u(corners[:, 0], img.shape[1])
    corners_v0 = coory2v(corners[:, 1], img.shape[0])
    corners_u = np.arctan2(np.sin(corners_u0) * ky / kx, np.cos(corners_u0))
    corners_v = np.arctan(np.tan(corners_v0) * np.sin(corners_u) / np.sin(corners_u0) / ky)
    cornersX = u2coorx(corners_u, img.shape[1])
    cornersY = v2coory(corners_v, img.shape[0])
    stretched_corners = np.stack([cornersX, cornersY], axis=-1)

    if return_edge:
        return stretched_img, stretched_corners, stretched_edgemap
    return stretched_img, stretched_corners, None
def visualize_pano_stretch(stretched_img, stretched_cor, title):
    '''
    Helper function for visualizing the effect of pano_stretch.

    Draws the four ceiling (z=-50) and four floor (z=50) wall-boundary
    polylines through the stretched corners, plus a title, and returns the
    image as uint8.
    '''
    # Bug fix: cv2 is only imported under `if __name__ == '__main__'` in this
    # module, so importing the module and calling this function used to raise
    # NameError.  Import locally to keep module import light.
    import cv2
    thickness = 2
    color = (0, 255, 0)
    # offset 0 -> ceiling corners (even indices, z=-50);
    # offset 1 -> floor corners (odd indices, z=50).
    for z, offset in ((-50, 0), (50, 1)):
        for i in range(4):
            xys = pano_connect_points(stretched_cor[i * 2 + offset],
                                      stretched_cor[(i * 2 + 2 + offset) % 8], z=z)
            xys = xys.astype(int)
            # split the polyline where it wraps across the image seam
            wrap = np.where((xys[1:, 0] - xys[:-1, 0]) < 0)[0]
            if len(wrap) == 0:
                cv2.polylines(stretched_img, [xys], False, color, thickness)
            else:
                t = wrap[0] + 1
                cv2.polylines(stretched_img, [xys[:t]], False, color, thickness)
                cv2.polylines(stretched_img, [xys[t:]], False, color, thickness)
    cv2.putText(stretched_img, title, (25, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2, cv2.LINE_AA)
    return stretched_img.astype(np.uint8)
if __name__ == '__main__':
    # Demo: stretch a sample panorama and visualize the result.
    import argparse
    import time
    from PIL import Image
    import cv2

    parser = argparse.ArgumentParser()
    parser.add_argument('--i', default='data/valid/img/pano_abpohapclcyuuz.png')
    parser.add_argument('--i_gt', default='data/valid/label_cor/pano_abpohapclcyuuz.txt')
    parser.add_argument('--o', default='sample_stretched_pano.png')
    parser.add_argument('--kx', default=2, type=float, help='Stretching along front-back direction')
    parser.add_argument('--ky', default=1, type=float, help='Stretching along left-right direction')
    args = parser.parse_args()

    img = np.array(Image.open(args.i), np.float64)
    with open(args.i_gt) as f:
        cor = np.array([line.strip().split() for line in f], np.int32)

    # Bug fix: pano_stretch returns a 3-tuple (img, corners, edgemap); the old
    # 2-way unpack raised ValueError.  No edgemap is used here, so discard it.
    stretched_img, stretched_cor, _ = pano_stretch(img, cor, args.kx, args.ky)

    title = 'kx=%3.2f, ky=%3.2f' % (args.kx, args.ky)
    visual_stretched_img = visualize_pano_stretch(stretched_img, stretched_cor, title)
    Image.fromarray(visual_stretched_img).save(args.o)
def convert_coor_to_mline(coor, z=1.0, coorW=1024, coorH=512):
    """Project panorama pixel coordinates onto the horizontal plane at height z
    and return each point's (x, y) position on that plane.

    (Docstring translated from the original Chinese comment.)
    """
    pts = np.array(coor)
    u = np_coorx2u(pts[:, 0], coorW)
    v = np_coory2v(pts[:, 1], coorH)
    dist = z / np.tan(v)
    return dist * np.sin(u), -dist * np.cos(u)
| 6,401 | 32 | 126 | py |
DMH-Net | DMH-Net-main/misc/pano_lsd_align.py | '''
This script is helper function for preprocessing.
Most of the code are converted from LayoutNet official's matlab code.
All functions, naming rule and data flow follow official for easier
converting and comparing.
Code is not optimized for python or numpy yet.
Author: Cheng Sun
Email : chengsun@gapp.nthu.edu.tw
'''
import sys
import numpy as np
from scipy.ndimage import map_coordinates
import cv2
import time
def computeUVN(n, in_, planeID):
    '''
    compute v given u and normal.
    `n` is a length-3 normal vector; `planeID` 2 or 3 cyclically re-orders
    its components before solving for v at each azimuth in `in_`.
    '''
    if planeID == 2:
        n = np.array([n[1], n[2], n[0]])
    elif planeID == 3:
        n = np.array([n[2], n[0], n[1]])
    numer = n[0] * np.sin(in_) + n[1] * np.cos(in_)
    # epsilon guards against division by a zero z-component
    return np.arctan(-numer / (n[2] + 1e-9))
def computeUVN_vec(n, in_, planeID):
    '''
    vectorization version of computeUVN
    @n N x 3
    @in_ MN x 1
    @planeID N
    '''
    n = n.copy()
    # cyclic re-ordering of the normal components per plane id
    mask2 = planeID == 2
    if mask2.sum():
        n[mask2] = np.roll(n[mask2], 2, axis=1)
    mask3 = planeID == 3
    if mask3.sum():
        n[mask3] = np.roll(n[mask3], 1, axis=1)
    # tile the normals so each row of in_ has a matching normal
    n = np.repeat(n, in_.shape[0] // n.shape[0], axis=0)
    assert n.shape[0] == in_.shape[0]
    numer = n[:, [0]] * np.sin(in_) + n[:, [1]] * np.cos(in_)
    return np.arctan(-numer / (n[:, [2]] + 1e-9))
def xyz2uvN(xyz, planeID=1):
    '''Convert N x 3 points to (u, v) angles w.r.t. the given projection plane.'''
    i1 = (int(planeID) - 1) % 3
    i2 = int(planeID) % 3
    i3 = (int(planeID) + 1) % 3
    normXY = np.sqrt(xyz[:, [i1]]**2 + xyz[:, [i2]]**2)
    # clamp to avoid division by zero at the poles
    normXY[normXY < 0.000001] = 0.000001
    normXYZ = np.sqrt(xyz[:, [i1]]**2 + xyz[:, [i2]]**2 + xyz[:, [i3]]**2)
    v = np.arcsin(xyz[:, [i3]] / normXYZ)
    u = np.arcsin(xyz[:, [i1]] / normXY)
    # fold u into the correct half-plane when the second component is negative
    flip_pos = (xyz[:, [i2]] < 0) & (u >= 0)
    u[flip_pos] = np.pi - u[flip_pos]
    flip_neg = (xyz[:, [i2]] < 0) & (u <= 0)
    u[flip_neg] = -np.pi - u[flip_neg]
    uv = np.hstack([u, v])
    uv[np.isnan(uv[:, 0]), 0] = 0
    return uv
def uv2xyzN(uv, planeID=1):
    '''Inverse of xyz2uvN: (u, v) angles back to unit 3-D points.'''
    i1 = (int(planeID) - 1) % 3
    i2 = int(planeID) % 3
    i3 = (int(planeID) + 1) % 3
    out = np.zeros((uv.shape[0], 3))
    cos_v = np.cos(uv[:, 1])
    out[:, i1] = cos_v * np.sin(uv[:, 0])
    out[:, i2] = cos_v * np.cos(uv[:, 0])
    out[:, i3] = np.sin(uv[:, 1])
    return out
def uv2xyzN_vec(uv, planeID):
    '''
    vectorization version of uv2xyzN
    @uv N x 2
    @planeID N
    '''
    assert (planeID.astype(int) != planeID).sum() == 0
    pid = planeID.astype(int)
    i1 = (pid - 1) % 3
    i2 = pid % 3
    i3 = (pid + 1) % 3
    rows = np.arange(len(uv))
    out = np.zeros((len(uv), 3))
    out[rows, i1] = np.cos(uv[:, 1]) * np.sin(uv[:, 0])
    out[rows, i2] = np.cos(uv[:, 1]) * np.cos(uv[:, 0])
    out[rows, i3] = np.sin(uv[:, 1])
    return out
def warpImageFast(im, XXdense, YYdense):
    '''Bilinear warp of `im` using dense 1-based coordinate maps.

    XXdense/YYdense give, for every output pixel, the (column, row) position
    to sample in `im`.  The image is first cropped to the touched region so
    map_coordinates runs on a smaller array.
    '''
    minX = max(1., np.floor(XXdense.min()) - 1)
    minY = max(1., np.floor(YYdense.min()) - 1)
    maxX = min(im.shape[1], np.ceil(XXdense.max()) + 1)
    maxY = min(im.shape[0], np.ceil(YYdense.max()) + 1)
    im = im[int(round(minY - 1)):int(round(maxY)),
            int(round(minX - 1)):int(round(maxX))]

    assert XXdense.shape == YYdense.shape
    out_shape = XXdense.shape
    sample_at = [
        (YYdense - minY).reshape(-1),
        (XXdense - minX).reshape(-1),
    ]
    channels = [
        map_coordinates(im[..., c], sample_at, order=1).reshape(out_shape)
        for c in range(im.shape[-1])
    ]
    return np.stack(channels, axis=-1)
def rotatePanorama(img, vp=None, R=None):
    '''
    Rotate panorama
    if R is given, vp (vanishing point) will be overlooked
    otherwise R is computed from vp

    @img: H x W x C equirectangular image
    @vp : 3 x 3 direction matrix (used only when R is None)
    @R  : 3 x 3 rotation applied to the viewing sphere
    '''
    sphereH, sphereW, C = img.shape

    # new uv coordinates: the angles of every output pixel (1-based grid)
    TX, TY = np.meshgrid(range(1, sphereW + 1), range(1, sphereH + 1))
    TX = TX.reshape(-1, 1, order='F')
    TY = TY.reshape(-1, 1, order='F')
    ANGx = (TX - sphereW / 2 - 0.5) / sphereW * np.pi * 2
    ANGy = -(TY - sphereH / 2 - 0.5) / sphereH * np.pi
    uvNew = np.hstack([ANGx, ANGy])
    xyzNew = uv2xyzN(uvNew, 1)

    # rotation matrix
    if R is None:
        R = np.linalg.inv(vp.T)

    # map each output direction back to its source direction in the input
    xyzOld = np.linalg.solve(R, xyzNew.T).T
    uvOld = xyz2uvN(xyzOld, 1)

    # source pixel coordinates (1-based) for every output pixel
    Px = (uvOld[:, 0] + np.pi) / (2 * np.pi) * sphereW + 0.5
    Py = (-uvOld[:, 1] + np.pi / 2) / np.pi * sphereH + 0.5

    Px = Px.reshape(sphereH, sphereW, order='F')
    Py = Py.reshape(sphereH, sphereW, order='F')

    # boundary: pad one pixel all round so sampling can wrap across the
    # horizontal seam and mirror across the poles
    imgNew = np.zeros((sphereH + 2, sphereW + 2, C), np.float64)
    imgNew[1:-1, 1:-1, :] = img
    imgNew[1:-1, 0, :] = img[:, -1, :]
    imgNew[1:-1, -1, :] = img[:, 0, :]
    imgNew[0, 1:sphereW // 2 + 1, :] = img[0, sphereW - 1:sphereW // 2 - 1:-1, :]
    imgNew[0, sphereW // 2 + 1:-1, :] = img[0, sphereW // 2 - 1::-1, :]
    imgNew[-1, 1:sphereW // 2 + 1, :] = img[-1, sphereW - 1:sphereW // 2 - 1:-1, :]
    # NOTE(review): this line fills the bottom padding from img[0] (the TOP
    # row), while its sibling above uses img[-1] — looks like a copy-paste
    # slip; confirm against the original matlab implementation.
    imgNew[-1, sphereW // 2 + 1:-1, :] = img[0, sphereW // 2 - 1::-1, :]
    imgNew[0, 0, :] = img[0, 0, :]
    imgNew[-1, -1, :] = img[-1, -1, :]
    imgNew[0, -1, :] = img[0, -1, :]
    imgNew[-1, 0, :] = img[-1, 0, :]

    # +1 compensates for the one-pixel padding offset
    rotImg = warpImageFast(imgNew, Px + 1, Py + 1)

    return rotImg
def imgLookAt(im, CENTERx, CENTERy, new_imgH, fov):
    '''
    Render a square perspective view of panorama `im` looking towards
    (CENTERx, CENTERy) — azimuth/elevation in radians — with field of view
    `fov`, as a new_imgH x new_imgH image.
    '''
    sphereH = im.shape[0]
    sphereW = im.shape[1]
    warped_im = np.zeros((new_imgH, new_imgH, 3))
    # pixel grid of the target view, centred on the optical axis
    TX, TY = np.meshgrid(range(1, new_imgH + 1), range(1, new_imgH + 1))
    TX = TX.reshape(-1, 1, order='F')
    TY = TY.reshape(-1, 1, order='F')
    TX = TX - 0.5 - new_imgH / 2
    TY = TY - 0.5 - new_imgH / 2
    r = new_imgH / 2 / np.tan(fov / 2)  # focal length in pixels

    # convert to 3D
    R = np.sqrt(TY**2 + r**2)
    ANGy = np.arctan(-TY / r)
    ANGy = ANGy + CENTERy
    X = np.sin(ANGy) * R
    Y = -np.cos(ANGy) * R
    Z = TX

    # pixels whose ray points behind the camera need their azimuth flipped
    INDn = np.nonzero(np.abs(ANGy) > np.pi / 2)

    # project back to sphere
    ANGx = np.arctan(Z / -Y)
    RZY = np.sqrt(Z**2 + Y**2)
    ANGy = np.arctan(X / RZY)

    ANGx[INDn] = ANGx[INDn] + np.pi
    ANGx = ANGx + CENTERx

    # fold elevation back into [-pi/2, pi/2] (with matching azimuth flip)
    INDy = np.nonzero(ANGy < -np.pi / 2)
    ANGy[INDy] = -np.pi - ANGy[INDy]
    ANGx[INDy] = ANGx[INDy] + np.pi

    # wrap azimuth into (-pi, pi]; repeated because one wrap may overshoot
    INDx = np.nonzero(ANGx <= -np.pi)
    ANGx[INDx] = ANGx[INDx] + 2 * np.pi
    INDx = np.nonzero(ANGx > np.pi)
    ANGx[INDx] = ANGx[INDx] - 2 * np.pi
    INDx = np.nonzero(ANGx > np.pi)
    ANGx[INDx] = ANGx[INDx] - 2 * np.pi
    INDx = np.nonzero(ANGx > np.pi)
    ANGx[INDx] = ANGx[INDx] - 2 * np.pi

    # angles -> panorama pixel coordinates (1-based)
    Px = (ANGx + np.pi) / (2 * np.pi) * sphereW + 0.5
    Py = ((-ANGy) + np.pi / 2) / np.pi * sphereH + 0.5

    INDxx = np.nonzero(Px < 1)
    Px[INDxx] = Px[INDxx] + sphereW

    # duplicate the first two columns so bilinear sampling can wrap the seam
    im = np.concatenate([im, im[:, :2]], 1)
    Px = Px.reshape(new_imgH, new_imgH, order='F')
    Py = Py.reshape(new_imgH, new_imgH, order='F')

    warped_im = warpImageFast(im, Px, Py)

    return warped_im
def separatePano(panoImg, fov, x, y, imgSize=320):
    '''cut a panorama image into several separate views

    One imgSize x imgSize perspective view is rendered per (x, y) direction;
    `fov` may be a scalar (shared) or an array matching x/y.
    '''
    assert x.shape == y.shape
    if not isinstance(fov, np.ndarray):
        fov = fov * np.ones_like(x)
    views = []
    for view_x, view_y, view_fov in zip(x, y, fov):
        views.append({
            'img': imgLookAt(panoImg.copy(), view_x, view_y, imgSize, view_fov),
            'vx': view_x,
            'vy': view_y,
            'fov': view_fov,
            'sz': imgSize,
        })
    return views
def lsdWrap(img, LSD=None, **kwargs):
    '''
    Opencv implementation of
    Rafael Grompone von Gioi, Jérémie Jakubowicz, Jean-Michel Morel, and Gregory Randall,
    LSD: a Line Segment Detector, Image Processing On Line, vol. 2012.
    [Rafael12] http://www.ipol.im/pub/art/2012/gjmr-lsd/?utm_source=doi
    @img
        input image (RGB or already grayscale)
    @LSD
        an existing cv2.createLineSegmentDetector instance; when given,
        kwargs are ignored
    @kwargs
        forwarded to cv2.createLineSegmentDetector when LSD is None

    Returns (edgeMap, edgeList); both are empty when nothing is detected.
    '''
    detector = cv2.createLineSegmentDetector(**kwargs) if LSD is None else LSD

    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) if len(img.shape) == 3 else img

    segments, width, prec, nfa = detector.detect(gray)
    if segments is None:
        return np.zeros_like(gray), np.array([])

    edge_map = detector.drawSegments(np.zeros_like(gray), segments)[..., -1]
    segments = np.squeeze(segments, 1)
    edge_list = np.concatenate([segments, width, prec, nfa], 1)
    return edge_map, edge_list
def edgeFromImg2Pano(edge):
    '''Lift line segments detected in one perspective view onto the panorama sphere.

    @edge dict with keys 'edgeLst' (N x >=5, [x1 y1 x2 y2 ... score]),
          'vx'/'vy' (view direction), 'fov', and 'img' (the grayscale view).
    Returns an N x 10 array [normal(3) coord1(3) coord2(3) score],
    or an empty array when no segments were given.
    '''
    edgeList = edge['edgeLst']
    if len(edgeList) == 0:
        return np.array([])

    vx, vy = edge['vx'], edge['vy']
    fov = edge['fov']
    imH, imW = edge['img'].shape

    R = (imW / 2) / np.tan(fov / 2)

    # im is the tangent plane, contacting with ball at [x0 y0 z0]
    x0 = R * np.cos(vy) * np.sin(vx)
    y0 = R * np.cos(vy) * np.cos(vx)
    z0 = R * np.sin(vy)

    # in-plane orthonormal basis
    vecposX = np.array([np.cos(vx), -np.sin(vx), 0])
    vecposY = np.cross(np.array([x0, y0, z0]), vecposX)
    vecposY = vecposY / np.sqrt(vecposY @ vecposY.T)
    vecposX = vecposX.reshape(1, -1)
    vecposY = vecposY.reshape(1, -1)
    Xc = (0 + imW - 1) / 2
    Yc = (0 + imH - 1) / 2

    # endpoint offsets from the view centre
    vecx1 = edgeList[:, [0]] - Xc
    vecy1 = edgeList[:, [1]] - Yc
    vecx2 = edgeList[:, [2]] - Xc
    vecy2 = edgeList[:, [3]] - Yc

    # broadcasting replaces the original np.tile calls (same values)
    vec1 = vecx1 * vecposX + vecy1 * vecposY
    vec2 = vecx2 * vecposX + vecy2 * vecposY
    coord1 = [[x0, y0, z0]] + vec1
    coord2 = [[x0, y0, z0]] + vec2

    # unit normal of the great circle through both endpoints
    normal = np.cross(coord1, coord2, axis=1)
    normal = normal / np.linalg.norm(normal, axis=1, keepdims=True)

    return np.hstack([normal, coord1, coord2, edgeList[:, [-1]]])
def _intersection(range1, range2):
if range1[1] < range1[0]:
range11 = [range1[0], 1]
range12 = [0, range1[1]]
else:
range11 = range1
range12 = [0, 0]
if range2[1] < range2[0]:
range21 = [range2[0], 1]
range22 = [0, range2[1]]
else:
range21 = range2
range22 = [0, 0]
b = max(range11[0], range21[0]) < min(range11[1], range21[1])
if b:
return b
b2 = max(range12[0], range22[0]) < min(range12[1], range22[1])
b = b or b2
return b
def _insideRange(pt, range):
if range[1] > range[0]:
b = pt >= range[0] and pt <= range[1]
else:
b1 = pt >= range[0] and pt <= 1
b2 = pt >= 0 and pt <= range[1]
b = b1 or b2
return b
def combineEdgesN(edges):
    '''
    Combine some small line segments, should be very conservative
    OUTPUT
        lines: combined line segments
        ori_lines: original line segments
        line format [nx ny nz projectPlaneID umin umax LSfov score]
    '''
    arcList = []
    for edge in edges:
        panoLst = edge['panoLst']
        if len(panoLst) == 0:
            continue
        arcList.append(panoLst)
    arcList = np.vstack(arcList)

    # ori lines
    numLine = len(arcList)
    ori_lines = np.zeros((numLine, 8))
    # choose for each segment the axis plane its normal is most aligned with
    areaXY = np.abs(arcList[:, 2])
    areaYZ = np.abs(arcList[:, 0])
    areaZX = np.abs(arcList[:, 1])
    planeIDs = np.argmax(np.stack([areaXY, areaYZ, areaZX], -1),
                         1) + 1  # XY YZ ZX

    for i in range(numLine):
        ori_lines[i, :3] = arcList[i, :3]
        ori_lines[i, 3] = planeIDs[i]
        coord1 = arcList[i, 3:6]
        coord2 = arcList[i, 6:9]
        # normalized angular span [umin, umax] in [0, 1) on the chosen plane
        uv = xyz2uvN(np.stack([coord1, coord2]), planeIDs[i])
        umax = uv[:, 0].max() + np.pi
        umin = uv[:, 0].min() + np.pi
        if umax - umin > np.pi:
            # span crosses the seam: store it in wrapping order
            ori_lines[i, 4:6] = np.array([umax, umin]) / 2 / np.pi
        else:
            ori_lines[i, 4:6] = np.array([umin, umax]) / 2 / np.pi
        # angular length of the arc and its LSD score
        ori_lines[i, 6] = np.arccos(
            (np.dot(coord1, coord2) /
             (np.linalg.norm(coord1) * np.linalg.norm(coord2))).clip(-1, 1))
        ori_lines[i, 7] = arcList[i, 9]

    # additive combination
    lines = ori_lines.copy()
    for _ in range(3):
        numLine = len(lines)
        valid_line = np.ones(numLine, bool)
        for i in range(numLine):
            if not valid_line[i]:
                continue
            # candidates: lines whose normals are within ~1 degree of this one
            dotProd = (lines[:, :3] * lines[[i], :3]).sum(1)
            valid_curr = np.logical_and(
                (np.abs(dotProd) > np.cos(np.pi / 180)), valid_line)
            valid_curr[i] = False
            for j in np.nonzero(valid_curr)[0]:
                range1 = lines[i, 4:6]
                range2 = lines[j, 4:6]
                valid_rag = _intersection(range1, range2)
                if not valid_rag:
                    continue
                # combine: average normals weighted by angular length, with
                # sign chosen so the two normals add up rather than cancel
                I = np.argmax(np.abs(lines[i, :3]))
                if lines[i, I] * lines[j, I] > 0:
                    nc = lines[i, :3] * lines[i, 6] + lines[j, :3] * lines[j, 6]
                else:
                    nc = lines[i, :3] * lines[i, 6] - lines[j, :3] * lines[j, 6]
                nc = nc / np.linalg.norm(nc)

                # union of the two angular ranges
                if _insideRange(range1[0], range2):
                    nrmin = range2[0]
                else:
                    nrmin = range1[0]

                if _insideRange(range1[1], range2):
                    nrmax = range2[1]
                else:
                    nrmax = range1[1]

                # recompute the merged arc's endpoints and angular length
                u = np.array([[nrmin], [nrmax]]) * 2 * np.pi - np.pi
                v = computeUVN(nc, u, lines[i, 3])
                xyz = uv2xyzN(np.hstack([u, v]), lines[i, 3])
                l = np.arccos(np.dot(xyz[0, :], xyz[1, :]).clip(-1, 1))
                # length-weighted average of the two scores
                scr = (lines[i, 6] * lines[i, 7] +
                       lines[j, 6] * lines[j, 7]) / (lines[i, 6] + lines[j, 6])

                lines[i] = [*nc, lines[i, 3], nrmin, nrmax, l, scr]
                valid_line[j] = False

        lines = lines[valid_line]

    return lines, ori_lines
def icosahedron2sphere(level):
    '''Sample the unit sphere ~uniformly by subdividing an icosahedron.

    Each of `level` iterations splits every triangle into four by inserting
    edge midpoints, then re-projects all vertices onto the unit sphere.
    Returns (coor, tri): vertex coordinates and triangle vertex indices.
    '''
    a = 2 / (1 + np.sqrt(5))  # golden-ratio-derived icosahedron coordinate
    M = np.array([
        0, a, -1, a, 1, 0, -a, 1, 0, 0, a, 1, -a, 1, 0, a, 1, 0, 0, a, 1, 0,
        -a, 1, -1, 0, a, 0, a, 1, 1, 0, a, 0, -a, 1, 0, a, -1, 0, -a, -1, 1, 0,
        -a, 0, a, -1, -1, 0, -a, 0, -a, -1, 0, -a, 1, a, -1, 0, -a, -1, 0, 0,
        -a, -1, -a, -1, 0, a, -1, 0, -a, 1, 0, -1, 0, a, -1, 0, -a, -a, -1, 0,
        -1, 0, -a, -1, 0, a, a, 1, 0, 1, 0, -a, 1, 0, a, a, -1, 0, 1, 0, a, 1,
        0, -a, 0, a, 1, -1, 0, a, -a, 1, 0, 0, a, 1, a, 1, 0, 1, 0, a, 0, a,
        -1, -a, 1, 0, -1, 0, -a, 0, a, -1, 1, 0, -a, a, 1, 0, 0, -a, -1, -1, 0,
        -a, -a, -1, 0, 0, -a, -1, a, -1, 0, 1, 0, -a, 0, -a, 1, -a, -1, 0, -1,
        0, a, 0, -a, 1, 1, 0, a, a, -1, 0
    ])

    # 60 raw triangle corners -> deduplicated vertices + index triplets
    coor = M.T.reshape(3, 60, order='F').T
    coor, idx = np.unique(coor, return_inverse=True, axis=0)
    tri = idx.reshape(3, 20, order='F').T

    # push all vertices onto the unit sphere
    coor = list(
        coor / np.tile(np.linalg.norm(coor, axis=1, keepdims=True), (1, 3)))

    for _ in range(level):
        subdivided = []
        for t in range(len(tri)):
            # append the three edge midpoints and split into four triangles
            n = len(coor)
            coor.append((coor[tri[t, 0]] + coor[tri[t, 1]]) / 2)
            coor.append((coor[tri[t, 1]] + coor[tri[t, 2]]) / 2)
            coor.append((coor[tri[t, 2]] + coor[tri[t, 0]]) / 2)
            subdivided.append([n, tri[t, 0], n + 2])
            subdivided.append([n, tri[t, 1], n + 1])
            subdivided.append([n + 1, tri[t, 2], n + 2])
            subdivided.append([n, n + 1, n + 2])
        tri = np.array(subdivided)
        # uniquefy: midpoints shared by neighbouring triangles get merged
        coor, idx = np.unique(coor, return_inverse=True, axis=0)
        tri = idx[tri]
        # re-project onto the unit sphere
        coor = list(coor / np.tile(
            np.sqrt(np.sum(coor * coor, 1, keepdims=True)), (1, 3)))

    return np.array(coor), np.array(tri)
def curveFitting(inputXYZ, weight):
    '''
    Fit a plane through the origin to weighted unit directions and return
    its unit normal (smallest-singular-vector of the weighted scatter matrix).
    @inputXYZ: N x 3
    @weight : N x 1
    '''
    norms = np.linalg.norm(inputXYZ, axis=1, keepdims=True)
    unit = inputXYZ / norms
    w = unit * weight
    # entries of the symmetric 3x3 scatter matrix
    XX = np.sum(w[:, 0]**2)
    YY = np.sum(w[:, 1]**2)
    ZZ = np.sum(w[:, 2]**2)
    XY = np.sum(w[:, 0] * w[:, 1])
    YZ = np.sum(w[:, 1] * w[:, 2])
    ZX = np.sum(w[:, 2] * w[:, 0])
    A = np.array([
        [XX, XY, ZX],
        [XY, YY, YZ],
        [ZX, YZ, ZZ]])
    _, _, Vh = np.linalg.svd(A)
    normal = Vh[-1, :]
    return normal / np.linalg.norm(normal)
def sphereHoughVote(segNormal,
                    segLength,
                    segScores,
                    binRadius,
                    orthTolerance,
                    candiSet,
                    force_unempty=True):
    '''
    Hough-vote for a triplet of mutually orthogonal directions on the sphere.

    @segNormal    : N x 3 great-circle normals of the line segments (modified
                    in place: normals with negative z are flipped)
    @segLength    : N x 1 angular lengths (vote weights)
    @segScores    : N x 1 detector scores (vote weights)
    @binRadius    : angular radius (degrees) of one voting bin
    @orthTolerance: tolerance (degrees) on orthogonality between directions
    @candiSet     : M x 3 candidate directions (sphere sampling)
    @force_unempty: skip candidate bins that received no votes
    Returns (3 x 3 refined directions, last vote improvement, last angular
    change of the best triplet), or (None, 0, 0) when no triplet was found.
    '''
    # initial guess
    numLinesg = len(segNormal)
    voteBinPoints = candiSet.copy()
    voteBinPoints = voteBinPoints[~(voteBinPoints[:, 2] < 0)]  # upper hemisphere only
    reversValid = (segNormal[:, 2] < 0).reshape(-1)
    segNormal[reversValid] = -segNormal[reversValid]

    voteBinUV = xyz2uvN(voteBinPoints)
    numVoteBin = len(voteBinPoints)
    voteBinValues = np.zeros(numVoteBin)
    # each segment votes for every bin (nearly) perpendicular to its normal
    for i in range(numLinesg):
        tempNorm = segNormal[[i]]
        tempDots = (voteBinPoints * tempNorm).sum(1)

        valid = np.abs(tempDots) < np.cos((90 - binRadius) * np.pi / 180)

        voteBinValues[
            valid] = voteBinValues[valid] + segScores[i] * segLength[i]

    # search the best-voted orthogonal triplet among well-elevated bins
    checkIDs1 = np.nonzero(voteBinUV[:, [1]] > np.pi / 3)[0]
    voteMax = 0
    checkID1Max = 0
    checkID2Max = 0
    checkID3Max = 0

    for j in range(len(checkIDs1)):
        checkID1 = checkIDs1[j]
        vote1 = voteBinValues[checkID1]
        if voteBinValues[checkID1] == 0 and force_unempty:
            continue
        checkNormal = voteBinPoints[[checkID1]]
        dotProduct = (voteBinPoints * checkNormal).sum(1)
        # second direction: (nearly) orthogonal to the first
        checkIDs2 = np.nonzero(
            np.abs(dotProduct) < np.cos((90 - orthTolerance) * np.pi / 180))[0]
        for i in range(len(checkIDs2)):
            checkID2 = checkIDs2[i]
            if voteBinValues[checkID2] == 0 and force_unempty:
                continue
            vote2 = vote1 + voteBinValues[checkID2]
            # third direction: (nearly) parallel to the cross product
            cpv = np.cross(voteBinPoints[checkID1],
                           voteBinPoints[checkID2]).reshape(1, 3)
            cpn = np.linalg.norm(cpv)
            dotProduct = (voteBinPoints * cpv).sum(1) / cpn
            checkIDs3 = np.nonzero(
                np.abs(dotProduct) > np.cos(orthTolerance * np.pi / 180))[0]

            for k in range(len(checkIDs3)):
                checkID3 = checkIDs3[k]
                if voteBinValues[checkID3] == 0 and force_unempty:
                    continue
                vote3 = vote2 + voteBinValues[checkID3]
                if vote3 > voteMax:
                    # track how much and how far the best triplet just moved
                    lastStepCost = vote3 - voteMax
                    if voteMax != 0:
                        tmp = (voteBinPoints[[checkID1Max, checkID2Max, checkID3Max]] * \
                               voteBinPoints[[checkID1, checkID2, checkID3]]).sum(1)
                        lastStepAngle = np.arccos(tmp.clip(-1, 1))
                    else:
                        lastStepAngle = np.zeros(3)

                    checkID1Max = checkID1
                    checkID2Max = checkID2
                    checkID3Max = checkID3

                    voteMax = vote3

    # NOTE(review): 0 doubles as "nothing found" AND as a legitimate bin
    # index; a best triplet whose first bin is index 0 would be mis-reported
    # as a failure here — confirm against the original matlab code.
    if checkID1Max == 0:
        print(
            '[WARN] sphereHoughVote: no orthogonal voting exist',
            file=sys.stderr)
        return None, 0, 0
    initXYZ = voteBinPoints[[checkID1Max, checkID2Max, checkID3Max]]

    # refine: least-squares re-fit of each direction from its supporting segments
    refiXYZ = np.zeros((3, 3))
    dotprod = (segNormal * initXYZ[[0]]).sum(1)
    valid = np.abs(dotprod) < np.cos((90 - binRadius) * np.pi / 180)
    validNm = segNormal[valid]
    validWt = segLength[valid] * segScores[valid]
    validWt = validWt / validWt.max()
    refiNM = curveFitting(validNm, validWt)
    refiXYZ[0] = refiNM.copy()

    dotprod = (segNormal * initXYZ[[1]]).sum(1)
    valid = np.abs(dotprod) < np.cos((90 - binRadius) * np.pi / 180)
    validNm = segNormal[valid]
    validWt = segLength[valid] * segScores[valid]
    validWt = validWt / validWt.max()
    # softly anchor the second direction to be orthogonal to the first
    validNm = np.vstack([validNm, refiXYZ[[0]]])
    validWt = np.vstack([validWt, validWt.sum(0, keepdims=1) * 0.1])
    refiNM = curveFitting(validNm, validWt)
    refiXYZ[1] = refiNM.copy()

    # third direction is simply the cross product of the first two
    refiNM = np.cross(refiXYZ[0], refiXYZ[1])
    refiXYZ[2] = refiNM / np.linalg.norm(refiNM)

    return refiXYZ, lastStepCost, lastStepAngle
def findMainDirectionEMA(lines):
    '''Estimate three mutually orthogonal vanishing directions from line segments.

    lines: N x 7 array; columns 0:3 hold each segment's great-circle normal,
    column 6 its angular length (used as vote weight).
    Returns (mainDirect, score, angle): a 6 x 3 array (the 3 vps and their
    opposites) plus the last Hough-vote cost/angle diagnostics, or
    (None, score, angle) when the initial vote fails.
    '''
    # initial guess
    segNormal = lines[:, :3]
    segLength = lines[:, [6]]
    segScores = np.ones((len(lines), 1))
    # Segments shorter than 5 degrees vote unreliably — drop them.
    shortSegValid = (segLength < 5 * np.pi / 180).reshape(-1)
    segNormal = segNormal[~shortSegValid, :]
    segLength = segLength[~shortSegValid]
    segScores = segScores[~shortSegValid]
    numLinesg = len(segNormal)  # NOTE(review): unused
    # Coarse candidate directions: vertices of a 3x-subdivided icosahedron.
    candiSet, tri = icosahedron2sphere(3)
    # Angular spacing between neighbouring candidates -> voting bin radius.
    ang = np.arccos((candiSet[tri[0, 0]] * candiSet[tri[0, 1]]).sum().clip(
        -1, 1)) / np.pi * 180
    binRadius = ang / 2
    initXYZ, score, angle = sphereHoughVote(segNormal, segLength, segScores,
                                            2 * binRadius, 2, candiSet)
    if initXYZ is None:
        print('[WARN] findMainDirectionEMA: initial failed', file=sys.stderr)
        return None, score, angle
    # iterative refine: finer candidate sphere, shrinking tolerance
    iter_max = 3
    candiSet, tri = icosahedron2sphere(5)
    numCandi = len(candiSet)  # NOTE(review): unused
    angD = np.arccos((candiSet[tri[0, 0]] * candiSet[tri[0, 1]]).sum().clip(
        -1, 1)) / np.pi * 180
    binRadiusD = angD / 2
    curXYZ = initXYZ.copy()
    tol = np.linspace(4 * binRadius, 4 * binRadiusD,
                      iter_max)  # shrink down ls and candi
    for it in range(iter_max):
        # Keep only segments roughly perpendicular to one current direction.
        dot1 = np.abs((segNormal * curXYZ[[0]]).sum(1))
        dot2 = np.abs((segNormal * curXYZ[[1]]).sum(1))
        dot3 = np.abs((segNormal * curXYZ[[2]]).sum(1))
        valid1 = dot1 < np.cos((90 - tol[it]) * np.pi / 180)
        valid2 = dot2 < np.cos((90 - tol[it]) * np.pi / 180)
        valid3 = dot3 < np.cos((90 - tol[it]) * np.pi / 180)
        valid = valid1 | valid2 | valid3
        if np.sum(valid) == 0:
            print(
                '[WARN] findMainDirectionEMA: zero line segments for voting',
                file=sys.stderr)
            break
        subSegNormal = segNormal[valid]
        subSegLength = segLength[valid]
        subSegScores = segScores[valid]
        # Keep only candidate directions close to one current direction.
        dot1 = np.abs((candiSet * curXYZ[[0]]).sum(1))
        dot2 = np.abs((candiSet * curXYZ[[1]]).sum(1))
        dot3 = np.abs((candiSet * curXYZ[[2]]).sum(1))
        valid1 = dot1 > np.cos(tol[it] * np.pi / 180)
        valid2 = dot2 > np.cos(tol[it] * np.pi / 180)
        valid3 = dot3 > np.cos(tol[it] * np.pi / 180)
        valid = valid1 | valid2 | valid3
        if np.sum(valid) == 0:
            print(
                '[WARN] findMainDirectionEMA: zero line segments for voting',
                file=sys.stderr)
            break
        subCandiSet = candiSet[valid]
        tcurXYZ, _, _ = sphereHoughVote(subSegNormal, subSegLength,
                                        subSegScores, 2 * binRadiusD, 2,
                                        subCandiSet)
        if tcurXYZ is None:
            print(
                '[WARN] findMainDirectionEMA: no answer found',
                file=sys.stderr)
            break
        curXYZ = tcurXYZ.copy()
    # Canonicalize signs so every direction points into z >= 0.
    mainDirect = curXYZ.copy()
    mainDirect[0] = mainDirect[0] * np.sign(mainDirect[0, 2])
    mainDirect[1] = mainDirect[1] * np.sign(mainDirect[1, 2])
    mainDirect[2] = mainDirect[2] * np.sign(mainDirect[2, 2])
    # Order: I1 = most vertical, I2 = most front-facing, I3 = the remaining one.
    uv = xyz2uvN(mainDirect)
    I1 = np.argmax(uv[:, 1])
    J = np.setdiff1d(np.arange(3), I1)
    I2 = np.argmin(np.abs(np.sin(uv[J, 0])))
    I2 = J[I2]
    I3 = np.setdiff1d(np.arange(3), np.hstack([I1, I2]))
    mainDirect = np.vstack([mainDirect[I1], mainDirect[I2], mainDirect[I3]])
    mainDirect[0] = mainDirect[0] * np.sign(mainDirect[0, 2])
    mainDirect[1] = mainDirect[1] * np.sign(mainDirect[1, 1])
    mainDirect[2] = mainDirect[2] * np.sign(mainDirect[2, 0])
    # Append the three opposite directions.
    mainDirect = np.vstack([mainDirect, -mainDirect])
    return mainDirect, score, angle
def multi_linspace(start, stop, num):
    """Row-wise linspace.

    start, stop: 1-D arrays of equal length K.
    Returns a K x num array whose i-th row samples [start[i], stop[i]]
    uniformly with `num` points (endpoints included).
    """
    samples = np.arange(num, dtype=np.float64)
    stride = (stop - start) / (num - 1)
    return start.reshape(-1, 1) + stride.reshape(-1, 1) * samples
def assignVanishingType(lines, vp, tol, area=10):
    '''Assign each line segment to its most consistent vanishing point.

    Returns (tp, typeCost): per-line vp index (or numVP + 1 when the best cost
    exceeds `tol`) and the full line-by-vp angular cost matrix.
    '''
    numLine = len(lines)
    numVP = len(vp)
    typeCost = np.zeros((numLine, numVP))
    # perpendicular: cost = angle between segment normal and the vp plane
    for vid in range(numVP):
        cosint = (lines[:, :3] * vp[[vid]]).sum(1)
        typeCost[:, vid] = np.arcsin(np.abs(cosint).clip(-1, 1))
    # infinity: sample 100 points per segment; reject assignments whose
    # samples pass within `area` degrees of the vp itself.
    u = np.stack([lines[:, 4], lines[:, 5]], -1)
    u = u.reshape(-1, 1) * 2 * np.pi - np.pi
    v = computeUVN_vec(lines[:, :3], u, lines[:, 3])
    xyz = uv2xyzN_vec(np.hstack([u, v]), np.repeat(lines[:, 3], 2))
    xyz = multi_linspace(xyz[0::2].reshape(-1), xyz[1::2].reshape(-1), 100)
    xyz = np.vstack([blk.T for blk in np.split(xyz, numLine)])
    xyz = xyz / np.linalg.norm(xyz, axis=1, keepdims=True)
    for vid in range(numVP):
        ang = np.arccos(np.abs((xyz * vp[[vid]]).sum(1)).clip(-1, 1))
        notok = (ang < area * np.pi / 180).reshape(numLine, 100).sum(1) != 0
        typeCost[notok, vid] = 100
    I = typeCost.min(1)
    tp = typeCost.argmin(1)
    # NOTE(review): out-of-tolerance lines get label numVP + 1, leaving numVP
    # itself unused as a class id — confirm downstream expects this.
    tp[I > tol] = numVP + 1
    return tp, typeCost
def refitLineSegmentB(lines, vp, vpweight=0.1):
    '''
    Refit direction of line segments
    INPUT:
        lines: original line segments
        vp: vanishing point
        vpweight: if set to 0, lines will not change; if set to inf, lines will
                  be forced to pass vp
    '''
    numSample = 100
    numLine = len(lines)
    xyz = np.zeros((numSample + 1, 3))
    wei = np.ones((numSample + 1, 1))
    # The vp itself acts as one extra, heavily weighted fitting sample.
    wei[numSample] = vpweight * numSample
    lines_ali = lines.copy()
    for i in range(numLine):
        n = lines[i, :3]
        sid = lines[i, 4] * 2 * np.pi
        eid = lines[i, 5] * 2 * np.pi
        # Segments may wrap across the panorama seam.
        if eid < sid:
            x = np.linspace(sid, eid + 2 * np.pi, numSample) % (2 * np.pi)
        else:
            x = np.linspace(sid, eid, numSample)
        u = -np.pi + x.reshape(-1, 1)
        v = computeUVN(n, u, lines[i, 3])
        xyz[:numSample] = uv2xyzN(np.hstack([u, v]), lines[i, 3])
        xyz[numSample] = vp
        # Re-fit the great-circle normal through the samples plus the vp.
        outputNM = curveFitting(xyz, wei)
        lines_ali[i, :3] = outputNM
    return lines_ali
def paintParameterLine(parameterLine, width, height):
    '''Rasterize parameterized great-circle segments into a width x height map.

    Each painted pixel stores the index of the segment that drew it (later
    segments overwrite earlier ones). NOTE(review): segment index 0 is
    indistinguishable from background — confirm callers only threshold > 0.
    '''
    lines = parameterLine.copy()
    panoEdgeC = np.zeros((height, width))
    num_sample = max(height, width)
    for i in range(len(lines)):
        n = lines[i, :3]
        sid = lines[i, 4] * 2 * np.pi
        eid = lines[i, 5] * 2 * np.pi
        # Segments may wrap across the panorama seam.
        if eid < sid:
            x = np.linspace(sid, eid + 2 * np.pi, num_sample)
            x = x % (2 * np.pi)
        else:
            x = np.linspace(sid, eid, num_sample)
        u = -np.pi + x.reshape(-1, 1)
        v = computeUVN(n, u, lines[i, 3])
        xyz = uv2xyzN(np.hstack([u, v]), lines[i, 3])
        uv = xyz2uvN(xyz, 1)
        # Angular coords -> integer pixel coords (1-based, shifted below).
        m = np.minimum(
            np.floor((uv[:, 0] + np.pi) / (2 * np.pi) * width) + 1,
            width).astype(np.int32)
        n = np.minimum(
            np.floor(((np.pi / 2) - uv[:, 1]) / np.pi * height) + 1,
            height).astype(np.int32)
        panoEdgeC[n - 1, m - 1] = i
    return panoEdgeC
def panoEdgeDetection(img, viewSize=320, qError=0.7, refineIter=3):
    '''
    line detection on panorama
    INPUT:
        img: image waiting for detection, double type, range 0~1
        viewSize: image size of croped views
        qError: set smaller if more line segment wanted
    OUTPUT:
        oLines: detected line segments
        vp: vanishing point
        views: separate views of panorama
        edges: original detection of line segments in separate views
        panoEdge: image for visualize line segments
    '''
    start = time.time()
    cutSize = viewSize
    fov = np.pi / 3
    # View directions: 12 around the horizon, 12 tilted +/- 45 deg,
    # plus zenith and nadir.
    xh = np.arange(-np.pi, np.pi * 5 / 6, np.pi / 6)
    yh = np.zeros(xh.shape[0])
    xp = np.array([
        -3 / 3, -2 / 3, -1 / 3, 0 / 3, 1 / 3, 2 / 3, -3 / 3, -2 / 3, -1 / 3,
        0 / 3, 1 / 3, 2 / 3
    ]) * np.pi
    yp = np.array([
        1 / 4, 1 / 4, 1 / 4, 1 / 4, 1 / 4, 1 / 4, -1 / 4, -1 / 4, -1 / 4,
        -1 / 4, -1 / 4, -1 / 4
    ]) * np.pi
    x = np.concatenate([xh, xp, [0, 0]])
    y = np.concatenate([yh, yp, [np.pi / 2., -np.pi / 2]])
    sepScene = separatePano(img.copy(), fov, x, y, cutSize)
    stop1 = time.time()
    # Run the LSD detector on every perspective view, then lift the
    # detections back onto the panorama.
    edge = []
    LSD = cv2.createLineSegmentDetector(
        _refine=cv2.LSD_REFINE_ADV, _quant=qError)
    for i, scene in enumerate(sepScene):
        edgeMap, edgeList = lsdWrap(scene['img'], LSD)
        edge.append({
            'img': edgeMap,
            'edgeLst': edgeList,
            'vx': scene['vx'],
            'vy': scene['vy'],
            'fov': scene['fov'],
        })
        edge[-1]['panoLst'] = edgeFromImg2Pano(edge[-1])
    lines, olines = combineEdgesN(edge)
    stop2 = time.time()
    # Alternate vp estimation and line refitting for refineIter rounds.
    clines = lines.copy()
    for _ in range(refineIter):
        mainDirect, score, angle = findMainDirectionEMA(clines)
        tp, typeCost = assignVanishingType(lines, mainDirect[:3], 0.1, 10)
        lines1 = lines[tp == 0]
        lines2 = lines[tp == 1]
        lines3 = lines[tp == 2]
        lines1rB = refitLineSegmentB(lines1, mainDirect[0], 0)
        lines2rB = refitLineSegmentB(lines2, mainDirect[1], 0)
        lines3rB = refitLineSegmentB(lines3, mainDirect[2], 0)
        clines = np.vstack([lines1rB, lines2rB, lines3rB])
    stop3 = time.time()
    # Visualization: one channel per vanishing direction.
    panoEdge1r = paintParameterLine(lines1rB, img.shape[1], img.shape[0])
    panoEdge2r = paintParameterLine(lines2rB, img.shape[1], img.shape[0])
    panoEdge3r = paintParameterLine(lines3rB, img.shape[1], img.shape[0])
    panoEdger = np.stack([panoEdge1r, panoEdge2r, panoEdge3r], -1)
    # output
    olines = clines
    vp = mainDirect
    views = sepScene
    edges = edge
    panoEdge = panoEdger
    print('prep %f, lsd %f, vp %f' % (stop1 - start, stop2 - stop1,
                                      stop3 - stop2))
    return olines, vp, views, edges, panoEdge, score, angle
if __name__ == '__main__':
    # CLI demo: run vp/line detection on one panorama and save visualizations.
    # disable OpenCV3's non thread safe OpenCL option
    cv2.ocl.setUseOpenCL(False)
    import os
    import argparse
    import PIL
    from PIL import Image
    import time
    parser = argparse.ArgumentParser()
    parser.add_argument('--i', required=True)
    parser.add_argument('--o_prefix', required=True)
    parser.add_argument('--qError', default=0.7, type=float)
    parser.add_argument('--refineIter', default=3, type=int)
    args = parser.parse_args()
    # Read image (forced to the canonical 1024 x 512 equirect resolution)
    img_ori = np.array(Image.open(args.i).resize((1024, 512)))
    # Vanishing point estimation & Line segments detection
    s_time = time.time()
    olines, vp, views, edges, panoEdge, score, angle = panoEdgeDetection(
        img_ori, qError=args.qError, refineIter=args.refineIter)
    print('Elapsed time: %.2f' % (time.time() - s_time))
    panoEdge = (panoEdge > 0)
    print('Vanishing point:')
    for v in vp[2::-1]:
        print('%.6f %.6f %.6f' % tuple(v))
    # Visualization: rotate the panorama so the vps become axis-aligned,
    # then overlay each vp's edges in its own color channel.
    edg = rotatePanorama(panoEdge.astype(np.float64), vp[2::-1])
    img = rotatePanorama(img_ori / 255.0, vp[2::-1])
    one = img.copy() * 0.5
    one[(edg > 0.5).sum(-1) > 0] = 0
    one[edg[..., 0] > 0.5, 0] = 1
    one[edg[..., 1] > 0.5, 1] = 1
    one[edg[..., 2] > 0.5, 2] = 1
    Image.fromarray(
        (edg * 255).astype(np.uint8)).save('%s_edg.png' % args.o_prefix)
    Image.fromarray(
        (img * 255).astype(np.uint8)).save('%s_img.png' % args.o_prefix)
    Image.fromarray(
        (one * 255).astype(np.uint8)).save('%s_one.png' % args.o_prefix)
| 31,860 | 32.257829 | 103 | py |
DMH-Net | DMH-Net-main/misc/utils.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
def group_weight(module):
    """Partition a module's parameters into two optimizer groups.

    Conv/Linear weights get weight decay; all biases and all normalization
    parameters (BatchNorm, GroupNorm) are exempt from it.
    Returns a two-element list of param-group dicts for torch optimizers.
    """
    decay, no_decay = [], []
    norm_types = (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)
    for layer in module.modules():
        if isinstance(layer, (nn.Linear, nn.modules.conv._ConvNd)):
            decay.append(layer.weight)
            if layer.bias is not None:
                no_decay.append(layer.bias)
        elif isinstance(layer, norm_types):
            if layer.weight is not None:
                no_decay.append(layer.weight)
            if layer.bias is not None:
                no_decay.append(layer.bias)
    # Every parameter must land in exactly one group.
    assert len(list(module.parameters())) == len(decay) + len(no_decay)
    return [
        dict(params=decay),
        dict(params=no_decay, weight_decay=.0)
    ]
def adjust_learning_rate(optimizer, args):
    """Set the current learning rate on every param group.

    Schedule: linear warm-up from args.warmup_lr to args.lr over
    args.warmup_iters iterations, then polynomial decay (power args.lr_pow)
    down to zero at args.max_iters. Also records the value in args.running_lr.
    """
    if args.cur_iter < args.warmup_iters:
        # Linear ramp from warmup_lr up to lr.
        progress = args.cur_iter / args.warmup_iters
        args.running_lr = args.warmup_lr + (args.lr - args.warmup_lr) * progress
    else:
        progress = (float(args.cur_iter) - args.warmup_iters) / (
            args.max_iters - args.warmup_iters)
        args.running_lr = args.lr * max((1. - progress), 0.)**args.lr_pow
    for group in optimizer.param_groups:
        group['lr'] = args.running_lr
def save_model(net, path, args):
    """Serialize *net* plus everything needed to rebuild it to *path*.

    The checkpoint stores the CLI args, the constructor kwargs read off the
    wrapped model, and the full state dict.
    """
    checkpoint = OrderedDict()
    checkpoint['args'] = args.__dict__
    # net is assumed to be wrapped (e.g. DataParallel), hence .module.
    checkpoint['kwargs'] = {
        'backbone': net.module.backbone,
        'use_rnn': net.module.use_rnn,
    }
    checkpoint['state_dict'] = net.state_dict()
    pipesave(checkpoint, path)
def load_trained_model(Net, path, *args):
    """Instantiate Net(*args) and load the weights stored at *path*."""
    checkpoint = pipeload(path, map_location='cpu')
    model = Net(*args)
    # strict=False tolerates checkpoints with extra or missing keys.
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    return model
def pipeload(filepath: str, **kwargs):
    """torch.load that also understands hdfs:// paths.

    Local paths go straight to torch.load; hdfs:// paths are streamed
    through the `hdfs dfs` CLI (see hopen) into an in-memory buffer first.
    Extra kwargs (e.g. map_location) are forwarded to torch.load.
    """
    if not filepath.startswith("hdfs://"):
        return torch.load(filepath, **kwargs)
    # BUGFIX: `io` was never imported at module level, so the hdfs branch
    # raised NameError. Import it locally — the common local-file path
    # does not need it.
    import io
    with hopen(filepath, "rb") as reader:
        accessor = io.BytesIO(reader.read())
        state_dict = torch.load(accessor, **kwargs)
        del accessor
        return state_dict
def pipesave(obj, filepath: str, **kwargs):
    """torch.save that also understands hdfs:// destinations.

    Local paths are written directly; hdfs:// paths are piped through the
    `hdfs dfs` CLI (see hopen).
    """
    if not filepath.startswith("hdfs://"):
        torch.save(obj, filepath, **kwargs)
        return
    with hopen(filepath, "wb") as writer:
        torch.save(obj, writer, **kwargs)
HADOOP_BIN = 'PATH=/usr/bin:$PATH hdfs'
from contextlib import contextmanager
@contextmanager
def hopen(hdfs_path, mode="r"):
    """Open an HDFS path as a file-like pipe via the `hdfs dfs` CLI.

    mode starting with 'r' -> read (dfs -text), exactly 'wa' -> append,
    any other mode starting with 'w' -> overwrite put. The yielded object
    is the subprocess's stdout/stdin pipe; it is closed and the process
    reaped when the with-block exits.
    Raises RuntimeError for any unsupported mode.
    """
    # BUGFIX: `subprocess` was never imported at module level, so every
    # call raised NameError. Import it locally.
    import subprocess
    if mode.startswith("r"):
        pipe = subprocess.Popen(
            "{} dfs -text {}".format(HADOOP_BIN, hdfs_path), shell=True, stdout=subprocess.PIPE)
        yield pipe.stdout
        pipe.stdout.close()
        pipe.wait()
        return
    if mode == "wa":
        pipe = subprocess.Popen(
            "{} dfs -appendToFile - {}".format(HADOOP_BIN, hdfs_path), shell=True, stdin=subprocess.PIPE)
        yield pipe.stdin
        pipe.stdin.close()
        pipe.wait()
        return
    if mode.startswith("w"):
        pipe = subprocess.Popen(
            "{} dfs -put -f - {}".format(HADOOP_BIN, hdfs_path), shell=True, stdin=subprocess.PIPE)
        yield pipe.stdin
        pipe.stdin.close()
        pipe.wait()
        return
    raise RuntimeError("unsupported io mode: {}".format(mode))
def js_div(p_output, q_output, get_softmax=True):
    """Jensen-Shannon divergence between two batches of logits.

    When get_softmax is True the inputs are treated as logits and passed
    through softmax along the last dim; otherwise they must already be
    probability distributions. Returns a scalar tensor (batchmean reduction).
    """
    # BUGFIX: the original return line had dataset-dump metadata
    # ("| 4,291 | ...") fused onto it, making the statement invalid.
    if get_softmax:
        p_output = F.softmax(p_output, -1)
        q_output = F.softmax(q_output, -1)
    # JS(P, Q) = (KL(P || M) + KL(Q || M)) / 2 with M = (P + Q) / 2.
    log_mean_output = ((p_output + q_output) / 2).log()
    ploss = F.kl_div(log_mean_output, p_output, reduction='batchmean')
    qloss = F.kl_div(log_mean_output, q_output, reduction='batchmean')
    return (ploss + qloss) / 2
DMH-Net | DMH-Net-main/misc/post_proc.py | import numpy as np
from scipy.ndimage import map_coordinates
from scipy.spatial.distance import pdist, squareform
from sklearn.decomposition import PCA
from scipy.ndimage.filters import maximum_filter
from scipy.spatial import distance
PI = float(np.pi)
def interp_point_by_u(xy0, xy1, u0, u1, u):
    """Point on the segment xy0-xy1 hit by the camera ray at angle u.

    u0/u1 are the ray angles of the two endpoints; the interpolation weight
    follows from the law of sines applied to the two sub-triangles.
    """
    d0 = np.linalg.norm(xy0)
    d1 = np.linalg.norm(xy1)
    weight = np.abs(np.sin(u - u1) * d1 / np.sin(u - u0) / d0)
    return xy0 + (xy1 - xy0) / (1 + weight)
def get_maxi_loc(signal, min_v, r):
    """Indices of local maxima of a 1-D signal above a threshold.

    A sample is a peak if it equals the wrap-around maximum over a window of
    size r and exceeds max(min_v, mean(signal)).
    """
    if len(signal) == 0:
        return np.empty(0, )
    # Never accept peaks below the signal mean.
    floor = max(min_v, np.mean(signal))
    local_max = maximum_filter(signal, size=r, mode='wrap')
    peaks = np.where(local_max == signal)[0]
    return peaks[signal[peaks] > floor]
'''
def gen_doors(cor_id, y_seg_, r, min_v, cam_height = 1.6, W = 1024, H = 512):
ww_loc = cor_id[::2,0].astype(int)
floor_z = -cam_height
floor_xy = np_coor2xy(cor_id[1::2], floor_z, W, H, floorW=1, floorH=1)
u = np_coorx2u(cor_id[1::2, 0], W)
signal_full = y_seg_
doors_on_wall = []
doors_score = []
for idx in range(0, ww_loc.shape[0]):
start_xy = floor_xy[idx, :]
end_xy = floor_xy[(idx+1) % ww_loc.shape[0], :]
if(np.sum(abs(start_xy - end_xy)) < 0.6):
continue
if(idx == ww_loc.shape[0] - 1):
signal1 = signal_full[np.arange(ww_loc[-1],W)]
signal2 = signal_full[np.arange(0, ww_loc[0])]
pk_loc1 = get_maxi_loc(signal1, min_v, r)
pk_loc1 += ww_loc[-1]
pk_loc2 = get_maxi_loc(signal2, min_v, r)
pk_loc = np.concatenate([pk_loc1,pk_loc2])
#print('pk', pk_loc1, pk_loc2)
else:
signal = signal_full[np.arange(ww_loc[idx], ww_loc[idx + 1])]
pk_loc = get_maxi_loc(signal, min_v, r)
pk_loc += ww_loc[idx]
#max_v = max_v[pk_loc]
#if(idx == ww_loc.shape[0] - 1):
# pk_loc = pk_loc % W
u_doors = np_coorx2u(pk_loc, W)
door_xys = []
for u_door in u_doors:
door_xy = interp_point_by_u(start_xy, end_xy, u[idx], u[(idx+1) % ww_loc.shape[0]], u_door)
door_xys.append(door_xy)
door_xys = np.array(door_xys)
doors_on_wall.append(door_xys)
return doors_on_wall
'''
def gen_doors(cor_id, xs_seg_wd_, xs_seg_dw_, cam_height=1.6, W=1024, H=512):
    '''Recover per-wall door endpoints from wall-door / door-wall columns.

    NOTE(review): this function is broken as written — the loop body
    references `signal_full`, `min_v` and `r`, none of which are defined in
    this scope (NameError when a wall longer than 0.6 exists), and
    `draw_floor_plan` is not defined in this module. It looks like a
    partially rewritten copy of the commented-out version above.
    '''
    ww_loc = cor_id[::2, 0].astype(int)
    floor_z = -cam_height
    floor_xy = np_coor2xy(cor_id[1::2], floor_z, W, H, floorW=1, floorH=1)
    u = np_coorx2u(cor_id[1::2, 0], W)
    cor = np.roll(ww_loc, 1, axis=0)  # NOTE(review): unused
    wd_bounds_left = np.sign(xs_seg_wd_.reshape(-1, 1) - ww_loc.reshape(1, -1))
    wd_bounds_right = np.sign(xs_seg_wd_.reshape(-1, 1) - np.roll(ww_loc, -1, axis=0).reshape(1, -1))
    # NOTE(review): dw_bounds_* also use xs_seg_wd_ — presumably xs_seg_dw_
    # was intended; all four bounds arrays are unused anyway.
    dw_bounds_left = np.sign(xs_seg_wd_.reshape(-1, 1) - ww_loc.reshape(1, -1))
    dw_bounds_right = np.sign(xs_seg_wd_.reshape(-1, 1) - np.roll(ww_loc, -1, axis=0).reshape(1, -1))
    doors_on_wall = []
    doors_score = []  # NOTE(review): never filled
    u_doors_lb = np_coorx2u(xs_seg_wd_, W)  # NOTE(review): unused
    u_doors_rb = np_coorx2u(xs_seg_dw_, W)  # NOTE(review): unused
    # print(u_doors_lb)
    for idx in range(0, ww_loc.shape[0]):
        start_xy = floor_xy[idx, :]
        end_xy = floor_xy[(idx + 1) % ww_loc.shape[0], :]
        start_u = ww_loc[idx]  # NOTE(review): unused
        end_u = ww_loc[(idx + 1) % ww_loc.shape[0]]  # NOTE(review): unused
        # Skip very short walls (Manhattan length < 0.6).
        if (np.sum(abs(start_xy - end_xy)) < 0.6):
            continue
        if (idx == ww_loc.shape[0] - 1):
            # Last wall wraps around the panorama seam.
            signal1 = signal_full[np.arange(ww_loc[-1], W)]
            signal2 = signal_full[np.arange(0, ww_loc[0])]
            pk_loc1 = get_maxi_loc(signal1, min_v, r)
            pk_loc1 += ww_loc[-1]
            pk_loc2 = get_maxi_loc(signal2, min_v, r)
            pk_loc = np.concatenate([pk_loc1, pk_loc2])
            #print('pk', pk_loc1, pk_loc2)
        else:
            signal = signal_full[np.arange(ww_loc[idx], ww_loc[idx + 1])]
            pk_loc = get_maxi_loc(signal, min_v, r)
            pk_loc += ww_loc[idx]
        #max_v = max_v[pk_loc]
        #if(idx == ww_loc.shape[0] - 1):
        #    pk_loc = pk_loc % W
        u_doors = np_coorx2u(pk_loc, W)
        door_xys = []
        for u_door in u_doors:
            door_xy = interp_point_by_u(start_xy, end_xy, u[idx], u[(idx + 1) % ww_loc.shape[0]], u_door)
            door_xys.append(door_xy)
        door_xys = np.array(door_xys)
        doors_on_wall.append(door_xys)
    draw_floor_plan(floor_xy)  # NOTE(review): draw_floor_plan is undefined here
    return doors_on_wall
def check_even_dps(dps, del_candidates, MIN_DOOR_WIDTH=0.4, MAX_D2D_DIST=0.2):
    """Validate an even-sized list of door points on one wall.

    Points are paired (0,1), (2,3), ... as door (start, end). If some pair is
    narrower than MIN_DOOR_WIDTH, try the alternative pairing with the first
    and last points treated as spurious. Returns the (possibly trimmed) point
    array, or None when a lone too-narrow door cannot be salvaged.
    """
    widths = np.linalg.norm(dps[::2, :] - dps[1::2, :], axis=1)
    if not (widths < MIN_DOOR_WIDTH).any():
        return dps  # every (start, end) pair is wide enough
    if dps.shape[0] == 2:
        # A single too-narrow door: assume both points are bad detections.
        return None
    # Re-pair with head and tail removed.
    shifted_widths = np.linalg.norm(dps[2::2, :] - dps[1:-1:2, :], axis=1)
    if not (shifted_widths < MIN_DOOR_WIDTH).any():
        # Only actually drop head/tail when they sit close to the
        # neighbouring walls' detections.
        if del_candidates[0] < MAX_D2D_DIST and del_candidates[1] < MAX_D2D_DIST:
            dps = np.delete(dps, [0, -1], 0)
        return dps
    # Fall back to the original pairing.
    return dps
def check_odd_dps(dps, del_candidate_pair, MIN_DOOR_WIDTH=0.4, MAX_D2D_DIST=0.2):
    """Resolve an odd number of detected door points on one wall.

    Either the head or the tail point is spurious; decide which by the
    distance to the neighbouring walls' detections (del_candidate_pair) and
    by which remaining pairing yields only wide-enough doors. Returns the
    even-sized point array, or None if no consistent pairing exists.
    """
    if dps.shape[0] == 1:
        # A lone point can never form a door.
        return None
    # Door widths under the two possible pairings (drop last vs. drop first).
    door_widths_0 = np.linalg.norm(dps[0:-1:2, :] - dps[1::2, :], axis=1)
    door_widths_1 = np.linalg.norm(dps[1::2, :] - dps[2::2, :], axis=1)
    if (del_candidate_pair[0] < MAX_D2D_DIST) and (np.sum(door_widths_1 < MIN_DOOR_WIDTH) == 0):
        # Head point is close to the previous wall's tail -> drop it.
        return np.delete(dps, 0, 0)
    if (del_candidate_pair[1] < MAX_D2D_DIST) and (np.sum(door_widths_0 < MIN_DOOR_WIDTH) == 0):
        # Tail point is close to the next wall's head -> drop it.
        return np.delete(dps, -1, 0)
    # Otherwise drop the point nearest to its predecessor (likely a double
    # detection of the same edge).
    break_idx = np.argmin(np.linalg.norm(dps[0:-1:1, :] - dps[1::1, :],
                                         axis=1))
    dps = np.delete(dps, break_idx + 1, 0)
    door_widths = np.linalg.norm(dps[0:-1:2, :] - dps[1::2, :], axis=1)
    # BUGFIX: the original re-checked the stale, pre-deletion
    # `door_widths_0` here instead of the recomputed `door_widths`.
    if np.sum(door_widths < MIN_DOOR_WIDTH) == 0:
        return dps
    return None
def remove_narrow_doors(dps, MIN_DOOR_WIDTH):
    """Iteratively drop door points too close to both neighbours.

    A point survives if its L1 distance to at least one adjacent point is
    >= MIN_DOOR_WIDTH; deletion repeats until a full pass removes nothing.
    """
    to_drop = []
    while True:  # can be optimized by only updating the widths that changed
        dps = np.delete(dps, to_drop, 0)
        to_drop = []
        for i in range(len(dps)):
            widths = [-1, -1]
            if i > 0:
                widths[0] = np.sum(np.abs(dps[i - 1] - dps[i]))
            if i < len(dps) - 1:
                widths[1] = np.sum(np.abs(dps[i + 1] - dps[i]))
            if max(widths) < MIN_DOOR_WIDTH:
                to_drop.append(i)
        if not to_drop:
            return dps
def filter_door_points(dps_on_walls, MIN_DOOR_WIDTH=0.5, MAX_D2D_DIST=0.2):
    '''Clean per-wall door detections into consistent (start, end) pairs.

    Pipeline: drop isolated narrow points, measure how close each wall's
    head/tail point is to its neighbours' detections, then fix walls with an
    even/odd number of points via check_even_dps / check_odd_dps.
    NOTE(review): emptiness is tested with `dps.any()`, which is also False
    for a non-empty all-zero array — confirm door points can never be all
    zeros here.
    '''
    final_doors_on_walls = []
    # remove all the singular door points which are too close for both sides
    for dps_idx in range(len(dps_on_walls)):
        dps = dps_on_walls[dps_idx]
        dps_on_walls[dps_idx] = remove_narrow_doors(dps, MIN_DOOR_WIDTH)
    # check head and tail for deletion candidates
    del_candidates = []
    for dps_idx in range(len(dps_on_walls)):
        head_dist, tail_dist = 99, 99  # 99 = "no neighbour detection"
        dps = dps_on_walls[dps_idx]
        if (not dps.any()):
            del_candidates.append([head_dist, tail_dist])
            continue
        pre_dps = dps_on_walls[(dps_idx - 1 + len(dps_on_walls)) % len(dps_on_walls)]
        nxt_dps = dps_on_walls[(dps_idx + 1 + len(dps_on_walls)) % len(dps_on_walls)]
        if (pre_dps.any()):
            head_dist = np.linalg.norm(dps[0, :] - pre_dps[-1, :])
        if (nxt_dps.any()):
            tail_dist = np.linalg.norm(dps[-1, :] - nxt_dps[0, :])
        del_candidates.append([head_dist, tail_dist])
    for dps_idx in range(len(dps_on_walls)):
        dps = dps_on_walls[dps_idx]
        if (not dps.any()):
            continue
        if (dps.shape[0] % 2 == 0):
            res = check_even_dps(dps, del_candidates[dps_idx], MIN_DOOR_WIDTH, MAX_D2D_DIST)
        if (dps.shape[0] % 2 == 1):
            res = check_odd_dps(dps, del_candidates[dps_idx], MIN_DOOR_WIDTH, MAX_D2D_DIST)
        if (res is not None):
            final_doors_on_walls.append(np.array(res))
    return final_doors_on_walls
def get_dist_to_center(x, y, floorW=1024, floorH=512):
    """Euclidean distance from (x, y) to the floor-plan image centre."""
    dx = x - floorW / 2
    dy = y - floorH / 2
    return np.sqrt(dx * dx + dy * dy)
def check_inside_range(u0, u_min, u_max):
    """True iff u0 lies in the open interval (u_min, u_max).

    When u_min >= u_max the interval is interpreted as wrapping around the
    angular seam, i.e. (u_min, +inf) union (-inf, u_max).
    """
    if u_min < u_max:
        # Ordinary interval.
        return u_min < u0 < u_max
    # Wrap-around interval.
    return u0 > u_min or u0 < u_max
def fuv2img(fuv, coorW=1024, floorW=1024, floorH=512):
    '''
    Project 1d signal in uv space to 2d floor plane image
    '''
    # For every floor-plan pixel, compute the panorama column its direction
    # from the centre corresponds to.
    floor_plane_x, floor_plane_y = np.meshgrid(range(floorW), range(floorH))
    floor_plane_x, floor_plane_y = -(floor_plane_y -
                                     floorH / 2), floor_plane_x - floorW / 2
    floor_plane_coridx = (np.arctan2(floor_plane_y, floor_plane_x) /
                          (2 * PI) + 0.5) * coorW - 0.5
    # Linearly sample the 1-D signal, wrapping across the panorama seam.
    floor_plane = map_coordinates(fuv,
                                  floor_plane_coridx.reshape(1, -1),
                                  order=1,
                                  mode='wrap')
    floor_plane = floor_plane.reshape(floorH, floorW)
    return floor_plane
def np_coorx2u(coorx, coorW=1024):
    """Equirect pixel column -> longitude u in [-pi, pi)."""
    return 2 * PI * ((coorx + 0.5) / coorW - 0.5)
def np_coory2v(coory, coorH=512):
    """Equirect pixel row -> latitude v, +pi/2 at the top row, -pi/2 at the bottom."""
    return (0.5 - (coory + 0.5) / coorH) * PI
def np_coor2xy(coor, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512):
    '''
    Project equirect pixel coordinates onto the horizontal plane at height z.
    coor: N x 2, index of array in (col, row) format
    Returns N x 2 floor-plan (x, y) positions.
    '''
    coor = np.array(coor)
    lon = np_coorx2u(coor[:, 0], coorW)
    lat = np_coory2v(coor[:, 1], coorH)
    # Horizontal distance from the camera axis for each point.
    dist = z / np.tan(lat)
    px = dist * np.sin(lon) + floorW / 2 - 0.5
    py = -dist * np.cos(lon) + floorH / 2 - 0.5
    return np.stack([px, py], axis=-1)
def np_x_y_solve_u(x, y, floorW=1024, floorH=512):
    """Ray angle u of the floor-plan point (x, y) seen from the image centre."""
    dy = y - floorH / 2 + 0.5
    dx = x - floorW / 2 + 0.5
    return np.arctan2(dy, dx)
def np_x_u_solve_y(x, u, floorW=1024, floorH=512):
    """Given floor-plan x and ray angle u, solve for the matching y."""
    dist = (x - floorW / 2 + 0.5) / np.sin(u)
    return floorH / 2 - 0.5 - dist * np.cos(u)
def np_y_u_solve_x(y, u, floorW=1024, floorH=512):
    """Given floor-plan y and ray angle u, solve for the matching x."""
    dist = -(y - floorH / 2 + 0.5) / np.cos(u)
    return floorW / 2 - 0.5 + dist * np.sin(u)
def np_xy2coor(xy, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512):
    '''
    Inverse projection of np_coor2xy: floor-plan (x, y) -> pixel (col, row).
    xy: N x 2
    '''
    dx = xy[:, 0] - floorW / 2 + 0.5
    dy = xy[:, 1] - floorH / 2 + 0.5
    # Longitude from the horizontal offsets, latitude from the plane height.
    u = np.arctan2(dx, -dy)
    v = np.arctan(z / np.sqrt(dx ** 2 + dy ** 2))
    coorx = (u / (2 * PI) + 0.5) * coorW - 0.5
    coory = (-v / PI + 0.5) * coorH - 0.5
    return np.stack([coorx, coory], axis=-1)
def mean_percentile(vec, p1=25, p2=75):
    """Robust mean: average of the values between the p1-th and p2-th
    percentiles (inclusive)."""
    lo, hi = np.percentile(vec, [p1, p2])
    return vec[(lo <= vec) & (vec <= hi)].mean()
def vote(vec, tol):
    '''Robust 1-D mode finder: the densest run of sorted values spanning <= tol.

    Returns (best_fit, p_score, l1_score): the winning run's mean, the
    fraction of samples inside it, and the mean absolute deviation of all
    samples from it. Falls back to the median (p_score 0) when no run holds
    at least 40% of the samples, or when len(vec) < tol.
    '''
    vec = np.sort(vec)
    n = np.arange(len(vec))[::-1]
    # n[i, j] = number of samples in the closed run vec[i..j].
    n = n[:, None] - n[None, :] + 1.0
    # l[i, j] = |vec[i] - vec[j]| (run span); epsilon avoids exact zeros.
    l = squareform(pdist(vec[:, None], 'minkowski', p=1) + 1e-9)
    invalid = (n < len(vec) * 0.4) | (l > tol)
    if (~invalid).sum() == 0 or len(vec) < tol:
        best_fit = np.median(vec)
        p_score = 0
    else:
        l[invalid] = 1e5
        n[invalid] = -1
        score = n
        # Pick the valid run containing the most samples.
        max_idx = score.argmax()
        max_row = max_idx // len(vec)
        max_col = max_idx % len(vec)
        if max_col <= max_row:
            # NOTE(review): debug trace — argmax landing on the lower
            # triangle would mean an empty run; left as in the original.
            print('vec', len(vec), 'max idx', max_idx)
        best_fit = vec[max_row:max_col + 1].mean()
        p_score = (max_col - max_row + 1) / len(vec)
    l1_score = np.abs(vec - best_fit).mean()
    return best_fit, p_score, l1_score
def get_z1(coory0, coory1, z0=50, coorH=512):
    """Height of the point at row coory1 that shares a column with the
    plane-z0 point at row coory0."""
    lat0 = np_coory2v(coory0, coorH)
    lat1 = np_coory2v(coory1, coorH)
    # Horizontal distance is fixed by the known-plane row.
    dist = z0 / np.tan(lat0)
    return dist * np.tan(lat1)
def np_refine_by_fix_z(coory0, coory1, z0=50, coorH=512):
    '''
    Refine the rows coory1 using coory0, which is assumed to lie exactly on
    the plane z = z0 (same columns). Returns (refined coory1, robust mean
    height z1).
    '''
    lat0 = np_coory2v(coory0, coorH)
    lat1 = np_coory2v(coory1, coorH)
    dist = z0 / np.tan(lat0)
    # Per-column height implied by coory1, averaged robustly.
    z1_mean = mean_percentile(dist * np.tan(lat1))
    lat1_refine = np.arctan2(z1_mean, dist)
    coory1_refine = (-lat1_refine / PI + 0.5) * coorH - 0.5
    return coory1_refine, z1_mean
def infer_coory(coory0, h, z0=50, coorH=512):
    """Row of the point lifted by h above the plane-z0 point at row coory0
    (same column)."""
    dist = z0 / np.tan(np_coory2v(coory0, coorH))
    lat1 = np.arctan2(z0 + h, dist)
    return (-lat1 / PI + 0.5) * coorH - 0.5
def get_gpid(coorx, coorW):
    """Wall-group id for every panorama column, given boundary columns coorx."""
    boundary = np.zeros(coorW)
    boundary[np.round(coorx).astype(int)] = 1
    gpid = np.cumsum(boundary).astype(int)
    # Columns after the last boundary wrap around and belong to group 0.
    gpid[gpid == gpid[-1]] = 0
    return gpid
def get_gpid_idx(gpid, j):
    """Column indices belonging to group j, rotated so a group that wraps
    around the panorama seam comes out contiguous."""
    idx = np.where(gpid == j)[0]
    if idx[0] == 0 and idx[-1] != len(idx) - 1:
        # Group wraps past the right edge: rotate the tail to the front.
        shift = np.where(idx != np.arange(len(idx)))[0][0]
        idx = np.roll(idx, -shift)
    return idx
def gpid_two_split(xy, tpid_a, tpid_b):
    '''Split the point sequence into a prefix fitted to column tpid_a and a
    suffix fitted to column tpid_b; return the two segment means.

    NOTE(review): the split index maximizes the score below; the
    cum/m - cum/m^2 expression is a normalized cumulative-mean heuristic —
    confirm against the original derivation before modifying.
    '''
    m = np.arange(len(xy)) + 1
    cum_a = np.cumsum(xy[:, tpid_a])
    cum_b = np.cumsum(xy[::-1, tpid_b])
    l1_a = cum_a / m - cum_a / (m * m)
    l1_b = cum_b / m - cum_b / (m * m)
    l1_b = l1_b[::-1]
    # Evaluate every split position; both sides must be non-empty.
    score = l1_a[:-1] + l1_b[1:]
    best_split = score.argmax() + 1
    va = xy[:best_split, tpid_a].mean()
    vb = xy[best_split:, tpid_b].mean()
    return va, vb
def _get_rot_rad(px, py):
if px < 0:
px, py = -px, -py
rad = np.arctan2(py, px) * 180 / np.pi
if rad > 45:
return 90 - rad
if rad < -45:
return -90 - rad
return -rad
def get_rot_rad(init_coorx, coory, z=50, coorW=1024, coorH=512, floorW=1024, floorH=512, tol=5):
    '''Estimate the global rotation that axis-aligns the walls.

    Each wall segment suggests a rotation via PCA of its floor-plan points;
    the largest cluster of suggestions (consecutive values within `tol`
    degrees) is averaged. Returns (dx, rot_rad): the equivalent column shift
    for a 1024-wide panorama and the rotation in degrees.
    '''
    gpid = get_gpid(init_coorx, coorW)
    coor = np.hstack([np.arange(coorW)[:, None], coory[:, None]])
    xy = np_coor2xy(coor, z, coorW, coorH, floorW, floorH)
    xy_cor = []  # NOTE(review): unused
    rot_rad_suggestions = []
    for j in range(len(init_coorx)):
        pca = PCA(n_components=1)
        pca.fit(xy[gpid == j])
        rot_rad_suggestions.append(_get_rot_rad(*pca.components_[0]))
    # The 1e9 sentinel terminates the clustering sweep below.
    rot_rad_suggestions = np.sort(rot_rad_suggestions + [1e9])
    rot_rad = np.mean(rot_rad_suggestions[:-1])
    best_rot_rad_sz = -1
    last_j = 0
    for j in range(1, len(rot_rad_suggestions)):
        if rot_rad_suggestions[j] - rot_rad_suggestions[j - 1] > tol:
            last_j = j
        elif j - last_j > best_rot_rad_sz:
            rot_rad = rot_rad_suggestions[last_j:j + 1].mean()
            best_rot_rad_sz = j - last_j
    dx = int(round(rot_rad * 1024 / 360))
    return dx, rot_rad
def gen_ww_cuboid(xy, gpid, tol):
    '''Fit a 4-wall (cuboid) layout: one axis-aligned wall per column group.

    Each wall is a dict {'type': 0 (constant x) or 1 (constant y),
    'val': coordinate, 'score': vote confidence}.
    '''
    xy_cor = []
    assert len(np.unique(gpid)) == 4
    # For each part seperated by wall-wall peak, voting for a wall
    for j in range(4):
        now_x = xy[gpid == j, 0]
        now_y = xy[gpid == j, 1]
        new_x, x_score, x_l1 = vote(now_x, tol)
        new_y, y_score, y_l1 = vote(now_y, tol)
        # Prefer the axis with the stronger, tighter vote.
        if (x_score, -x_l1) > (y_score, -y_l1):
            xy_cor.append({'type': 0, 'val': new_x, 'score': x_score})
        else:
            xy_cor.append({'type': 1, 'val': new_y, 'score': y_score})
    # Sanity fallback
    # Walls must alternate x/y around the loop; pick the better parity.
    scores = [0, 0]
    for j in range(4):
        if xy_cor[j]['type'] == 0:
            scores[j % 2] += xy_cor[j]['score']
        else:
            scores[j % 2] -= xy_cor[j]['score']
    if scores[0] > scores[1]:
        xy_cor[0]['type'] = 0
        xy_cor[1]['type'] = 1
        xy_cor[2]['type'] = 0
        xy_cor[3]['type'] = 1
    else:
        xy_cor[0]['type'] = 1
        xy_cor[1]['type'] = 0
        xy_cor[2]['type'] = 1
        xy_cor[3]['type'] = 0
    return xy_cor
def gen_left_right_doors(cor_id, y_seg_wd_, y_seg_dw_, r, min_v, cam_height=1.6, W=1024, H=512):
    '''Detect per-wall door boundary points from head/tail boundary signals.

    NOTE(review): broken as written — see the `y_seg_` note below; this looks
    like an unfinished edit of gen_doors.
    '''
    signal_door_head = y_seg_wd_
    pk_loc_head = get_maxi_loc(signal_door_head, min_v, r)  # NOTE(review): unused
    signal_door_tail = y_seg_dw_
    pk_loc_tail = get_maxi_loc(signal_door_tail, min_v, r)  # NOTE(review): unused
    ww_loc = cor_id[::2, 0].astype(int)
    floor_z = -cam_height
    floor_xy = np_coor2xy(cor_id[1::2], floor_z, W, H, floorW=1, floorH=1)
    u = np_coorx2u(cor_id[1::2, 0], W)
    # NOTE(review): `y_seg_` is undefined in this scope — this line raises
    # NameError; probably y_seg_wd_ or y_seg_dw_ was intended.
    signal_full = y_seg_
    doors_on_wall = []
    doors_score = []  # NOTE(review): never filled
    for idx in range(0, ww_loc.shape[0]):
        start_xy = floor_xy[idx, :]
        end_xy = floor_xy[(idx + 1) % ww_loc.shape[0], :]
        # Skip very short walls (Manhattan length < 0.6).
        if (np.sum(abs(start_xy - end_xy)) < 0.6):
            continue
        if (idx == ww_loc.shape[0] - 1):
            # Last wall wraps around the panorama seam.
            signal1 = signal_full[np.arange(ww_loc[-1], W)]
            signal2 = signal_full[np.arange(0, ww_loc[0])]
            pk_loc1 = get_maxi_loc(signal1, min_v, r)
            pk_loc1 += ww_loc[-1]
            pk_loc2 = get_maxi_loc(signal2, min_v, r)
            pk_loc = np.concatenate([pk_loc1, pk_loc2])
            #print('pk', pk_loc1, pk_loc2)
        else:
            signal = signal_full[np.arange(ww_loc[idx], ww_loc[idx + 1])]
            pk_loc = get_maxi_loc(signal, min_v, r)
            pk_loc += ww_loc[idx]
        #max_v = max_v[pk_loc]
        #if(idx == ww_loc.shape[0] - 1):
        #    pk_loc = pk_loc % W
        u_doors = np_coorx2u(pk_loc, W)
        door_xys = []
        for u_door in u_doors:
            door_xy = interp_point_by_u(start_xy, end_xy, u[idx], u[(idx + 1) % ww_loc.shape[0]], u_door)
            door_xys.append(door_xy)
        door_xys = np.array(door_xys)
        doors_on_wall.append(door_xys)
    return doors_on_wall
def gen_ww_general(init_coorx, xy, gpid, tol):
    '''Fit an axis-aligned wall per column group for a general Manhattan room.

    Each wall dict: 'type' 0 (constant x) / 1 (constant y), 'val' coordinate,
    'score' vote confidence, 'u0'/'u1' boundary ray angles, 'tbd' whether the
    wall is still undecided. Walls are finalized greedily from highest score
    to lowest, inserting or flipping walls as needed so that adjacent walls
    alternate between x- and y-type.
    '''
    xy_cor = []
    assert len(init_coorx) == len(np.unique(gpid))
    # Candidate for each part seperated by wall-wall boundary
    for j in range(len(init_coorx)):
        now_x = xy[gpid == j, 0]
        now_y = xy[gpid == j, 1]
        if (len(now_x) < 2):
            continue
        new_x, x_score, x_l1 = vote(now_x, tol)
        new_y, y_score, y_l1 = vote(now_y, tol)
        u0 = np_coorx2u(init_coorx[(j - 1 + len(init_coorx)) % len(init_coorx)])
        u1 = np_coorx2u(init_coorx[j])
        # Prefer the axis with the stronger, tighter vote.
        if (x_score, -x_l1) > (y_score, -y_l1):
            xy_cor.append({
                'type': 0,
                'val': new_x,
                'score': x_score,
                'action': 'ori',
                'gpid': j,
                'u0': u0,
                'u1': u1,
                'tbd': True
            })
        else:
            xy_cor.append({
                'type': 1,
                'val': new_y,
                'score': y_score,
                'action': 'ori',
                'gpid': j,
                'u0': u0,
                'u1': u1,
                'tbd': True
            })
    # Construct wall from highest score to lowest
    while True:
        # Finding undetermined wall with highest score
        tbd = -1
        for i in range(len(xy_cor)):
            if xy_cor[i]['tbd'] and (tbd == -1 or xy_cor[i]['score'] > xy_cor[tbd]['score']):
                tbd = i
        if tbd == -1:
            break
        # This wall is determined
        xy_cor[tbd]['tbd'] = False
        p_idx = (tbd - 1 + len(xy_cor)) % len(xy_cor)
        n_idx = (tbd + 1) % len(xy_cor)
        num_tbd_neighbor = xy_cor[p_idx]['tbd'] + xy_cor[n_idx]['tbd']
        # Two adjacency walls are not determined yet => not special case
        if num_tbd_neighbor == 2:
            continue
        # Only one of adjacency two walls is determine => add now or later special case
        if num_tbd_neighbor == 1:
            if (not xy_cor[p_idx]['tbd'] and xy_cor[p_idx]['type'] == xy_cor[tbd]['type']) or\
                    (not xy_cor[n_idx]['tbd'] and xy_cor[n_idx]['type'] == xy_cor[tbd]['type']):
                # Current wall is different from one determined adjacency wall
                if xy_cor[tbd]['score'] >= -1:
                    # Later special case, add current to tbd
                    # (score penalty defers it until everything else is fixed)
                    xy_cor[tbd]['tbd'] = True
                    xy_cor[tbd]['score'] -= 100
                else:
                    # Fallback: forced change the current wall or infinite loop
                    # Insert a perpendicular wall at the shared boundary ray.
                    if not xy_cor[p_idx]['tbd']:
                        insert_at = tbd
                        if xy_cor[p_idx]['type'] == 0:
                            new_val = np_x_u_solve_y(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                            new_type = 1
                        else:
                            new_val = np_y_u_solve_x(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                            new_type = 0
                    else:
                        insert_at = n_idx
                        if xy_cor[n_idx]['type'] == 0:
                            new_val = np_x_u_solve_y(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
                            new_type = 1
                        else:
                            new_val = np_y_u_solve_x(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
                            new_type = 0
                    new_add = {
                        'type': new_type,
                        'val': new_val,
                        'score': 0,
                        'action': 'forced infer',
                        'gpid': -1,
                        'u0': -1,
                        'u1': -1,
                        'tbd': False
                    }
                    xy_cor.insert(insert_at, new_add)
            continue
        # Below checking special case
        if xy_cor[p_idx]['type'] == xy_cor[n_idx]['type']:
            # Two adjacency walls are same type, current wall should be differen type
            if xy_cor[tbd]['type'] == xy_cor[p_idx]['type']:
                # Fallback: three walls with same type => forced change the middle wall
                xy_cor[tbd]['type'] = (xy_cor[tbd]['type'] + 1) % 2
                xy_cor[tbd]['action'] = 'forced change'
                xy_cor[tbd]['val'] = xy[gpid == xy_cor[tbd]['gpid'], xy_cor[tbd]['type']].mean()
        else:
            # Two adjacency walls are different type => add one
            # Replace the current wall by two inferred perpendicular walls.
            tp0 = xy_cor[n_idx]['type']
            tp1 = xy_cor[p_idx]['type']
            if xy_cor[p_idx]['type'] == 0:
                val0 = np_x_u_solve_y(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                val1 = np_y_u_solve_x(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
            else:
                val0 = np_y_u_solve_x(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                val1 = np_x_u_solve_y(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
            new_add = [
                {
                    'type': tp0,
                    'val': val0,
                    'score': 0,
                    'action': 'forced infer',
                    'gpid': -1,
                    'u0': -1,
                    'u1': -1,
                    'tbd': False
                },
                {
                    'type': tp1,
                    'val': val1,
                    'score': 0,
                    'action': 'forced infer',
                    'gpid': -1,
                    'u0': -1,
                    'u1': -1,
                    'tbd': False
                },
            ]
            xy_cor = xy_cor[:tbd] + new_add + xy_cor[tbd + 1:]
    return xy_cor
def gen_ww_general_with_order_constraints(init_coorx, xy, gpid, tol):
    """Fit an ordered list of Manhattan wall planes to floor-plan points.

    Greedy variant of the general wall-wall solver that additionally enforces
    the circular ordering of walls: each wall must alternate axis type with its
    neighbours, inserting or replacing walls when the ordering is violated.

    :param init_coorx: image x-coordinates of the wall-wall boundaries
    :param xy: (n, 2) floor-plan points
    :param gpid: per-point wall group id (aligned with init_coorx indices)
    :param tol: voting tolerance passed to ``vote``
    :return: ordered list of wall dicts with keys
        type (0 = constant-x wall, 1 = constant-y wall), val, score, action,
        gpid, u0/u1 (angular extent), tbd (undetermined flag)
    """
    xy_cor = []
    if (len(init_coorx) != len(np.unique(gpid))):
        print('init_coorx', init_coorx, 'gpid len', len(np.unique(gpid)))
    # Candidate for each part seperated by wall-wall boundary
    for j in range(len(init_coorx)):
        now_x = xy[gpid == j, 0]
        now_y = xy[gpid == j, 1]
        if (len(now_x)) < 2:
            continue
        # Vote whether this group's points fit a constant-x or constant-y wall better.
        new_x, x_score, x_l1 = vote(now_x, tol)
        new_y, y_score, y_l1 = vote(now_y, tol)
        u0 = np_coorx2u(init_coorx[(j - 1 + len(init_coorx)) % len(init_coorx)])
        u1 = np_coorx2u(init_coorx[j])
        if (x_score, -x_l1) > (y_score, -y_l1):
            xy_cor.append({
                'type': 0,
                'val': new_x,
                'score': x_score,
                'action': 'ori',
                'gpid': j,
                'u0': u0,
                'u1': u1,
                'tbd': True
            })
        else:
            xy_cor.append({
                'type': 1,
                'val': new_y,
                'score': y_score,
                'action': 'ori',
                'gpid': j,
                'u0': u0,
                'u1': u1,
                'tbd': True
            })
    # Construct wall from highest score to lowest
    while True:
        # Finding undetermined wall with highest score
        tbd = -1
        for i in range(len(xy_cor)):
            if xy_cor[i]['tbd'] and (tbd == -1 or xy_cor[i]['score'] > xy_cor[tbd]['score']):
                tbd = i
        if tbd == -1:
            break
        # This wall is determined
        xy_cor[tbd]['tbd'] = False
        p_idx = (tbd - 1 + len(xy_cor)) % len(xy_cor)
        n_idx = (tbd + 1) % len(xy_cor)
        num_tbd_neighbor = xy_cor[p_idx]['tbd'] + xy_cor[n_idx]['tbd']
        # Two adjacency walls are not determined yet => not special case
        if num_tbd_neighbor == 2:
            continue
        # Only one of adjacency two walls is determine => add now or later special case
        if num_tbd_neighbor == 1:
            #print('test1')
            if (not xy_cor[p_idx]['tbd'] and xy_cor[p_idx]['type'] == xy_cor[tbd]['type']) or\
                    (not xy_cor[n_idx]['tbd'] and xy_cor[n_idx]['type'] == xy_cor[tbd]['type']):
                # Current wall is different from one determined adjacency wall
                if xy_cor[tbd]['score'] >= -1:
                    # Later special case, add current to tbd
                    xy_cor[tbd]['tbd'] = True
                    xy_cor[tbd]['score'] -= 100
                else:
                    # Fallback: forced change the current wall or infinite loop
                    if not xy_cor[p_idx]['tbd']:
                        insert_at = tbd
                        if xy_cor[p_idx]['type'] == 0:
                            new_val = np_x_u_solve_y(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                            new_type = 1
                        else:
                            new_val = np_y_u_solve_x(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                            new_type = 0
                    else:
                        insert_at = n_idx
                        if xy_cor[n_idx]['type'] == 0:
                            new_val = np_x_u_solve_y(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
                            new_type = 1
                        else:
                            new_val = np_y_u_solve_x(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
                            new_type = 0
                    new_add = {
                        'type': new_type,
                        'val': new_val,
                        'score': 0,
                        'action': 'forced infer',
                        'gpid': -1,
                        'u0': -1,
                        'u1': -1,
                        'tbd': False
                    }
                    xy_cor.insert(insert_at, new_add)
            continue
        # Below checking special case
        if xy_cor[p_idx]['type'] == xy_cor[n_idx]['type']:
            # Two adjacency walls are same type, current wall should be differen type
            if xy_cor[tbd]['type'] == xy_cor[p_idx]['type']:
                # Fallback: three walls with same type => forced change the middle wall
                xy_cor[tbd]['type'] = (xy_cor[tbd]['type'] + 1) % 2
                xy_cor[tbd]['action'] = 'forced change'
                xy_cor[tbd]['val'] = xy[gpid == xy_cor[tbd]['gpid'], xy_cor[tbd]['type']].mean()
        else:
            # Two adjacency walls are different type => add one
            #print(xy_cor[p_idx], xy_cor[tbd], xy_cor[n_idx])
            tp0 = xy_cor[n_idx]['type']
            tp1 = xy_cor[p_idx]['type']
            if xy_cor[p_idx]['type'] == 0:
                val0 = np_x_u_solve_y(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                val1 = np_y_u_solve_x(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
                insert_corner_u = np_x_y_solve_u(val1, val0)
                insert_dist = get_dist_to_center(val1, val0)
                pre_dist = get_dist_to_center(xy_cor[p_idx]['val'], val0)
                nxt_dist = get_dist_to_center(val1, xy_cor[n_idx]['val'])
            else:
                val0 = np_y_u_solve_x(xy_cor[p_idx]['val'], xy_cor[p_idx]['u1'])
                val1 = np_x_u_solve_y(xy_cor[n_idx]['val'], xy_cor[n_idx]['u0'])
                insert_corner_u = np_x_y_solve_u(val0, val1)
                insert_dist = get_dist_to_center(val0, val1)
                pre_dist = get_dist_to_center(val0, xy_cor[p_idx]['val'])
                nxt_dist = get_dist_to_center(xy_cor[n_idx]['val'], val1)
            if (check_inside_range(insert_corner_u, xy_cor[tbd]['u0'], xy_cor[tbd]['u1'])):
                # no occlusion happened
                new_add = [
                    {
                        'type': tp0,
                        'val': val0,
                        'score': 0,
                        'action': 'forced infer',
                        'gpid': -1,
                        'u0': -1,
                        'u1': -1,
                        'tbd': False
                    },
                    {
                        'type': tp1,
                        'val': val1,
                        'score': 0,
                        'action': 'forced infer',
                        'gpid': -1,
                        'u0': -1,
                        'u1': -1,
                        'tbd': False
                    },
                ]
                xy_cor = xy_cor[:tbd] + new_add + xy_cor[tbd + 1:]
            else:
                # Inferred corner falls outside the wall's angular extent:
                # either keep the inserted pair (if it is farther than the
                # occluding neighbour) or drop the current wall entirely.
                dist_to_compare = pre_dist if (insert_corner_u < xy_cor[tbd]['u0']) else nxt_dist
                if (insert_dist > dist_to_compare):
                    new_add = [
                        {
                            'type': tp0,
                            'val': val0,
                            'score': 0,
                            'action': 'forced infer',
                            'gpid': -1,
                            'u0': -1,
                            'u1': -1,
                            'tbd': False
                        },
                        {
                            'type': tp1,
                            'val': val1,
                            'score': 0,
                            'action': 'forced infer',
                            'gpid': -1,
                            'u0': -1,
                            'u1': -1,
                            'tbd': False
                        },
                    ]
                    xy_cor = xy_cor[:tbd] + new_add + xy_cor[tbd + 1:]
                else:
                    xy_cor = xy_cor[:tbd] + xy_cor[tbd + 1:]
    return xy_cor
def gen_ww(init_coorx,
           coory,
           z=50,
           coorW=1024,
           coorH=512,
           floorW=1024,
           floorH=512,
           tol=3,
           force_cuboid=True):
    """Recover wall-wall corner coordinates from per-column boundary y-values.

    Projects the boundary to the ceiling (floor-plan) view, fits Manhattan
    wall planes (cuboid or general), then converts wall intersections back to
    image coordinates.
    """
    # Drop boundary columns that are less than one pixel apart from their predecessor.
    near_dup = np.where(abs(init_coorx[1:] - init_coorx[:-1]) < 1)[0] + 1
    init_coorx = np.delete(init_coorx, near_dup, 0)
    gpid = get_gpid(init_coorx, coorW)
    coor = np.hstack([np.arange(coorW)[:, None], coory[:, None]])
    xy = np_coor2xy(coor, z, coorW, coorH, floorW, floorH)
    # Generate wall-wall planes in the ceiling view.
    if force_cuboid:
        xy_cor = gen_ww_cuboid(xy, gpid, tol)
    else:
        xy_cor = gen_ww_general_with_order_constraints(init_coorx, xy, gpid, tol)
    # Intersect each wall with its successor to obtain corner (x, y) pairs.
    cor = []
    for cur, nxt in zip(xy_cor, xy_cor[1:] + xy_cor[:1]):
        if cur['type'] == 1:
            cor.append((nxt['val'], cur['val']))
        else:
            cor.append((cur['val'], nxt['val']))
    # Ceiling view back to normal (equirect) view, rotated so the leftmost
    # corner comes first.
    cor = np_xy2coor(np.array(cor), z, coorW, coorH, floorW, floorH)
    cor = np.roll(cor, -2 * cor[::2, 0].argmin(), axis=0)
    return cor, xy_cor
def create_door_xys(floor_xy, xs_seg_wd_, xs_seg_dw_, ww_loc_padded, floor_z, u, W):
    """Map door boundary columns onto floor-plan (x, y) points.

    :param floor_xy: (m, 2) floor-plan corners of the room polygon
    :param xs_seg_wd_: image x-columns of wall->door transitions (left bounds)
    :param xs_seg_dw_: image x-columns of door->wall transitions (right bounds)
    :param ww_loc_padded: wall-wall corner columns padded with 0 and W
    :param floor_z: unused here; kept for signature compatibility
    :param u: per-corner horizontal angles matching floor_xy
    :param W: panorama width in pixels
    :return: (door_xys_lb, door_xys_rb) lists of [x, y, u_door, wall_idx]
    """
    # Fix near wall-wall corner door boundary bug: nudge boundary columns that
    # coincide exactly with a wall-wall corner off the corner.
    # BUGFIX: the previous `a.reshape(-1, 1) in b.reshape(1, -1)` collapsed to a
    # single bool (ndarray.__contains__), so np.where shifted element 0 or
    # nothing; np.isin gives the intended element-wise membership mask.
    xs_seg_wd_[np.isin(xs_seg_wd_, ww_loc_padded)] += 1
    xs_seg_dw_[np.isin(xs_seg_dw_, ww_loc_padded)] -= 1
    # indicator == 0 marks which wall interval [corner_i, corner_{i+1}) each
    # door boundary column falls into.
    wd_bounds_left = np.sign(xs_seg_wd_.reshape(-1, 1) - ww_loc_padded.reshape(1, -1))
    wd_bounds_right = np.sign(xs_seg_wd_.reshape(-1, 1) - np.roll(ww_loc_padded, -1, axis=0).reshape(1, -1))
    indicator_wd = wd_bounds_right + wd_bounds_left
    indicator_wd[:, 0] = indicator_wd[:, 0] * indicator_wd[:, -2]
    indicator_wd = indicator_wd[:, :-2]
    indicator_wd = np.roll(indicator_wd, -1, axis=1)
    dw_bounds_left = np.sign(xs_seg_dw_.reshape(-1, 1) - ww_loc_padded.reshape(1, -1))
    dw_bounds_right = np.sign(xs_seg_dw_.reshape(-1, 1) - np.roll(ww_loc_padded, -1, axis=0).reshape(1, -1))
    indicator_dw = dw_bounds_right + dw_bounds_left
    indicator_dw[:, 0] = indicator_dw[:, 0] * indicator_dw[:, -2]
    indicator_dw = indicator_dw[:, :-2]
    indicator_dw = np.roll(indicator_dw, -1, axis=1)
    u_doors_lb = np_coorx2u(xs_seg_wd_, W)
    u_doors_rb = np_coorx2u(xs_seg_dw_, W)
    door_xys_lb = []
    door_xys_rb = []
    for idx in range(0, floor_xy.shape[0]):
        start_xy = floor_xy[idx, :]
        end_xy = floor_xy[(idx + 1) % floor_xy.shape[0], :]
        # Door angles that fall on wall segment idx.
        ud_lb_selected = u_doors_lb[np.where(
            indicator_wd[:, idx].reshape(-1) == 0)]
        ud_rb_selected = u_doors_rb[np.where(
            indicator_dw[:, idx].reshape(-1) == 0)]
        for u_door in ud_lb_selected:
            door_xy = interp_point_by_u(start_xy, end_xy, u[idx],
                                        u[(idx + 1) % floor_xy.shape[0]],
                                        u_door)
            door_xys_lb.append(np.append(door_xy, [u_door, idx]))
        for u_door in ud_rb_selected:
            door_xy = interp_point_by_u(start_xy, end_xy, u[idx],
                                        u[(idx + 1) % floor_xy.shape[0]],
                                        u_door)
            door_xys_rb.append(np.append(door_xy, [u_door, idx]))
    return door_xys_lb, door_xys_rb
def gen_doors_from_ordered_segs(cor_id,
                                xs_seg_wd_,
                                xs_seg_dw_,
                                lb_scores,
                                rb_scores,
                                W=1024,
                                H=512,
                                cam_height=1.6):
    """Pair left/right door boundary columns into door segments on the floor plan.

    Greedily matches wall->door (left) and door->wall (right) boundaries that
    lie on the same wall, pass an ordering check, and have a plausible width;
    the same-wall tolerance is relaxed as attempts fail.

    :param cor_id: (2m, 2) ordered layout corners (ceiling/floor interleaved)
    :param xs_seg_wd_: image x-columns of wall->door transitions
    :param xs_seg_dw_: image x-columns of door->wall transitions
    :param lb_scores, rb_scores: per-boundary confidence scores (rb_scores is
        rolled with its columns; lb_scores currently unused in matching)
    :return: flat list of matched door endpoint (x, y) pairs on the floor plan
    """
    # image mirror effect, so we flip the y-axis, then left bound and right bound are swapped
    # ww_loc = cor_id[::2, 0].astype(int)
    final_door_xys = []
    ww_loc_padded = np.append(np.append(0, cor_id[::2, 0].astype(int)), W)
    floor_z = -cam_height
    floor_xy = np_coor2xy(cor_id[1::2], floor_z, W, H, floorW=1, floorH=1)
    u = np_coorx2u(cor_id[1::2, 0], W)
    if((xs_seg_dw_.shape[0] == 0) or (xs_seg_wd_.shape[0] == 0)):
        return final_door_xys
    # Rotate the right-bound list until the first left bound precedes it.
    roll_max_len = xs_seg_dw_.shape[0]
    while ((xs_seg_wd_[0] > xs_seg_dw_[0]) and roll_max_len>0):
        xs_seg_dw_ = np.roll(xs_seg_dw_, -1, axis=0)
        rb_scores = np.roll(rb_scores, -1, axis=0)
        roll_max_len -= 1
    door_xys_lb, door_xys_rb = create_door_xys(floor_xy, xs_seg_wd_,
                                               xs_seg_dw_, ww_loc_padded,
                                               floor_z, u, W)
    lb_idx, rb_idx = 0, 0
    # BUGFIX: total candidate count previously summed door_xys_rb twice.
    ori_door_point_num = len(door_xys_lb) + len(door_xys_rb)
    fail_count = ori_door_point_num * 2 + 1
    repeat_check_l = 0
    repeat_check_r = 0
    same_wall_threshold = 0.05
    while (len(door_xys_lb) > 0 and len(door_xys_rb) > 0 and fail_count > 0):
        lb_idx = lb_idx % len(door_xys_lb)
        rb_idx = rb_idx % len(door_xys_rb)
        fail_count -= 1
        # Relax the same-wall tolerance as the attempt budget shrinks.
        if (fail_count == ori_door_point_num):
            same_wall_threshold = same_wall_threshold * 1.5
        if (fail_count == ori_door_point_num // 2):
            same_wall_threshold = same_wall_threshold * 1.5
        door_lb = door_xys_lb[lb_idx]
        door_rb = door_xys_rb[rb_idx]
        door_vec = -door_rb[:2] + door_lb[:2]
        same_wall_check = min(abs(door_vec)) < same_wall_threshold
        if (not same_wall_check):
            # not the same wall, so push left boundary
            #if ((lb_scores[lb_idx] > rb_scores[rb_idx])
            if (repeat_check_l < len(door_xys_lb)):
                rb_idx += 1
                repeat_check_l += 1
                repeat_check_r = 0
            else:
                lb_idx += 1
                repeat_check_r += 1
                repeat_check_l = 0
            continue
        # Left bound must come before the right bound along the wall direction.
        axis = np.argmin(abs(door_vec))
        if (axis == 0):
            bounds_order_check = (door_lb[0] > 0
                                  and door_vec[1] < 0) or (door_lb[0] < 0
                                                           and door_vec[1] > 0)
        else:
            bounds_order_check = (door_lb[1] > 0 and door_vec[0] > 0) or (door_lb[1] < 0 and door_vec[0] < 0)
        if (not bounds_order_check):
            rb_idx += 1
            repeat_check_l += 1
            repeat_check_r = 0
            continue
        # Reject implausibly narrow (< 0.2) or wide (> 3.5) doors.
        min_width_check = max(abs(door_vec)) > 0.2
        if (not min_width_check):
            rb_idx += 1
            repeat_check_l += 1
            repeat_check_r = 0
            continue
        max_width_check = max(abs(door_vec)) < 3.5
        if (not max_width_check):
            lb_idx += 1
            repeat_check_r += 1
            repeat_check_l = 0
            continue
        final_door_xys.append(door_xys_lb[lb_idx][:2])
        door_xys_lb.pop(lb_idx)
        # lb_scores = np.delete(lb_scores, lb_idx)
        final_door_xys.append(door_xys_rb[rb_idx][:2])
        door_xys_rb.pop(rb_idx)
        # rb_scores = np.delete(rb_scores, rb_idx)
    return final_door_xys
DMH-Net | DMH-Net-main/misc/__init__.py | 0 | 0 | 0 | py | |
DMH-Net | DMH-Net-main/e2plabel/main.py | import os
import pickle
from typing import Optional, List
import numpy as np
import py360convert
from matplotlib import pyplot as plt
from .e2plabelconvert import generateOnePerspectiveLabel, VIEW_ARGS, VIEW_NAME, VIEW_SIZE
# Root of the LayoutNet-format training split processed by this script.
data_dir = "../PanoHough/data/layoutnet_dataset/train"
# Plot color per line class.
COLORS = {
    0: "red",  # vertical wall line: red
    1: "green",  # ceiling line: green
    2: "blue",  # floor line: blue
}
def clearAxesLines(ax: plt.Axes):
    """Strip ticks and hide all four spines so the axes shows only its content."""
    ax.set_xticks([])
    ax.set_yticks([])
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(False)
# Grid cell of each cube face on the 3x4 preview figure.
# Two-element entries are [row, col]; the four-element "E" (equirect panorama)
# entry is a [row_start, row_stop, col_start, col_stop) slice.
DRAW_CUBE_POSITIONS = {
    "F": [1, 1],
    "R": [1, 2],
    "B": [1, 3],
    "L": [1, 0],
    "U": [0, 1],
    "D": [2, 1],
    "E": [0, 1, 2, 4],
}
def getCubeAxes(fig: plt.Figure, view_name):
    """Create the (decluttered) subplot axes for one cube face on a 3x4 grid."""
    grid = fig.add_gridspec(3, 4, hspace=0, wspace=0)
    pos = DRAW_CUBE_POSITIONS[view_name]
    if len(pos) == 2:
        row, col = pos
        ax: plt.Axes = fig.add_subplot(grid[row, col])
    else:
        r0, r1, c0, c1 = pos
        ax: plt.Axes = fig.add_subplot(grid[r0:r1, c0:c1])
    clearAxesLines(ax)
    return ax
def drawOnePerspectivePreview(axs: Optional[List[plt.Axes]], p_img, r):
    """Draw one perspective view with its labelled points and lines on each axes."""
    points, point_types, lines = r["points"], r["point_types"], r["lines"]
    if axs is None:
        axs = []
    for ax in axs:
        ax.imshow(p_img)
        # Scatter the corners flagged with type 1.
        sel = point_types == 1
        ax.scatter(points[sel, 0], points[sel, 1])
        for seg in lines:
            ax.plot(seg[[3, 5]], seg[[4, 6]], color=COLORS[int(seg[2])])
        ax.set_xlim(0, p_img.shape[1])
        ax.set_ylim(p_img.shape[0], 0)
def main(data_dir):
    """Convert panorama layout labels into per-face perspective labels.

    Reads each image in ``data_dir/img`` with its corner label in
    ``data_dir/label_cor``, generates the six perspective views and labels,
    optionally saves per-view and cube-montage previews, and pickles the
    per-face label list to ``data_dir/label_p``.
    """
    SHOW_ORIGIN = False
    SAVE_PREVIEW_P = True
    SAVE_PERSPECTIVE_IMG_IN_LABEL_FILE = True
    img_dir = os.path.join(data_dir, "img")
    label_cor_dir = os.path.join(data_dir, "label_cor")
    preview_p_dir = os.path.join(data_dir, "preview_p")
    cube_view_dir = os.path.join(preview_p_dir, "cube")
    label_p_dir = os.path.join(data_dir, "label_p")
    # Create the output directories.
    os.makedirs(label_p_dir, exist_ok=True)
    for name in VIEW_NAME:
        os.makedirs(os.path.join(preview_p_dir, name), exist_ok=True)
    os.makedirs(cube_view_dir, exist_ok=True)
    names = []  # NOTE(review): unused
    for i, name in enumerate(os.listdir(img_dir)):
        img_name = os.path.join(img_dir, name)
        img = plt.imread(img_name)
        basename = os.path.splitext(name)[0]
        label_cor_name = os.path.join(label_cor_dir, basename + ".txt")
        # Corner file: one "x y" integer pair per line.
        with open(label_cor_name, "r") as label_fileobj:
            label = np.array([[int(t) for t in s.split(" ")] for s in label_fileobj.readlines()])
        cube_view_fig = plt.Figure()
        cube_ax = getCubeAxes(cube_view_fig, "E")
        cube_ax.imshow(img)
        cube_ax.scatter(label[:, 0], label[:, 1])
        if SHOW_ORIGIN:
            plt.imshow(img)
            plt.scatter(label[:, 0], label[:, 1])
            plt.show()
        result = []
        for view_idx, (view_name, view) in enumerate(zip(VIEW_NAME, VIEW_ARGS)):
            img_save_path = os.path.join(preview_p_dir, view_name, basename + ".png")
            p_ax = [getCubeAxes(cube_view_fig, view_name)]
            if SAVE_PREVIEW_P:
                p_ax.append(plt.gca())
            p_img = py360convert.e2p(img, *view, VIEW_SIZE)
            r = generateOnePerspectiveLabel(img, label, *view, VIEW_SIZE)
            drawOnePerspectivePreview(p_ax, p_img, r)
            r["name"] = view_name
            if SAVE_PERSPECTIVE_IMG_IN_LABEL_FILE:
                r["img"] = p_img
            result.append(r)
            if SAVE_PREVIEW_P:
                plt.savefig(img_save_path)
                plt.clf()
        cube_view_fig.savefig(os.path.join(cube_view_dir, basename + ".jpg"))
        plt.close(cube_view_fig)
        with open(os.path.join(label_p_dir, basename + ".pkl"), "wb") as f:
            pickle.dump(result, f)
        print("%d %s" % (i, img_name))
# Script entry point: process the default dataset directory.
if __name__ == '__main__':
    main(data_dir)
| 3,973 | 30.539683 | 97 | py |
DMH-Net | DMH-Net-main/e2plabel/convertExUtils.py | from functools import reduce
import numpy as np
import py360convert
from py360convert import rotation_matrix
def rotationMatrix(u, v, in_rot):
    """Compose the x-, y- and in-plane rotations used by the e2p mapping."""
    rot_x = rotation_matrix(v, [1, 0, 0])
    rot_y = rotation_matrix(u, [0, 1, 0])
    # In-plane rotation is taken about the rotated forward (z) axis.
    forward = np.array([0, 0, 1.0]).dot(rot_x).dot(rot_y)
    rot_in = rotation_matrix(in_rot, forward)
    return rot_x.dot(rot_y).dot(rot_in)
def unitxyzToPerspectiveCoord(input, fov_deg, u_deg, v_deg, out_hw, in_rot_deg=0):
    """
    Convert unit-sphere xyz points to pixel xy coordinates on one perspective image.
    :param input: (n, 3) points in unit-sphere xyz coordinates
    :param fov_deg: same as py360convert.e2p
    :param u_deg: same as py360convert.e2p
    :param v_deg: same as py360convert.e2p
    :param out_hw: same as py360convert.e2p
    :param in_rot_deg: same as py360convert.e2p
    :return: tuple. First element: (n, 2) pixel coordinates of each point;
        second element: (n, 3) coordinates of each point in the perspective
        camera frame (z normalized to |z| = 1).
    """
    assert len(input.shape) == 2 and input.shape[1] == 3
    h_fov, v_fov = fov_deg[0] * np.pi / 180, fov_deg[1] * np.pi / 180
    in_rot = in_rot_deg * np.pi / 180
    u = -u_deg * np.pi / 180
    v = v_deg * np.pi / 180
    rotMat = rotationMatrix(u, v, in_rot)
    imgXyz = input.dot(rotMat.T)  # for a rotation matrix, the inverse equals the transpose
    imgXyz = imgXyz / np.abs(imgXyz[:, 2:])  # normalize so |z| becomes 1
    # Derive the visible range on the z = 1 plane from the field of view.
    x_max = np.tan(h_fov / 2)
    y_max = np.tan(v_fov / 2)
    # Linearly map the visible range onto the pixel range.
    normed_pos = imgXyz[:, :2] / np.array([x_max, -y_max], dtype=imgXyz.dtype) / 2 + 0.5
    pos = normed_pos * np.array([out_hw[1] - 1, out_hw[0] - 1], dtype=imgXyz.dtype)
    return pos, imgXyz
def coordE2P(input_pts, img, fov_deg, u_deg, v_deg, out_hw, in_rot_deg=0):
    """
    Convert equirect pixel coordinates to coordinates on one perspective view.
    :param input_pts: (n, 2) pixel coordinates on the equirect panorama
    :param img: the equirect panorama image (only its shape is used)
    :param fov_deg: same as py360convert.e2p
    :param u_deg: same as py360convert.e2p
    :param v_deg: same as py360convert.e2p
    :param out_hw: same as py360convert.e2p
    :param in_rot_deg: same as py360convert.e2p
    :return: tuple. First element: (n, 2) per-point coordinates; second
        element: int8 (n) per-point type: 0 = outside the 180-degree frustum
        (behind the camera; xy meaningless), 1 = in front of the camera but
        outside the image, 2 = inside the image. Third element: (n, 3)
        perspective-frame xyz (from unitxyzToPerspectiveCoord).
    """
    uv = py360convert.coor2uv(input_pts, img.shape[0], img.shape[1])
    xyz = py360convert.uv2unitxyz(uv)
    result, imgXyz = unitxyzToPerspectiveCoord(xyz, fov_deg, u_deg, v_deg, out_hw, in_rot_deg)
    # z > 0 means the point is in front of the camera.
    type = (imgXyz[:, 2] > 0).astype(np.int8)
    inimage_mask = reduce(np.logical_and, [type, result[:, 0] >= 0, result[:, 0] <= out_hw[1] - 1, result[:, 1] >= 0,
                                           result[:, 1] <= out_hw[0] - 1])
    type[inimage_mask] = 2
    return result, type, imgXyz
| 2,512 | 35.42029 | 117 | py |
DMH-Net | DMH-Net-main/e2plabel/e2plabelconvert.py | from typing import List, Dict
import numpy as np
import py360convert
from .convertExUtils import coordE2P
# View order: F R B L U D
#     -----
#     | 4 |
#     -----
# | 3 | 0 | 1 | 2 |
#     -----
#     | 5 |
#     -----
# Each entry: (fov_deg, u_deg, v_deg) as accepted by py360convert.e2p.
VIEW_ARGS = [
    [(90, 90), 0, 0],
    [(90, 90), 90, 0],
    [(90, 90), 180, 0],
    [(90, 90), -90, 0],
    [(90, 90), 0, 90],
    [(90, 90), 0, -90],
]
VIEW_NAME = ['F', 'R', 'B', 'L', 'U', 'D']
VIEW_SIZE = (512, 512)  # size of each perspective image (H, W)
def generatePerspective(e_img: np.ndarray, cor: np.ndarray, view_name=VIEW_NAME, view_args=VIEW_ARGS,
                        view_size=VIEW_SIZE) -> List[Dict[str, np.ndarray]]:
    """
    Generate the six perspective views and their labels from a panorama.
    :param e_img: the equirect panorama image
    :param cor: corner coordinates on the panorama image
    :return: list of six dicts, one per face: {
        img: perspective image,
        name: face name,
        points: (n, 2) float x, y coordinates of the points in the image,
        point_types: int (n), 0 = outside the 180-degree frustum (behind the
            camera; xy meaningless), 1 = in front of the camera but outside the
            image, 2 = inside the image,
        lines: visible lines; per line: two endpoint indices into points, the
            line class (0 vertical wall, 1 ceiling, 2 floor), start x, y,
            end x, y, and the on-view direction
    }
    """
    result = []
    for name, view in zip(view_name, view_args):
        p_img = py360convert.e2p(e_img, *view, view_size)
        # BUGFIX: labels are generated at the caller-supplied view_size so they
        # match the rendered image (previously used the global VIEW_SIZE).
        r = generateOnePerspectiveLabel(e_img, cor, *view, view_size)
        r["name"] = name
        r["img"] = p_img
        result.append(r)
    return result
def linesPostProcess(lines, img_hw, is_updown_view, return_mask=False):
    """
    Filter out invisible lines and clip endpoints to the image border.
    :param lines: (k, 7) lines: per line two endpoint indices into points,
        the line class (0 vertical wall, 1 ceiling, 2 floor), start x, y,
        end x, y
    :param img_hw: (2) the image height and width
    :return: (m, 8) the m visible lines. The first 7 numbers are as above;
        the 8th is the line's direction on this view: 0 vertical,
        1 horizontal, 2 passing through the image center
    """
    # Per the derivation of Oct 12: for points produced by py360convert's
    # e2p/e2c transforms, the 3D range -1..1 maps to pixels 0..h-1 of the 2D
    # image, not 0..h! E.g. with h = 512, 2D coordinate 511.5 is meaningless
    # (not visible on the current face), hence this correction. Analysis
    # suggests the off-by-one would barely change results (at most one pixel),
    # but it is fixed anyway. The same fix was applied to
    # unitxyzToPerspectiveCoord, coordE2P, generateOnePerspectiveLabel and
    # lineCoordToRatio.
    img_hw = [img_hw[0] - 1, img_hw[1] - 1]

    def processPoint(point, k):
        # Clip a line endpoint along slope k to the image border; return the
        # clipped point, the point itself if already inside, or None if the
        # line through it never enters the image.
        xInRange = False
        if point[0] < 0:
            y = k * (0 - point[0]) + point[1]
            if 0 <= y <= img_hw[0]:
                return 0, y
        elif point[0] > img_hw[1]:
            y = k * (img_hw[1] - point[0]) + point[1]
            if 0 <= y <= img_hw[0]:
                return img_hw[1], y
        else:
            xInRange = True
        if point[1] < 0:
            x = (0 - point[1]) / k + point[0]
            if 0 <= x <= img_hw[1]:
                return x, 0
        elif point[1] > img_hw[0]:
            x = (img_hw[0] - point[1]) / k + point[0]
            if 0 <= x <= img_hw[1]:
                return x, img_hw[0]
        else:
            if xInRange:
                return point
        return None

    result = []
    mask = []
    for line in lines:
        k = (line[6] - line[4]) / (line[5] - line[3])
        p1Res = processPoint(line[3:5], k)
        p2Res = processPoint(line[5:7], k)
        if p1Res is not None and p2Res is not None:
            # Classify the line's on-view direction.
            if line[2] == 0:
                direct = 2 if is_updown_view else 0
            else:
                if is_updown_view:
                    direct = 1 if -1 <= k <= 1 else 0
                else:
                    # Does the line cross the vertical image midline?
                    yLR = (k * (0 - p1Res[0]) + p1Res[1], k * (img_hw[1] - p1Res[0]) + p1Res[1])
                    if (yLR[0] < img_hw[0] / 2 and yLR[1] > img_hw[0] / 2) or (
                            yLR[1] < img_hw[0] / 2 and yLR[0] > img_hw[0] / 2):
                        direct = 2
                    else:
                        direct = 1
            result.append(np.concatenate((line[0:3], p1Res, p2Res, (direct,))))
            mask.append(True)
        else:
            mask.append(False)
    if not return_mask:
        return result
    else:
        return result, mask
def generateOnePerspectiveLabel(e_img, e_label, fov_deg, u_deg, v_deg, out_hw,
                                in_rot_deg=0):
    """
    Generate the label information for one perspective view.
    :param e_img: the equirect panorama image
    :param e_label: (n, 2) ground-truth corner coordinates on the panorama
    :param fov_deg: same as py360convert.e2p
    :param u_deg: same as py360convert.e2p
    :param v_deg: same as py360convert.e2p
    :param out_hw: same as py360convert.e2p
    :param in_rot_deg: same as py360convert.e2p
    :return: dict with "points", "point_types", "lines".
        points: (n, 2) float x, y coordinates of points on this view.
        point_types: int (n), 0 = outside the 180-degree frustum (behind the
            camera; xy meaningless), 1 = in front of the camera but outside
            the image, 2 = inside the image.
        lines: (k, 8) the k visible lines: two endpoint indices into points,
            the line class (0 vertical wall, 1 ceiling, 2 floor), start x, y,
            end x, y, on-view direction (0 vertical, 1 horizontal,
            2 through-center).
    """
    points, point_types, imgXyz = coordE2P(e_label, e_img, fov_deg, u_deg, v_deg, out_hw, in_rot_deg)
    lines = []
    corner_count = e_label.shape[0]
    # Define the candidate edges. Each entry's first two elements index the
    # corners (in label order); the third is the edge class:
    # 0 vertical wall line, 1 ceiling line, 2 floor line.
    LINES = []
    for i in range(0, corner_count, 2):
        LINES.append([i, i + 1, 0])
        LINES.append([i, (i + 2) % corner_count, 1])
        LINES.append([i + 1, (i + 3) % corner_count, 2])
    for l in LINES:
        if point_types[l[0]] > 0 and point_types[l[1]] > 0:
            # Both endpoints are within the camera's forward 180-degree frustum.
            lines.append(np.concatenate([l, points[l[0]], points[l[1]]]))
        elif point_types[l[0]] == 2 or point_types[l[1]] == 2:
            # Only one endpoint is inside the image; the other is behind the
            # camera. Pick a point on the joining line whose projection lies on
            # the FOV border.
            line = np.concatenate([l, points[l[0]], points[l[1]]])
            if point_types[l[0]] == 2:
                p1 = imgXyz[l[0]]
                p2 = imgXyz[l[1]]
                toFill = 1
            else:
                p1 = imgXyz[l[1]]
                p2 = imgXyz[l[0]]
                toFill = 0
            tantheta = np.tan((180 - fov_deg[0]) / 2 * np.pi / 180)
            # pc: intersection of segment p1-p2 with the FOV plane. k should
            # stay within 0.5~1; pc's x should be very close to the border
            # (1 or -1)?
            k1 = (p2[0] * tantheta - p2[2]) / ((p1[2] - p2[2]) - ((p1[0] - p2[0]) * tantheta))
            k2 = (p2[0] * -tantheta - p2[2]) / ((p1[2] - p2[2]) - ((p1[0] - p2[0]) * -tantheta))
            k = k1 if 0.5 <= k1 <= 1 else k2
            pc = p1 * k + p2 * (1 - k)
            assert 0.5 <= k <= 1 and pc[2] > 0, "k error"
            pc = pc / pc[2]
            assert -0.01 <= (abs(pc[0]) - 1) <= 0.01, "pc error"
            # Map pc onto image pixel coordinates.
            h_fov, v_fov = fov_deg[0] * np.pi / 180, fov_deg[1] * np.pi / 180
            x_max = np.tan(h_fov / 2)
            y_max = np.tan(v_fov / 2)
            normed_pos = pc[:2] / np.array([x_max, -y_max], dtype=imgXyz.dtype) / 2 + 0.5
            pos = normed_pos * np.array([out_hw[1] - 1, out_hw[0] - 1], dtype=imgXyz.dtype)
            line[2 * toFill + 3:2 * toFill + 5] = pos  # fill in pc's image coordinates
            lines.append(line)
    lines = linesPostProcess(lines, out_hw, v_deg != 0)  # drop invisible lines
    result = {
        "points": points,
        "point_types": point_types,
        "lines": lines
    }
    return result
| 7,147 | 36.036269 | 132 | py |
lld-public | lld-public-master/rectify_euroc.py | import cv2
import os
import sys
def prepare_rectifier(conf_path):
    """Build stereo rectification remap tables from an ORB-SLAM-style EuRoC
    YAML config holding LEFT./RIGHT. intrinsics (K), distortion (D),
    rectification rotation (R) and projection (P) matrices."""
    fs = cv2.FileStorage(conf_path, cv2.FILE_STORAGE_READ)
    Kl = fs.getNode("LEFT.K").mat()
    Dl = fs.getNode("LEFT.D").mat()
    Rl = fs.getNode("LEFT.R").mat()
    Pl = fs.getNode("LEFT.P").mat()
    # EuRoC camera resolution is fixed at 752x480.
    wl = 752
    hl = 480
    Kr = fs.getNode("RIGHT.K").mat()
    Dr = fs.getNode("RIGHT.D").mat()
    Rr = fs.getNode("RIGHT.R").mat()
    Pr = fs.getNode("RIGHT.P").mat()
    wr = 752
    hr = 480
    #cv::initUndistortRectifyMap(K_l,D_l,R_l,P_l.rowRange(0,3).colRange(0,3),cv::Size(cols_l,rows_l),CV_32F,*M1l_p,*M2l_p);
    M1l, M2l = cv2.initUndistortRectifyMap(Kl, Dl, Rl, Pl[0:3, 0:3], (wl, hl), cv2.CV_32F)
    M1r, M2r = cv2.initUndistortRectifyMap(Kr, Dr, Rr, Pr[0:3, 0:3], (wr, hr), cv2.CV_32F)
    return M1l, M2l, M1r, M2r
def rectify_sequence(seq_path, out_path, M1, M2):
    """Rectify every PNG in seq_path with the given remap tables and write the
    results to out_path under sequential six-digit names (000000.png, ...)."""
    cnt = 0
    for f in sorted(os.listdir(seq_path)):
        # endswith is stricter than the previous `'.png' in f` substring test.
        if f.endswith('.png'):
            img = cv2.imread(os.path.join(seq_path, f))
            img_out = cv2.remap(img, M1, M2, cv2.INTER_LINEAR)
            # zfill replaces the manual zero-padding loop.
            lbl = str(cnt).zfill(6)
            cv2.imwrite(os.path.join(out_path, lbl + '.png'), img_out)
            cnt += 1
def rectify_dataset(M1l, M2l, M1r, M2r, path_to_dataset, path_to_rect):
    """Rectify both cameras of every EuRoC sequence into numbered
    KITTI-style folders (<idx>/image_0 for left, <idx>/image_1 for right)."""
    seqs = ["MH_01_easy", "MH_02_easy", "MH_03_medium", "MH_04_difficult", "MH_05_difficult", "V1_01_easy", "V1_02_medium", "V1_03_difficult", "V2_01_easy", "V2_02_medium", "V2_03_difficult"]
    for si, s in enumerate(seqs):
        if si < 5:
            # NOTE(review): skips the first five (MH_*) sequences — looks like
            # a resume/debug leftover; confirm before relying on full output.
            continue
        slbl = str(si)
        if len(slbl) < 2:
            slbl = '0' + slbl
        path_to_left = path_to_dataset + '/' + s + '/mav0/cam0/data/'
        path_to_left_rect = path_to_rect + '/' + slbl + '/image_0/'
        if not os.path.exists(path_to_left_rect):
            os.makedirs(path_to_left_rect)
        rectify_sequence(path_to_left, path_to_left_rect, M1l, M2l)
        path_to_right = path_to_dataset + '/' + s + '/mav0/cam1/data/'
        path_to_right_rect = path_to_rect + '/' + slbl + '/image_1/'
        if not os.path.exists(path_to_right_rect):
            os.makedirs(path_to_right_rect)
        rectify_sequence(path_to_right, path_to_right_rect, M1r, M2r)
# CLI: python rectify_euroc.py <euroc_root> <output_root> <stereo_config.yaml>
if __name__ == '__main__':
    euroc_path = sys.argv[1]
    out_path = sys.argv[2]
    conf_path = sys.argv[3]
    M1l, M2l, M1r, M2r = prepare_rectifier(conf_path)
    rectify_dataset(M1l, M2l, M1r, M2r, euroc_path, out_path)
| 2,476 | 35.970149 | 191 | py |
lld-public | lld-public-master/infer.py | import numpy as np
import cv2
import torch
import torch.nn.functional as F
import pylbd
import matplotlib.pyplot as plt
import torch.nn as nn
class FeatureEncoder(nn.Module):
    """Convolutional encoder producing a dense feature map for line description.

    Python 2 code (print statements). `depth` selects how many stride-2 stages
    are used (0-3); `g` is a channel-width multiplier; `is_skip` enables
    skip connections when upsampling back to input resolution.

    NOTE(review): __init__ calls .cuda() on self.margins/self.final_size
    unconditionally, which breaks CPU-only runs — confirm.
    """
    def initialize_l2(self, g=0):
        # Build the convolutional stack; default channel multiplier shrinks
        # as depth grows.
        if g==0:
            g = 4
            if self.depth == 2:
                g = 2
            if self.depth == 3:
                g = 1
        print 'initializing depth= '+str(self.depth)+ 'g='+str(g)
        # g = 1
        self.downsample_init = nn.Upsample(scale_factor=0.5, mode='bilinear')
        if self.is_color:
            self.conv1 = nn.Conv2d(3, 8 * g, kernel_size=3, stride=1, padding=1)
        else:
            self.conv1 = nn.Conv2d(1, 8 * g, kernel_size=3, stride=1, padding=1)
        self.batch1 = nn.BatchNorm2d(8 * g)
        self.conv2 = nn.Conv2d(8 * g, 8 * g, kernel_size=3, stride=1, padding=1)
        self.batch2 = nn.BatchNorm2d(8 * g)
        if self.depth > 0:
            self.conv3 = nn.Conv2d(8 * g, 16 * g, kernel_size=3, stride=2, padding=1)  # 1/2
        else:
            self.conv3 = nn.Conv2d(8 * g, 16 * g, kernel_size=3, stride=1, padding=1)  # 1/2
        self.batch3 = nn.BatchNorm2d(16 * g)
        self.conv4 = nn.Conv2d(16 * g, 16 * g, kernel_size=3, stride=1, padding=1)
        self.batch4 = nn.BatchNorm2d(16 * g)
        k = 16 * g
        if self.depth >= 2:
            self.conv5 = nn.Conv2d(16 * g, 32 * g, kernel_size=3, stride=2, padding=1)  # 1/4
            self.batch5 = nn.BatchNorm2d(32 * g)
            self.conv6 = nn.Conv2d(32 * g, 32 * g, kernel_size=3, padding=1)
            self.batch6 = nn.BatchNorm2d(32 * g)
            k = 32*g
        if self.depth == 3:
            self.conv61 = nn.Conv2d(32 * g, 64 * g, kernel_size=3, stride=2, padding=1)  # 1/4
            self.batch61 = nn.BatchNorm2d(64 * g)
            self.conv62 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1)
            self.batch62 = nn.BatchNorm2d(64 * g)
            k = 64*g
            if self.is_skip:
                # Refinement convs used on the skip-connected upsampling path.
                self.convu1 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1)  #
                self.batchu1 = nn.BatchNorm2d(64 * g)
                # 1/4
                self.convu2 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1)  # 1/4
                self.batchu2 = nn.BatchNorm2d(64 * g)
                self.convu3 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1)  # 1/4
                self.batchu3 = nn.BatchNorm2d(64 * g)
        # self.conv7 = nn.Conv2d(k, k, kernel_size=8, padding=0)
        self.conv7 = nn.Conv2d(k, k, kernel_size=7, padding=3)
        self.batch7 = nn.BatchNorm2d(k)
        self.dropout = nn.Dropout2d()
        if self.is_learnable:
            self.deconv_64 = nn.ConvTranspose2d(k, k, stride=8, kernel_size=3)
            self.deconv_32 = nn.ConvTranspose2d(k, k, stride=4, kernel_size=3)
            self.deconv_16 = nn.ConvTranspose2d(k, k, stride=2, kernel_size=3)
        else:
            self.deconv_64 = nn.Upsample(scale_factor=8, mode='bilinear')
            self.deconv_32 = nn.Upsample(scale_factor=4, mode='bilinear')
            self.deconv_16 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.deconv_final = nn.Upsample(scale_factor=self.upscale_factor, mode='bilinear')
    def __init__(self, is_cuda, is_color=False, upscale_factor=1, is_pyramid=True, depth=2, g=0,
                 is_learnable=False, is_skip = False):
        super(FeatureEncoder, self).__init__()
        self.margins = -1*torch.ones(2).float().cuda()
        self.final_size = -1 * torch.ones(2).float().cuda()
        self.is_learnable = is_learnable
        self.depth = depth
        self.upscale_factor = upscale_factor
        self.is_color = is_color
        self.is_skip = is_skip
        print 'my net init start full'
        self.initialize_l2(g)
        # self._initialize_weights()
        print 'init L2 done'
        self.is_cuda = is_cuda
        self.scale = 1.0
        # self.is_plain = is_plain
        self.is_pyramid = is_pyramid
        self.is_test_mode = False
    def forward(self, x):
        # Encode x, with early exits per configured depth; deeper settings
        # decode back towards input resolution (with skips when enabled).
        # print 'start '+str(x.shape)
        init_shape = x.shape
        x = F.relu(self.batch1(self.conv1(x)), inplace=True)
        # print 'b1 ' + str(x.shape)
        x = F.relu(self.batch2(self.conv2(x)), inplace=True)
        x2 = x
        # print 'b2 ' + str(x.shape)
        x = F.relu(self.batch3(self.conv3(x)), inplace=True)
        # print 'b3 ' + str(x.shape)
        x = F.relu(self.batch4(self.conv4(x)), inplace=True)
        if (self.depth == 0):
            if self.is_skip:
                w = x2.shape[3]
                h = x2.shape[2]
                xm_1 = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
                xm_2 = x[:, x2.shape[1]:, :, :]
                xm = torch.stack([xm_1, xm_2], dim=1)
                xm = xm.view(x.shape)
                x = xm
                # x[:, 0:x2.shape[1], 0:h, 0:w] = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
            x = self.conv7(x)
            return x
        # print 'b4 ' + str(x.shape)
        if (self.depth == 1):
            x = self.batch7(self.conv7(x))
            # print 'b7 ' + str(x.shape)
            x = self.deconv_16(x)
            if self.is_skip:
                w = x2.shape[3]
                h = x2.shape[2]
                x[:, 0:x2.shape[1], 0:h, 0:w] = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
            x = self.deconv_final(x)
            return x
        x4 = x
        x = F.relu(self.batch5(self.conv5(x)), inplace=True)
        # print 'b5 ' + str(x.shape)
        x = F.relu(self.batch6(self.conv6(x)), inplace=True)
        # print 'b6 ' + str(x.shape)
        if self.depth == 2:
            x = self.batch7(self.conv7(x))
            # print 'b7 ' + str(x.shape)
            x = self.deconv_32(x)
            x = self.deconv_final(x)
            return x
        x8 = x
        x = self.batch61(self.conv61(x))
        x = self.batch62(self.conv62(x))
        x = self.batch7(self.conv7(x))
        # print 'b7 ' + str(x.shape)
        if self.is_skip:
            # print('before ups '+str(x.shape))
            x = F.upsample_bilinear(x, x8.shape[2:])
            # print('after ups ' + str(x.shape))
            # Channel counts double per stage, so skip tensors are tiled.
            x = x + x8.repeat(1, 2, 1, 1)
            x = self.batchu1(self.convu1(x))
            x = F.upsample_bilinear(x, x4.shape[2:])
            # print(x4.shape)
            x4r = x4.repeat(1, 4, 1, 1)
            # print(x4r.shape)
            x = x + x4r
            x = self.batchu2(self.convu2(x))
            x = F.upsample_bilinear(x, init_shape[2:])
            x = x + x2.repeat(1,8,1,1)
            x = self.batchu3(self.convu3(x))
        else:
            x = self.deconv_64(x)
        x = self.deconv_final(x)
        return x
def sample_descriptors(x, lines, w_img, h_img):
    """Sample per-line descriptors from a dense feature map.

    :param x: (N, C, h_map, w_map) feature map
    :param lines: (N, n_lines, pts_per_line, 2) sample-point pixel coordinates
        in the original image frame (entries <= 0 are treated as padding)
    :param w_img, h_img: original image size (the feature map may be smaller)
    :return: (N, C, n_lines) L2-normalized descriptors (point features summed
        along each line)
    """
    # FIX: work on a copy so the caller's `lines` tensor is not mutated in
    # place (the original wrote normalized coordinates back into it).
    lines = lines.clone()
    w_map = x.shape[3]
    h_map = x.shape[2]
    # Offset by the (centered) margin between image and feature map.
    m_x = (w_img-w_map)/2.0
    m_y = (h_img-h_map)/2.0
    lines_x_flat = lines[:, :, :, 0].contiguous().view(-1)
    good_lines = (lines_x_flat > 0).nonzero()
    lines_x_flat[good_lines] = lines_x_flat[good_lines] + m_x*torch.ones_like(lines_x_flat[good_lines])
    # Map pixel coordinates into grid_sample's [-1, 1] range.
    lines_x_flat[good_lines] = 2.0/w_map*lines_x_flat[good_lines] - torch.ones_like(lines_x_flat[good_lines])
    lines[:, :, :, 0] = lines_x_flat.view(lines[:, :, :, 0].shape)
    lines_y_flat = lines[:, :, :, 1].contiguous().view(-1)
    good_lines = (lines_y_flat > 0).nonzero()
    lines_y_flat[good_lines] = lines_y_flat[good_lines] + m_y*torch.ones_like(lines_y_flat[good_lines])
    lines_y_flat[good_lines] = 2.0 / h_map * lines_y_flat[good_lines] - torch.ones_like(lines_y_flat[good_lines])
    lines[:, :, :, 1] = lines_y_flat.view(lines[:, :, :, 1].shape)
    lds = F.grid_sample(x, lines, mode='bilinear', padding_mode='border')
    # Pool point features along each line, then L2-normalize per line.
    avg_lds = torch.sum(lds, 3)
    avg_lds = F.normalize(avg_lds, p=2, dim=1)
    return avg_lds
def prepare_grid_numpy_vec(ld, s, pt_per_line):
    """Sample pt_per_line evenly spaced points along each line segment.

    :param ld: (>=4, n) array whose first four rows are x1, y1, x2, y2 per line
    :param s: scale factor applied to the sampled coordinates
    :return: (n, pt_per_line, 2) array of point coordinates, or an empty list
        when there are no lines
    """
    if ld.shape[1] == 0:
        return []
    start, end = ld[0:2, :], ld[2:4, :]
    # Midpoint-rule fractions: (2j + 1) / (2 * pt_per_line) for j = 0..p-1.
    fracs = [(1.0 + 2 * j) / (2 * pt_per_line) for j in range(pt_per_line)]
    samples = [(s * (start * (1 - c) + end * c)).T for c in fracs]
    return np.stack(samples, axis=2).transpose(0, 2, 1)
def prepare_input(img, lines, is_cuda):
    """Convert a grayscale image and EDLine detections into network tensors."""
    h, w = img.shape[0], img.shape[1]
    img_np = np.asarray(img.reshape(1, 1, h, w)).astype(float)
    img_t = torch.from_numpy(img_np).float()
    # Columns 7:11 hold the segment endpoints (x1, y1, x2, y2).
    endpoints = lines[:, 7:11].transpose()
    grid = prepare_grid_numpy_vec(endpoints, 1.0, pt_per_line=5)
    grid = grid.reshape((1,) + grid.shape)
    lines_t = torch.from_numpy(grid).float()
    if is_cuda:
        img_t, lines_t = img_t.cuda(), lines_t.cuda()
    return img_t, lines_t
def match_using_lbd(img1, img2, n_oct, factor):
    """Baseline: detect, describe and match lines between two images with the
    classic LBD descriptor; returns a visualization image of the matches."""
    gray1 = cv2.cvtColor(img1, cv2.COLOR_RGB2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY)
    lbd1, lines1 = pylbd.detect_and_describe(gray1, n_oct, factor)
    lbd2, lines2 = pylbd.detect_and_describe(gray2, n_oct, factor)
    matches = pylbd.match_lbd_descriptors(lbd1, lbd2)
    debug_lbd = pylbd.visualize_line_matching(img1, lines1, img2, lines2, matches, True)
    return debug_lbd
def test_line_matching(weights_path, is_cuda):
    """Visually compare LBD matching with the learned line descriptors on a
    KITTI stereo pair; writes test_lbd.png / test_lld.png and shows both."""
    n_oct = 1
    factor = 1.44
    img1 = cv2.imread('kitti_8_left.png')
    img2 = cv2.imread('kitti_8_right.png')
    w_img = img1.shape[1]
    h_img = img1.shape[0]
    # Baseline: handcrafted LBD matching.
    debug_lbd = match_using_lbd(img1, img2, n_oct, factor)
    cv2.imwrite('test_lbd.png', debug_lbd)
    plt.figure('LBD')
    plt.imshow(debug_lbd)
    # Learned descriptors from the trained encoder.
    encoder_net = FeatureEncoder(is_cuda=True, is_color=False, is_pyramid = False, depth=3, g=0)
    if is_cuda:
        encoder_net = encoder_net.cuda()
    checkpoint = torch.load(weights_path)
    encoder_net.load_state_dict(checkpoint['state_dict'])

    def describe(img):
        # Detect EDLines and sample learned descriptors along them.
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        det = pylbd.detect_edlines(gray, n_oct, factor)
        img_torch, lines_torch = prepare_input(gray, det, is_cuda)
        feat = encoder_net(img_torch)
        return det, sample_descriptors(feat, lines_torch, w_img, h_img).detach().cpu().numpy()

    lines1, d1 = describe(img1)
    lines2, d2 = describe(img2)
    nd1 = d1.shape[2]
    nd2 = d2.shape[2]
    d1 = d1.reshape(64, -1).transpose()
    d2 = d2.reshape(64, -1).transpose()
    match_lst = []
    for i in range(0, nd1):
        # Brute-force nearest neighbour; dist = 2 - dot for unit-norm
        # descriptors, first minimum wins on ties (as before).
        best_match = int(np.argmin(2 - d2.dot(d1[i]))) if nd2 > 0 else 0
        match_lst.append([i, best_match, 0, 1])
    matches_lld = np.asarray(match_lst).astype(int)
    debug_lld_img = pylbd.visualize_line_matching(img1, lines1, img2, lines2, matches_lld, True)
    cv2.imwrite('test_lld.png', debug_lld_img)
    plt.figure('LLD')
    plt.imshow(debug_lld_img)
    plt.show()
test_line_matching(weights_path = '/storage/projects/lld/1.pyh.tar', is_cuda = False) | 11,351 | 37.744027 | 113 | py |
lld-public | lld-public-master/prepare_kitti_euroc_combined.py | import os
import shutil
def combine_datasets(kitti_path, euroc_rect_path, target_path):
    """Copy a fixed interleaving of KITTI and EuRoC sequences into one tree.

    Each (source, sequence) entry below is copied into a consecutively
    numbered, zero-padded directory under target_path; source 0 means
    KITTI, source 1 means (rectified) EuRoC.
    """
    ds_def = [(0, 0), (0, 1), (0, 2), (0, 3), (0, 4),
              (1, 0), (1, 1), (1, 3), (1, 5), (1, 7),
              (0, 7), (1, 9), (0, 6), (0, 5), (0, 8),
              (0, 9), (0, 10), (1, 2), (1, 4), (1, 6),
              (1, 8), (1, 10)]
    for out_idx, (source, seq) in enumerate(ds_def):
        src_name = str(seq).zfill(2)
        dst_name = str(out_idx).zfill(2)
        src_root = kitti_path if source == 0 else euroc_rect_path
        shutil.copytree(src_root + '/' + src_name + '/',
                        target_path + '/' + dst_name)
if __name__ == '__main__':
    # BUG FIX: `sys` was referenced without ever being imported, so the CLI
    # entry point crashed with a NameError; import it locally here.
    import sys
    kitti_path = sys.argv[1]
    euroc_rect_path = sys.argv[2]
    out_path = sys.argv[3]
    combine_datasets(kitti_path,
                     euroc_rect_path,
                     out_path)
| 1,267 | 22.924528 | 63 | py |
lld-public | lld-public-master/train.py | import torch
import data.batched as ba
import cnn.net_multibatch as nmb
import torch.optim as optim
import os
import train.multibatch_trainer as mbt
import tqdm
from torch.autograd import Variable
import numpy as np
# Output directory for checkpoints, reports and descriptor dumps.
dir_path = '../traindata/'
# Number of points sampled along each line segment.
ptnum = 5
# Whether to add noise augmentation to the training images.
is_noisy = False
def get_net():
    """Build the line-descriptor encoder (grayscale, depth 3) on the GPU."""
    encoder = nmb.FeatureEncoder(is_cuda=True, is_color=False, is_pyramid = False, depth=3, g=0)
    return encoder.cuda()
def train_multibatch(se=-1):
    """Train the encoder with the hard-negative loss, resuming from epoch se."""
    loaders = ba.get_combined_training_v2(pt_per_line=ptnum, is_noisy=is_noisy)
    train_loader, test_loader = loaders[0], loaders[1]
    net = get_net()
    opt = optim.Adam(net.parameters(), lr = 1e-4)
    mbt.run_training(dir_path, train_loader, test_loader, net, opt, is_triplet=False, start_epoch=se, vdseqid='7')
def eval_multibatch(ep_id):
    """Dump validation descriptors for checkpoint ep_id on hold-out sequence 9."""
    _, _, holdout_loader = ba.get_combined_training(pt_per_line=ptnum)
    net = get_net()
    mbt.run_validation(dir_path, holdout_loader, net, ep_id, is_triplet=False, vqseqid='9', is_save=True)
def test_with_descriptors_hetero(ep_id=0):
    """Export descriptors for the heterogeneous KITTI/EuRoC test split.

    Global sequence ids 14-16 map to KITTI sequences 8-10 and ids 17-19 to
    EuRoC sequences 2/4/6.
    """
    net = get_net()
    seq_map = {}
    for key, kitti_seq in zip(np.arange(14, 17), [8, 9, 10]):
        seq_map[key] = ('kitti', kitti_seq)
    for key, euroc_seq in zip(np.arange(17, 20), [2, 4, 6]):
        seq_map[key] = ('euroc', euroc_seq)
    mbt.run_test_heterogen(dir_path, net, seq_map, ep_id=ep_id, is_triplet=False, do_savedesc=True,
                           pt_per_line=ptnum)
# Full pipeline: train, dump validation descriptors for epoch 1, then export
# test descriptors for the heterogeneous KITTI/EuRoC split.
train_multibatch()
eval_multibatch(1)
test_with_descriptors_hetero(1)
| 1,661 | 29.777778 | 128 | py |
lld-public | lld-public-master/cnn/__init__.py | 0 | 0 | 0 | py | |
lld-public | lld-public-master/cnn/net_multibatch.py | import torch.nn as nn
import torch
import torch.nn.functional as F
import tqdm
from torch.autograd import Variable
import torch.optim as optim
import time
import sklearn.metrics as metrics
import numpy as np
def compute_distances(x, pos_inds, neg_mask):
    """Descriptor distances between the anchor frame (batch entry 0) and the rest.

    x:        b x C x N descriptor tensor; entry 0 is the anchor frame.
    pos_inds: b x n_p long tensor; pos_inds[i, j] is the column of x[i]
              holding the j-th positive track, or negative when missing.
    neg_mask: b x N x n_p 0/1 mask selecting valid negatives per positive.

    Returns (dists_pos, pos_mask_part, dists_neg, neg_mask_part):
    dists_pos is (b-1) x n_p anchor-to-positive distances; dists_neg is
    (b-1) x N x n_p anchor-positive-to-candidate distances with masked-out
    entries pinned to max_dist = 2.0 so they never win a min().

    NOTE: pos_inds is modified in place (missing entries are zeroed).
    """
    pos_mask = (pos_inds >= 0).float()
    pos_anti_mask = (pos_inds < 0)
    # Renamed the local from `np` to n_p: it shadowed the module-level
    # numpy import.
    b, n_p = pos_inds.shape
    C = x.shape[1]
    pos_inds[pos_anti_mask] = 0
    pos_inds_exp = pos_inds.view(b, 1, n_p).expand(-1, C, -1)
    x_pos = torch.gather(x, 2, pos_inds_exp)
    # x_pos: b x C x n_p
    x_pos1 = x_pos[1:, :, :].permute(0, 2, 1).contiguous().view(-1, C)
    x_pos0 = x_pos[0]
    x_pos_from = x_pos0.view(1, C, n_p).expand(b - 1, -1, -1).permute(0, 2, 1).contiguous().view(-1, C)
    pos_mask_part = pos_mask[1:, :]
    dists_pos = F.pairwise_distance(x_pos_from, x_pos1).view(b - 1, n_p)
    N = x.shape[2]
    # Anchor positives against every candidate column of the other frames.
    x_pos_from = x_pos0.view(1, 1, C, n_p).expand(b - 1, N, -1, -1).permute(0, 1, 3, 2).contiguous().view(-1, C)
    x_to = x[1:].permute(0, 2, 1).view(b - 1, N, 1, C).expand(-1, -1, n_p, -1).contiguous().view(-1, C)
    dists_flat = F.pairwise_distance(x_pos_from, x_to)
    dists = dists_flat.view(b - 1, N, n_p)
    neg_mask_part = neg_mask[1:].float()
    max_dist = 2.0
    dists_neg = dists * neg_mask_part + max_dist * (torch.ones_like(neg_mask_part) - neg_mask_part)
    return dists_pos, pos_mask_part, dists_neg, neg_mask_part
def compute_trip_loss(x, pos_inds, neg_mask, m):
    """Triplet-style loss: masked positive distances plus a hinged margin on
    the single hardest (closest) negative, summed over positives."""
    dists_pos, pos_mask_part, dists_neg, neg_mask_part = compute_distances(x, pos_inds, neg_mask)
    pos_term = torch.sum(dists_pos * pos_mask_part, 0)
    # Hardest negative per positive track over all frames and candidates.
    per_frame_min, _ = torch.min(dists_neg, dim=0)
    hardest_neg, _ = torch.min(per_frame_min, 0)
    hinge = m - hardest_neg
    active = (hinge >= 0).float()
    return torch.sum(active * hinge + pos_term)
def compute_hn_loss(x, pos_inds, neg_mask, m):
    """Hard-negative hinge loss: hinge on (positive distance - hardest
    negative distance + m), summed over positives."""
    dists_pos, pos_mask_part, dists_neg, neg_mask_part = compute_distances(x, pos_inds, neg_mask)
    pos_term = torch.sum(dists_pos * pos_mask_part, 0)
    per_frame_min, _ = torch.min(dists_neg, dim=0)
    hardest_neg, _ = torch.min(per_frame_min, 0)
    shifted = pos_term - hardest_neg + m
    active = (shifted >= 0).float()
    return torch.sum(active * shifted)
def compute_distvecs(x, pos_inds, neg_mask):
    """Collect the valid positive and negative distances as flat numpy
    vectors (used for AP-style evaluation)."""
    dists_pos, pos_mask_part, dists_neg, neg_mask_part = compute_distances(x, pos_inds, neg_mask)
    d_pos = np.zeros(0)
    d_neg = np.zeros(0)
    pos_mask_flat = pos_mask_part.view(-1)
    dists_pos_flat = dists_pos.view(-1)
    if pos_mask_flat.shape[0] > 0:
        keep_pos = pos_mask_flat > 0
        d_pos = dists_pos_flat.view(-1)
        if dists_pos_flat.shape[0] > 0:
            d_pos = d_pos[keep_pos]
        if d_pos.shape[0] > 0:
            d_pos = d_pos.detach().cpu().numpy()
    all_neg_dists = dists_neg.view(-1)
    if all_neg_dists.shape[0] > 0:
        # Distances pinned at 2.0 correspond to masked-out negatives.
        keep_neg = all_neg_dists < 2.0
        selected = dists_neg.view(-1)
        if selected.shape[0] > 0:
            d_neg_t = selected[keep_neg]
            if d_neg_t.shape[0] > 0:
                d_neg = d_neg_t.detach().cpu().numpy()
    return d_pos, d_neg
def sample_descriptors(x, lines, w_img, h_img):
    """Bilinearly sample a dense feature map along line points and return one
    L2-normalised descriptor per line.

    x:     b x C x h_map x w_map feature map (may be cropped w.r.t. the image).
    lines: b x L x P x 2 point grid in image pixel coordinates; rewritten IN
           PLACE into grid_sample's [-1, 1] normalised coordinates.
    Returns a b x C x L tensor of unit-norm line descriptors.
    """
    w_map = x.shape[3]
    h_map = x.shape[2]
    # Offset between image coordinates and the (possibly cropped) map.
    m_x = (w_img - w_map) / 2.0
    m_y = (h_img - h_map) / 2.0
    # BUG FIX: slicing the last dimension yields a non-contiguous view, and
    # .view(-1) on it raises a RuntimeError; .contiguous() is required
    # (the other copy of this function in the repo already has it).
    lines_x_flat = lines[:, :, :, 0].contiguous().view(-1)
    good_lines = (lines_x_flat > 0).nonzero()
    lines_x_flat[good_lines] = lines_x_flat[good_lines] + m_x * torch.ones_like(lines_x_flat[good_lines])
    lines_x_flat[good_lines] = 2.0 / w_map * lines_x_flat[good_lines] - torch.ones_like(lines_x_flat[good_lines])
    lines[:, :, :, 0] = lines_x_flat.view(lines[:, :, :, 0].shape)
    lines_y_flat = lines[:, :, :, 1].contiguous().view(-1)
    good_lines = (lines_y_flat > 0).nonzero()
    lines_y_flat[good_lines] = lines_y_flat[good_lines] + m_y * torch.ones_like(lines_y_flat[good_lines])
    lines_y_flat[good_lines] = 2.0 / h_map * lines_y_flat[good_lines] - torch.ones_like(lines_y_flat[good_lines])
    lines[:, :, :, 1] = lines_y_flat.view(lines[:, :, :, 1].shape)
    # Average the sampled per-point features over each line, then L2-normalise.
    lds = F.grid_sample(x, lines, mode='bilinear', padding_mode='border')
    avg_lds = torch.sum(lds, 3)
    avg_lds = F.normalize(avg_lds, p=2, dim=1)
    return avg_lds
class FeatureEncoder(nn.Module):
    """Convolutional encoder producing a dense feature map for line-descriptor
    sampling.

    depth (0-3) controls how many stride-2 stages are used; g is the channel
    multiplier (0 = choose automatically from depth); is_skip enables skip
    connections on the decoder side.  Python-2 `print` statements were
    replaced by print() calls so the module loads under Python 3.
    """

    def initialize_l2(self, g=0):
        # Build all layers; g == 0 means "pick a multiplier from the depth".
        if g == 0:
            g = 4
            if self.depth == 2:
                g = 2
            if self.depth == 3:
                g = 1
        print('initializing depth= ' + str(self.depth) + 'g=' + str(g))
        self.downsample_init = nn.Upsample(scale_factor=0.5, mode='bilinear')
        if self.is_color:
            self.conv1 = nn.Conv2d(3, 8 * g, kernel_size=3, stride=1, padding=1)
        else:
            self.conv1 = nn.Conv2d(1, 8 * g, kernel_size=3, stride=1, padding=1)
        self.batch1 = nn.BatchNorm2d(8 * g)
        self.conv2 = nn.Conv2d(8 * g, 8 * g, kernel_size=3, stride=1, padding=1)
        self.batch2 = nn.BatchNorm2d(8 * g)
        if self.depth > 0:
            self.conv3 = nn.Conv2d(8 * g, 16 * g, kernel_size=3, stride=2, padding=1)  # 1/2
        else:
            self.conv3 = nn.Conv2d(8 * g, 16 * g, kernel_size=3, stride=1, padding=1)
        self.batch3 = nn.BatchNorm2d(16 * g)
        self.conv4 = nn.Conv2d(16 * g, 16 * g, kernel_size=3, stride=1, padding=1)
        self.batch4 = nn.BatchNorm2d(16 * g)
        k = 16 * g
        if self.depth >= 2:
            self.conv5 = nn.Conv2d(16 * g, 32 * g, kernel_size=3, stride=2, padding=1)  # 1/4
            self.batch5 = nn.BatchNorm2d(32 * g)
            self.conv6 = nn.Conv2d(32 * g, 32 * g, kernel_size=3, padding=1)
            self.batch6 = nn.BatchNorm2d(32 * g)
            k = 32*g
        if self.depth == 3:
            self.conv61 = nn.Conv2d(32 * g, 64 * g, kernel_size=3, stride=2, padding=1)  # 1/8
            self.batch61 = nn.BatchNorm2d(64 * g)
            self.conv62 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1)
            self.batch62 = nn.BatchNorm2d(64 * g)
            k = 64*g
            if self.is_skip:
                # Decoder-side convolutions used by the skip path.
                self.convu1 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1)
                self.batchu1 = nn.BatchNorm2d(64 * g)
                self.convu2 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1)
                self.batchu2 = nn.BatchNorm2d(64 * g)
                self.convu3 = nn.Conv2d(64 * g, 64 * g, kernel_size=3, padding=1)
                self.batchu3 = nn.BatchNorm2d(64 * g)
        self.conv7 = nn.Conv2d(k, k, kernel_size=7, padding=3)
        self.batch7 = nn.BatchNorm2d(k)
        self.dropout = nn.Dropout2d()
        if self.is_learnable:
            self.deconv_64 = nn.ConvTranspose2d(k, k, stride=8, kernel_size=3)
            self.deconv_32 = nn.ConvTranspose2d(k, k, stride=4, kernel_size=3)
            self.deconv_16 = nn.ConvTranspose2d(k, k, stride=2, kernel_size=3)
        else:
            self.deconv_64 = nn.Upsample(scale_factor=8, mode='bilinear')
            self.deconv_32 = nn.Upsample(scale_factor=4, mode='bilinear')
            self.deconv_16 = nn.Upsample(scale_factor=2, mode='bilinear')
        self.deconv_final = nn.Upsample(scale_factor=self.upscale_factor, mode='bilinear')

    def __init__(self, is_cuda, is_color=False, upscale_factor=1, is_pyramid=True, depth=2, g=0,
                 is_learnable=False, is_skip = False):
        super(FeatureEncoder, self).__init__()
        self.margins = -1*torch.ones(2).float().cuda()
        self.final_size = -1 * torch.ones(2).float().cuda()
        self.is_learnable = is_learnable
        self.depth = depth
        self.upscale_factor = upscale_factor
        self.is_color = is_color
        self.is_skip = is_skip
        print('my net init start full')
        self.initialize_l2(g)
        print('init L2 done')
        self.is_cuda = is_cuda
        self.scale = 1.0
        self.is_pyramid = is_pyramid
        self.is_test_mode = False

    def forward(self, x):
        init_shape = x.shape
        x = F.relu(self.batch1(self.conv1(x)), inplace=True)
        x = F.relu(self.batch2(self.conv2(x)), inplace=True)
        x2 = x
        x = F.relu(self.batch3(self.conv3(x)), inplace=True)
        x = F.relu(self.batch4(self.conv4(x)), inplace=True)
        if (self.depth == 0):
            if self.is_skip:
                # Add the early features onto the first half of the channels.
                w = x2.shape[3]
                h = x2.shape[2]
                xm_1 = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
                xm_2 = x[:, x2.shape[1]:, :, :]
                xm = torch.stack([xm_1, xm_2], dim=1)
                xm = xm.view(x.shape)
                x = xm
            x = self.conv7(x)
            return x
        if (self.depth == 1):
            x = self.batch7(self.conv7(x))
            x = self.deconv_16(x)
            if self.is_skip:
                w = x2.shape[3]
                h = x2.shape[2]
                x[:, 0:x2.shape[1], 0:h, 0:w] = x[:, 0:x2.shape[1], 0:h, 0:w] + x2
            x = self.deconv_final(x)
            return x
        x4 = x
        x = F.relu(self.batch5(self.conv5(x)), inplace=True)
        x = F.relu(self.batch6(self.conv6(x)), inplace=True)
        if self.depth == 2:
            x = self.batch7(self.conv7(x))
            x = self.deconv_32(x)
            x = self.deconv_final(x)
            return x
        x8 = x
        x = self.batch61(self.conv61(x))
        x = self.batch62(self.conv62(x))
        x = self.batch7(self.conv7(x))
        if self.is_skip:
            # Progressive decoder: upsample and add channel-repeated skips.
            x = F.upsample_bilinear(x, x8.shape[2:])
            x = x + x8.repeat(1, 2, 1, 1)
            x = self.batchu1(self.convu1(x))
            x = F.upsample_bilinear(x, x4.shape[2:])
            x4r = x4.repeat(1, 4, 1, 1)
            x = x + x4r
            x = self.batchu2(self.convu2(x))
            x = F.upsample_bilinear(x, init_shape[2:])
            x = x + x2.repeat(1, 8, 1, 1)
            x = self.batchu3(self.convu3(x))
        else:
            x = self.deconv_64(x)
        x = self.deconv_final(x)
        return x
def net_train(loader, encoder_net, optimizer, is_triplet=True):
    """Run one training epoch over `loader` and return the updated network."""
    encoder_net.train()
    running_loss = 0
    for batch in tqdm.tqdm(loader):
        images = Variable(batch['images'].cuda(), requires_grad=False)
        lines = Variable(batch['lines'].cuda(), requires_grad=False)
        feat = encoder_net(images)
        w_img = images[0].shape[2]
        h_img = images[0].shape[1]
        descs = sample_descriptors(feat, lines, w_img, h_img)
        pos_inds = Variable(batch['positives'].cuda(), requires_grad=False)
        neg_mask = Variable(batch['negatives'].cuda(), requires_grad=False)
        if is_triplet:
            loss = compute_trip_loss(descs, pos_inds, neg_mask, m=0.5)
        else:
            loss = compute_hn_loss(descs, pos_inds, neg_mask, m=0.5)
        running_loss += loss.detach().cpu().numpy()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('avg train loss '+str(running_loss/len(loader)))
    return encoder_net
def get_ap(all_pos, all_neg):
    """Average precision when scoring a distance d as similarity 1 - d/2."""
    pos = np.concatenate(tuple(all_pos), axis=0)
    neg = np.concatenate(tuple(all_neg), axis=0)
    labels = np.concatenate((np.ones(pos.shape[0]), np.zeros(neg.shape[0])), axis=0)
    scores = 1.0 - 0.5 * np.concatenate((pos, neg), axis=0)
    return metrics.average_precision_score(labels, scores)
def net_test(loader, encoder_net, is_triplet = True, save_descs=False, save_folder = ''):
    """Evaluation pass: computes the validation loss and (optionally) writes
    one descriptor file per image to save_folder as '<img>_<pair>.txt'.

    Returns 0 (the AP computation of earlier revisions was disabled).
    Fixes a resource leak: descriptor files are now closed via `with`;
    unused accumulators and dead per-line computations were removed.
    """
    encoder_net.eval()
    with torch.no_grad():
        avg_loss = 0
        avg_time = 0
        for data in tqdm.tqdm(loader):
            x = Variable(data['images'].cuda(), requires_grad=False)
            if len(x) == 0:
                continue
            lines = Variable(data['lines'].cuda(), requires_grad=False)
            # Double-sync around the forward pass for accurate GPU timing.
            torch.cuda.synchronize()
            torch.cuda.synchronize()
            t0 = time.time()
            y = encoder_net(x)
            w_img = x[0].shape[2]
            h_img = x[0].shape[1]
            d = sample_descriptors(y, lines, w_img, h_img)
            torch.cuda.synchronize()
            torch.cuda.synchronize()
            t1 = time.time()
            pos_inds = Variable(data['positives'].cuda(), requires_grad=False)
            neg_mask = Variable(data['negatives'].cuda(), requires_grad=False)
            if is_triplet:
                l = compute_trip_loss(d, pos_inds, neg_mask, m=0.5)
            else:
                l = compute_hn_loss(d, pos_inds, neg_mask, m=0.5)
            avg_loss += l.detach().cpu().numpy()
            avg_time += t1 - t0
            if save_descs:
                img_ids = data['image_ids']
                pair_ids = data['pair_ids']
                ln_lens = data['ln_lens']
                for ii in range(0, len(img_ids)):
                    # `with` guarantees the descriptor file is closed
                    # (the original leaked one handle per image).
                    with open(save_folder + '/' + str(img_ids[ii]) + '_' + str(pair_ids[ii]) + '.txt', 'w') as f_out:
                        descs_np = d.detach().cpu().numpy()
                        for lind in range(0, ln_lens[ii]):
                            dcur = descs_np[ii, :, lind].reshape(-1)
                            for j in range(0, len(dcur)):
                                f_out.write(str(dcur[j]) + ' ')
                            f_out.write('\n')
    return 0
def net_inference(loader, encoder_net, save_descs=False, save_folder = ''):
    """Forward-only pass that optionally dumps per-image descriptors to
    '<img>_<pair>.txt' files in save_folder.  Returns 0.

    Fixes a resource leak: descriptor files are now closed via `with`;
    unused accumulators were removed.
    """
    encoder_net.eval()
    with torch.no_grad():
        avg_time = 0
        for data in tqdm.tqdm(loader):
            x = Variable(data['images'].cuda(), requires_grad=False)
            if len(x) == 0:
                continue
            lines = Variable(data['lines'].cuda(), requires_grad=False)
            # Double-sync around the forward pass for accurate GPU timing.
            torch.cuda.synchronize()
            torch.cuda.synchronize()
            t0 = time.time()
            y = encoder_net(x)
            w_img = x[0].shape[2]
            h_img = x[0].shape[1]
            d = sample_descriptors(y, lines, w_img, h_img)
            torch.cuda.synchronize()
            torch.cuda.synchronize()
            t1 = time.time()
            avg_time += t1 - t0
            if save_descs:
                img_ids = data['image_ids']
                pair_ids = data['pair_ids']
                ln_lens = data['ln_lens']
                for ii in range(0, len(img_ids)):
                    # `with` guarantees the descriptor file is closed.
                    with open(save_folder + '/' + str(img_ids[ii]) + '_' + str(pair_ids[ii]) + '.txt', 'w') as f_out:
                        descs_np = d.detach().cpu().numpy()
                        for lind in range(0, ln_lens[ii]):
                            dcur = descs_np[ii, :, lind].reshape(-1)
                            for j in range(0, len(dcur)):
                                f_out.write(str(dcur[j]) + ' ')
                            f_out.write('\n')
    return 0
| 19,314 | 38.418367 | 113 | py |
lld-public | lld-public-master/train/multibatch_trainer.py | import torch
import os
import time
import numpy as np
import data.batched as ba
import torch.optim as optim
import cnn.net_multibatch as nmb
def compose_batch(batch):
    """Collate one multi-frame sample (images, lines, negatives, positives)
    into padded torch tensors.

    batch is a single-element list wrapping a tuple; extra trailing metadata
    (when present) is passed through unchanged.  BUG FIX: `astype(long)` used
    the Python-2-only builtin `long`; it is now `np.int64`.
    """
    batch = batch[0]
    n = len(batch[0])
    ims = np.asarray(batch[0]).astype(float)
    ims = torch.from_numpy(ims).float()
    lines = batch[1]
    ln_lens = [l.shape[0] for l in lines]
    ln_max = np.max(np.asarray(ln_lens))
    pt_num = lines[0].shape[2]
    lines_torch = torch.zeros(n, ln_max, pt_num, 2)
    for i in range(0, n):
        lines_torch[i, 0:lines[i].shape[0], :, :] = torch.from_numpy(lines[i].astype(float)).float()
    negs = batch[2]
    poss = batch[3]
    n_pos = len(poss[0])
    max_neg = max([max([len(neg_for_pos) for neg_for_pos in neg]) for neg in negs])
    neg_t = -1*torch.ones(n, n_pos, max_neg)
    for i in range(0, len(negs)):
        for j in range(0, len(negs[i])):
            neg_for_pos = negs[i][j]
            if len(neg_for_pos) > 0:
                # np.int64 replaces the Python-2-only builtin `long`.
                neg_t[i][j][0:len(neg_for_pos)] = torch.from_numpy(np.asarray(neg_for_pos).astype(np.int64))
    pos_t = -1*torch.ones(n, n_pos)
    for i in range(0, len(poss)):
        p = poss[i]
        for j in range(0, len(p)):
            if len(p[j]) == 1:
                pos_t[i, j] = p[j][0]
    if len(batch) == 4:
        # NOTE: the raw `poss` lists (not pos_t) are returned here — this is
        # the pre-existing contract consumed by the trainer.
        return ims, lines_torch, neg_t, poss
    else:
        return ims, lines_torch, neg_t, poss, batch[4], batch[5], batch[6]
def run_training(dir_path, train_loader, test_loader, encoder_net, optimizer, start_epoch=-1, is_triplet = True, vdseqid='7'):
    """Train for up to 8 epochs, checkpointing after each and dumping
    validation descriptors under dir_path/val_descs_<epoch>/<vdseqid>/.

    When start_epoch >= 0 the matching checkpoint is loaded first.
    The Python-2 `print` statement was converted to a print() call.
    """
    if start_epoch >= 0:
        checkpoint = torch.load(dir_path + '/' + str(start_epoch) + '.pyh.tar')
        encoder_net.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print('loaded epoch ' + str(start_epoch))
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    for ep_id in range(start_epoch + 1, 8):
        encoder_net = nmb.net_train(train_loader, encoder_net, optimizer, is_triplet=is_triplet)
        valdesc_folder = dir_path+"/val_descs_"+str(ep_id)+"/"+vdseqid+'/'
        if not os.path.exists(valdesc_folder):
            os.makedirs(valdesc_folder)
        nmb.net_test(test_loader, encoder_net, save_descs=True, save_folder=valdesc_folder)
        torch.save({
            'epoch': ep_id,
            'state_dict': encoder_net.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, dir_path + '/' + str(ep_id) + '.pyh.tar')
def run_validation(dir_path, test_loader, encoder_net, ep_id, is_triplet = True, is_save=False, vqseqid=''):
    """Load checkpoint ep_id, run net_test on the validation loader, and
    append the resulting score to dir_path/report.txt.

    Fixes: Python-2 `print` statement converted to print(); the report file
    is now closed via `with` (it previously leaked the handle).
    """
    checkpoint = torch.load(dir_path + '/' + str(ep_id) + '.pyh.tar')
    encoder_net.load_state_dict(checkpoint['state_dict'])
    print('loaded epoch ' + str(ep_id))
    val_descs_save_path = dir_path+'/val_descs_'+str(ep_id)+'/'+vqseqid+'/'
    if not os.path.exists(val_descs_save_path):
        os.makedirs(val_descs_save_path)
    curr_ap = nmb.net_test(test_loader, encoder_net, is_triplet=is_triplet, save_descs=is_save, save_folder=val_descs_save_path )
    with open(dir_path + '/report.txt', 'a') as rep_txt:
        rep_txt.write(str(ep_id) + ':' + str(curr_ap) + '\n')
def run_test_heterogen(dir_path, encoder_net, seq_map, ep_id=0, is_triplet = True, do_savedesc=True, pt_per_line=5, n_lim=6):
    """Export descriptors for a mix of KITTI and EuRoC test sequences.

    seq_map maps a global sequence id to ('kitti'|'euroc', save_code); output
    goes to dir_path/descs/<code>/ or dir_path/descs_euroc/<code>/.
    The Python-2 `print` statement was converted to a print() call.
    """
    checkpoint = torch.load(dir_path + '/' + str(ep_id) + '.pyh.tar')
    encoder_net.load_state_dict(checkpoint['state_dict'])
    print('loaded epoch ' + str(ep_id))
    for seq_id in seq_map:
        seq_type, seq_save_code = seq_map[seq_id]
        test_loader = ba.get_combined_test(seq_id, pt_per_line, n_lim)
        if seq_type == 'kitti':
            save_dir = dir_path + '/descs' + '/' + str(seq_save_code) + '/'
        else:
            save_dir = dir_path + '/descs_euroc' + '/' + str(seq_save_code) + '/'
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        nmb.net_test(test_loader, encoder_net, is_triplet = is_triplet, save_descs=do_savedesc, save_folder = save_dir)
def run_inference(dir_path, encoder_net, ep_id=0, do_savedesc=True):
    """Load checkpoint ep_id and dump inference descriptors to
    dir_path/inf_descs.  Python-2 `print` converted to print()."""
    checkpoint = torch.load(dir_path + '/' + str(ep_id) + '.pyh.tar')
    encoder_net.load_state_dict(checkpoint['state_dict'])
    print('loaded epoch ' + str(ep_id))
    inf_loader = ba.get_loader()
    nmb.net_inference(inf_loader, encoder_net, save_descs=do_savedesc, save_folder=dir_path+'/inf_descs')
| 4,564 | 41.268519 | 129 | py |
lld-public | lld-public-master/train/__init__.py | 0 | 0 | 0 | py | |
lld-public | lld-public-master/data/line_sampler.py | import numpy as np
import torch
import time
def prepare_line_grid(lines_pair, margins, s, map_size, is_plain, pt_per_line):
    """Sample points along each line and normalise them into grid_sample's
    [-1, 1] coordinate range for the given map_size."""
    grid = prepare_grid_plain(lines_pair, margins, s, is_plain, pt_per_line)
    for axis in range(0, 2):
        grid[:, :, :, axis] -= 0.5 * map_size[axis]
        grid[:, :, :, axis] /= 0.5 * map_size[axis]
    return grid
def sample_line(linedata, line_grid, i, li, s, margins, is_plain, pt_per_line):
    """Write pt_per_line evenly spaced points of line `li` into line_grid[i].

    linedata packs one (is_plain) or two 4-vector segments
    (start_x, start_y, end_x, end_y); the first non-zero segment is sampled.
    Points are scaled by s and shifted by -margins; all-zero segments leave
    the grid untouched.
    """
    seg_count = 1 if is_plain else 2
    segments = []
    for si in range(seg_count):
        candidate = linedata[4 * si: 4 * si + 4]
        if np.linalg.norm(candidate) > 0:
            segments.append(candidate)
    if not segments:
        return
    start = np.float32(segments[0][0:2])
    end = np.float32(segments[0][2:4])
    step = 1.0 / pt_per_line * (end - start)
    for pti in range(pt_per_line):
        point = start + (0.5 + pti) * step
        line_grid[i, li, pti, :] = s * point - margins
def prepare_grid_vectorized_numpy(lines_pair, margins, s, pt_per_line):
    """Vectorised point sampling for a pair of 4 x N line matrices.

    Returns a 2 x max_lines x pt_per_line x 2 array of scaled,
    margin-shifted midpoint samples; rows beyond a frame's line count
    stay zero.
    """
    max_lines = np.max([lines_pair[0].shape[1], lines_pair[1].shape[1]])
    grid = np.zeros((2, max_lines, pt_per_line, 2))
    m_col = np.asarray(margins).reshape((2, 1))
    for frame in range(0, 2):
        ld = lines_pair[frame]
        count = ld.shape[1]
        if count == 0:
            continue
        starts = ld[0:2, :]
        ends = ld[2:4, :]
        for j in range(0, pt_per_line):
            frac = (1.0 + 2 * j) / (2 * pt_per_line)
            shifted = s * (starts * (1 - frac) + ends * frac) - np.tile(m_col, (1, starts.shape[1]))
            grid[frame, 0:count, j, :] = np.transpose(shifted, (1, 0))
    return grid
def prepare_grid_numpy_vec(ld, s, pt_per_line):
    """Sample pt_per_line midpoints along every column of a 4 x N line matrix.

    Returns an N x pt_per_line x 2 array scaled by s, or an empty list when
    no lines are present.  Dead timing calls and an unused accumulator from
    earlier revisions were removed; behaviour is unchanged.
    """
    if ld.shape[1] == 0:
        return []
    x_s = ld[0:2, :]
    x_e = ld[2:4, :]
    pts_lst = []
    for j in range(0, pt_per_line):
        c = (1.0 + 2 * j) / (2 * pt_per_line)
        coordmat = s * (x_s * (1 - c) + x_e * c)
        pts_lst.append(coordmat.transpose(1, 0))
    return np.stack(pts_lst, axis=2).transpose(0, 2, 1)
def prepare_grid_vectorized(lines_pair, margins, s, pt_per_line):
    """Torch point sampler: stacks per-frame N x pt_per_line x 2 point grids
    into a 2 x N x pt_per_line x 2 tensor.

    `margins` is accepted for interface compatibility but unused (the margin
    shift was already disabled in the original).  Frames with zero lines are
    skipped, so both frames must have the same line count for the final
    stack to succeed.  Dead timing code was removed.
    """
    coordmats_lst = []
    for i in range(0, 2):
        ld = lines_pair[i]
        if ld.shape[1] == 0:
            continue
        x_s = ld[0:2, :]
        x_e = ld[2:4, :]
        pts_lst = []
        for j in range(0, pt_per_line):
            c = (1.0 + 2 * j) / (2 * pt_per_line)
            coordmat = s * (x_s * (1 - c) + x_e * c)
            pts_lst.append(coordmat.permute(1, 0))
        coordmats_lst.append(torch.stack(pts_lst, dim=2).permute(0, 2, 1))
    return torch.stack(coordmats_lst, dim=0)
def prepare_grid_plain(lines_pair, margins, s, is_plain, pt_per_line):
    """Point-sample every line of both frames via sample_line; output is a
    2 x max_lines x pt_per_line x 2 array."""
    max_lines = np.max([lines_pair[0].shape[1], lines_pair[1].shape[1]])
    grid = np.zeros((2, max_lines, pt_per_line, 2))
    for frame in range(0, 2):
        for li in range(0, lines_pair[frame].shape[1]):
            sample_line(lines_pair[frame][:, li], grid, frame, li, s, margins, is_plain, pt_per_line)
    return grid
| 4,132 | 35.901786 | 127 | py |
lld-public | lld-public-master/data/batched.py | import os
import cv2
from torch.utils.data import Dataset
import numpy as np
import sys
import random
import torch
import line_sampler
def add_noise(im):
    """Return the image as float with additive Gaussian noise (sigma = 30)."""
    noise = 30 * np.random.randn(im.shape[0], im.shape[1])
    return im.astype(float) + noise
def get_image_id(call_id, f1):
    """Map a flat pair index f1 (for batch call_id) to (image_id, pair_id).

    Indices 0-3 address the two central frames (main_id + 5 / + 6); higher
    indices walk outwards by distance 1..5, minus side first, skipping the
    central pair itself.  Returns (-1, -1) when f1 is out of range.
    """
    max_track_dist = 5
    main_id = 5 * call_id
    if f1 < 4:
        return int(np.floor(f1 / 2) + main_id + 5), np.mod(f1, 2)
    target = np.floor((f1 - 4) / 2)
    cnt = 0
    for dist in range(1, max_track_dist + 1):
        for sign in (-1, 1):
            cand = main_id + 5 + sign * dist
            if cand == main_id + 5 or cand == main_id + 6:
                continue
            if cnt == target:
                return int(cand), np.mod(f1, 2)
            cnt += 1
    return -1, -1
def compose_batch(batch):
    """Collate one sample dict into padded torch tensors.

    Expects batch = [dict] with keys 'images', 'lines', 'negatives',
    'positives'; the dict is updated in place with tensor versions plus the
    per-image line counts under 'ln_lens'.

    BUG FIX: the negatives loop reused the stale loop variable `i` from the
    preceding lines loop, writing every sample's negatives into the same
    row of neg_mask; it now enumerates the samples.
    """
    batch = batch[0]
    n = len(batch['images'])
    ims = torch.from_numpy(np.asarray(batch['images']).astype(float)).float()
    lines = batch['lines']
    ln_lens = [l.shape[0] for l in lines]
    if len(ln_lens) > 0:
        ln_max = np.max(np.asarray(ln_lens))
        pt_num = lines[0].shape[1]
    else:
        ln_max = 0
        pt_num = 0
        n = 0
    lines_torch = torch.zeros(n, ln_max, pt_num, 2)
    for i in range(0, n):
        lines_torch[i, 0:lines[i].shape[0], :, :] = torch.from_numpy(lines[i].astype(float)).float()
    negs = batch['negatives']
    poss = batch['positives']
    if n > 0:
        n_pos = len(poss[0])
        max_neg = max([ni.shape[0] for ni in negs])
    else:
        max_neg = 0
        n_pos = 0
    neg_mask = -1*torch.ones(n, max_neg, n_pos).long()
    for i, neg_i in enumerate(negs):
        neg_mask[i][0:neg_i.shape[0]] = torch.from_numpy(neg_i).long()
    pos_t = -1*torch.ones(n, n_pos).long()
    for i in range(0, len(poss)):
        p = poss[i]
        for j in range(0, len(p)):
            if len(p[j]) == 1:
                pos_t[i, j] = p[j][0]
    fin_dict = batch
    fin_dict['images'] = ims
    fin_dict['lines'] = lines_torch
    fin_dict['negatives'] = neg_mask
    fin_dict['positives'] = pos_t
    fin_dict['ln_lens'] = ln_lens
    return fin_dict
def compose_infer_batch(batch):
    """Collate a list of per-image dicts (inference: no labels) into padded
    image/line tensors plus bookkeeping lists."""
    n = len(batch)
    ims = torch.from_numpy(np.asarray([item['images'] for item in batch]).astype(float)).float()
    lines = [item['lines'] for item in batch]
    ln_lens = [l.shape[0] for l in lines]
    if ln_lens:
        ln_max = np.max(np.asarray(ln_lens))
        pt_num = lines[0].shape[1]
    else:
        ln_max, pt_num, n = 0, 0, 0
    lines_torch = torch.zeros(n, ln_max, pt_num, 2)
    for idx in range(0, n):
        lines_torch[idx, 0:lines[idx].shape[0], :, :] = torch.from_numpy(lines[idx].astype(float)).float()
    return {'images': ims,
            'lines': lines_torch,
            'ln_lens': ln_lens,
            'image_ids': [item['image_ids'] for item in batch],
            'pair_ids': [item['pair_ids'] for item in batch]}
def normalize_lines_for_gridsampler(im, li):
    """Map pixel line endpoints into grid_sample's [-1, 1] coordinate range.

    im is a C x H x W array (only its shape is read); rows 0-3 of li are
    start_x, start_y, end_x, end_y.  BUG FIX: the original normalised a
    float copy (`astype` always copies) but returned the untouched input,
    making the function a no-op; the normalised copy is now returned.
    """
    w = im.shape[2]
    h = im.shape[1]
    l = li.astype(float)
    l[0, :] = (l[0, :] - w / 2.0) / (w / 2.0)
    l[2, :] = (l[2, :] - w / 2.0) / (w / 2.0)
    l[1, :] = (l[1, :] - h / 2.0) / (h / 2.0)
    l[3, :] = (l[3, :] - h / 2.0) / (h / 2.0)
    return l
#Dataset description v1 (07.2018)
#lines: 5 x N matrix of ushort, with N number of lines in the image, and the rows are start_x, start_y, end_x, end_y, octave
#matching: N x M of uchar, where N is the number of detections in the frame, M is the number of tracks. Values are: 0 - not a match, 1 - possible match, 2 - detection from track
class DatasetBatchIndexer:
    """Indexes and loads the batched line-detection/matching dataset.

    Reads per-sequence folders under `root_dir` (lines as `*_l.png`,
    match matrices as `*_m.png`; layout described in the comments above)
    and composes them into per-frame (`get_multi_frame_batch`) or
    per-pair (`get_dual_frame_batch`) training dictionaries.

    NOTE(review): this is Python 2 code -- it relies on integer `/`
    division for index math, a `print` statement (get_image_gs) and the
    `long` builtin (compose_frame_data_w_mask); it will not run
    unmodified under Python 3.
    """
    def prepare_pyramid_listed(self, im):
        # Build a pyramid_levels-long list of progressively downscaled
        # copies of `im`; each level shrinks both sides by resize_factor,
        # rounding to the nearest pixel.
        ims = []
        for i in range(0, self.pyramid_levels):
            ims.append(im)
            im = cv2.resize(im, (int(np.floor(1.0/self.resize_factor*im.shape[1]+0.5)), int(np.floor(1.0/self.resize_factor*im.shape[0]+0.5))))
        return ims
    def read_batch(self, data):
        # Parse a stacked detection matrix (8 rows per frame: two 4-value
        # segments per frame) into nested [line][frame][detection] lists of
        # raw segment vectors; all-zero segments are treated as "absent".
        # NOTE(review): `frame_num` uses Python 2 integer division.
        frame_num = data.shape[0] / 8
        line_num = data.shape[1]
        # batch_data = np.zeros((2, 2, 2, line_num, frame_num), dtype=np.uint16)
        line_lst = []
        for li in range(0, line_num):
            line_projs = []
            for fi in range(0, frame_num):
                dets = []
                for di in range(0, 2): # detection index
                    seg = data[8 * fi + 4 * di: 8 * fi + 4 * di + 4, li]
                    if np.linalg.norm(seg) > 0:
                        dets.append(seg)
                line_projs.append(dets)
            line_lst.append(line_projs)
        return line_lst
        # for pti in range(0, 2): #pt start - end
        #     for ci in range(0, 2): #coordinate x-y
        #         batch_data[ci, pti, di, li, fi] = data[8*fi + 4*di + 2*pti + ci, li]
        # return batch_data
    def read_negatives(self, data):
        # Decode the negative-match matrix: for each frame, collect the
        # non-empty segment(s) of every column until the first all-zero
        # column, yielding [frame][line][detection] segment lists.
        neg_data = []
        for fi in range(0, data.shape[0] / 8):
            frame_lines = []
            i = 0
            while i < data.shape[1] and np.linalg.norm(data[:, i]) > 0:
                line_seq = []
                for di in range(0, 2):
                    if np.linalg.norm(data[8 * fi + 4 * di: 8 * fi + 4 * di + 4, i]) == 0:
                        continue
                    # pt_start/pt_end are computed but only the full 4-value
                    # segment is kept (see the commented-out append below).
                    pt_start = data[8 * fi + 4 * di: 8 * fi + 4 * di + 2, i]
                    pt_end = data[8 * fi + 4 * di + 2:8 * fi + 4 * di + 4, i]
                    seg_data = data[8 * fi + 4 * di: 8 * fi + 4 * di + 4, i]
                    # line_seq.append([pt_start, pt_end])
                    line_seq.append(seg_data)
                frame_lines.append(line_seq)
                i += 1
            neg_data.append(frame_lines)
        return neg_data
    def read_exceptions(self, data):
        # Decode an exceptions matrix with 10 rows per frame: per line,
        # collect positive entries until the first zero (max 10).
        fnum = data.shape[0] / 10
        batch_exc = []
        for fi in range(0, fnum):
            lnum = data.shape[1]
            frame_exc = []
            for li in range(0, lnum):
                ei = 0
                line_excs = []
                while ei < 10 and data[10 * fi + ei, li] > 0:
                    line_excs.append(data[10 * fi + ei, li])
                    ei += 1
                frame_exc.append(line_excs)
            batch_exc.append(frame_exc)
        return batch_exc
    def __init__(self, kitti_root_dir, root_dir, seq_start=-1, seq_end=-1, seq_inds = [], is_color = False, is_pyramid = False, is_careful = True,
                 is_report_dist = False, pt_per_line = 5, do_sample_points = True, is_add_noise=False):
        """Index the per-sequence example folders under root_dir.

        Either pass explicit `seq_inds`, or a [seq_start, seq_end] range.
        NOTE(review): mutable default `seq_inds=[]` -- harmless here only
        because the list is never mutated, just replaced.
        """
        self.kitti_root_dir = kitti_root_dir
        self.root_dir = root_dir
        self.do_sample_points = do_sample_points
        self.is_color = is_color
        self.is_pyramid = is_pyramid
        self.is_careful = is_careful
        self.is_report_dist = is_report_dist
        self.pyramid_levels = 4
        self.resize_factor = 1.44
        self.pt_per_line = pt_per_line
        self.is_add_noise = is_add_noise
        self.positives = {}
        self.negatives = {}
        self.exceptions = {}
        self.exp_nums = {}  # si -> number of example files in that sequence dir
        self.exp_num = 0    # total example count across sequences
        if len(seq_inds) == 0:
            seq_inds = np.arange(seq_start, seq_end+1)
        self.seq_inds = seq_inds
        for si in range(0, len(seq_inds)):
            seq_id = seq_inds[si]
            seq_dir = root_dir + '/' + str(seq_id) + '/'
            # NOTE(review): seq_data/neg_data/exc_data/cnt are assigned but
            # never used -- presumably leftovers from an eager-loading path.
            seq_data = {}
            neg_data = {}
            exc_data = {}
            cnt = 0
            flist = os.listdir(seq_dir)
            self.exp_nums[si] = len(flist)
            self.exp_num += len(flist)
    def compose_frame_data(self, seq_id, call_id, f1):
        """Read one frame's line detections and per-line negative/positive
        match index arrays from `<seq>/<call_id*5>/<f1>_{l,m}.png`.

        Missing files degrade to empty arrays. With is_careful, negatives
        are only the explicit 255 entries; otherwise anything != 2.
        """
        pref = self.root_dir + '/' + str(seq_id) + '/' + str(call_id * 5) + '/' + str(f1)
        line_data = cv2.imread(pref + '_l.png', -1)
        if line_data is None:
            line_data = np.zeros((0,0))
        matches = cv2.imread(pref + '_m.png', -1)
        if matches is None:
            matches = np.zeros((0, 0))
        negs_all = []
        pos_all = []
        for i in range(0, matches.shape[1]):
            if self.is_careful:
                negs = np.nonzero(matches[:, i] == 255)[0]
            else:
                negs = np.nonzero(matches[:, i] != 2)[0]
            negs_all.append(negs)
            poss = np.nonzero(matches[:, i] == 2)[0]
            # print poss
            pos_all.append(poss)
        return line_data, negs_all, pos_all
    def compose_frame_data_w_mask(self, seq_id, call_id, f1):
        """Like compose_frame_data, but negatives are returned as a dense
        N x n_p 0/1 mask (value 255 means "possible negative").
        NOTE(review): `long` is the Python 2 builtin.
        """
        pref = self.root_dir + '/' + str(seq_id) + '/' + str(call_id * 5) + '/' + str(f1)
        line_data = cv2.imread(pref + '_l.png', -1)
        if line_data is None:
            line_data = np.zeros((0,0))
        matches = cv2.imread(pref + '_m.png', -1)
        if matches is None:
            matches = np.zeros((0, 0))
        negs_all = []
        pos_all = []
        negs = (matches == 255).astype(long) #N x n_p
        for i in range(0, matches.shape[1]):
            poss = np.nonzero(matches[:, i] == 2)[0]
            pos_all.append(poss)
        return line_data, negs, pos_all
    def get_label(self, i, tl):
        # Zero-pad the integer `i` to a string of length `tl`
        # (e.g. get_label(7, 6) -> '000007'), matching KITTI file naming.
        s = str(i)
        while len(s) < tl:
            s = '0' + s
        return s
    def get_image_gs(self, call_id, seq_id, f1):
        """Load the KITTI image for this call/frame via get_image_id.

        Grayscale by default; with is_color the camera index is shifted by
        2 to select the color cameras (image_2/image_3 layout).
        NOTE(review): Python 2 `print` statement below.
        """
        iid, pid = get_image_id(call_id, f1)
        # self.img_ids.append(iid)
        # self.pair_ids.append(pid)
        if iid < 0 or pid < 0:
            print 'error loading image'
        if self.is_color:
            pid = pid+2
        lbl = self.kitti_root_dir + '/' + self.get_label(seq_id, 2) + '/image_' + str(pid) + '/' + \
              self.get_label(iid,6) + '.png'
        if self.is_color:
            return cv2.imread(lbl)
        else:
            return cv2.imread(lbl, 0)
    def format_image(self, im1):
        """Shape one image for the network: CxHxW (1xHxW for grayscale),
        returning (image(s), [H, W], left-upper offset).

        NOTE(review): in the pyramid branch im_size/lu end up holding only
        the LAST pyramid level's size/offset (each loop iteration
        overwrites them) -- compare format_images, which appends per level.
        """
        im_size = [0,0]
        lu = [0,0]
        ims1 = []
        if self.is_pyramid:
            im_shape = im1.shape
            ims1 = self.prepare_pyramid_listed(im1)
            total_width = 0
            for im in ims1:
                total_width += im.shape[1]
            lus1 = []
            x = 0
            for i in range(0, len(ims1)):
                height = ims1[i].shape[0]
                width = ims1[i].shape[1]
                im_size = [height, width]
                lu = [0, x]
                x += width
        else:
            if self.is_color:
                im1 = np.transpose(im1, (2, 0, 1))
            else:
                im1 = im1.reshape((1, im1.shape[0], im1.shape[1]))
            im_size = im1.shape[1:3]
            lu = [0, 0]
            ims1 = im1
        return ims1, im_sizes, lus if False else (ims1, im_size, lu)
    def format_images(self, im1, im2):
        """Format an image pair into a single 2x1xHxW (or 2xCxHxW) array.

        In pyramid mode the levels of each image are concatenated
        horizontally into one wide canvas, and per-level sizes/offsets are
        returned so line coordinates can be remapped onto the canvas.
        """
        ims = []
        im_sizes = []
        lus = []
        if self.is_pyramid:
            im_shape = im1.shape
            ims1 = self.prepare_pyramid_listed(im1)
            ims2 = self.prepare_pyramid_listed(im2)
            total_width = 0
            for im in ims1:
                total_width += im.shape[1]
            ims = np.zeros((2, im1.shape[0], total_width), dtype=np.uint8)
            lus1 = []
            lus2 = []
            x = 0
            for i in range(0, len(ims1)):
                height = ims1[i].shape[0]
                width = ims1[i].shape[1]
                ims[0, 0:height, x:x+width] = ims1[i]
                ims[1, 0:height, x:x+width] = ims2[i]
                im_sizes.append([height, width])
                lus1.append([0, x])
                lus2.append([0, x])
                x += width
            ims = np.asarray(ims)
            ims = ims.reshape((2, 1, ims.shape[1], ims.shape[2]))
            lus = [lus1, lus2]
        else:
            ims = [im1, im2]
            ims = np.asarray(ims)
            if self.is_color:
                ims = np.transpose(ims, (0, 3, 1, 2))
            else:
                ims = ims.reshape((2,1,ims.shape[1], ims.shape[2]))
            im_sizes.append(im1.shape[0:2])
            lus = [[0,0], [0,0]]
        return ims, im_sizes, lus
    def format_lines_for_image_pyr(self, l, lus, im_sizes):
        # Remap each line whose octave > 0 onto the concatenated pyramid
        # canvas: scale by that level's size ratio and shift by its offset.
        # Mutates and returns `l` in place.
        for i in range(0, l.shape[1]):
            oct_ind = l[4, i]
            if oct_ind > 0:
                lu = lus[oct_ind]
                k_h = float(im_sizes[oct_ind][0]) / im_sizes[0][0]
                k_w = float(im_sizes[oct_ind][1]) / im_sizes[0][1]
                l[0, i] = l[0, i] * k_w + lu[1]
                l[2, i] = l[2, i] * k_w + lu[1]
                l[1, i] = l[1, i] * k_h + lu[0]
                l[3, i] = l[3, i] * k_h + lu[0]
        return l
    def format_lines(self, l_lst, im_sizes, lus):
        # Pyramid mode: remap both images' lines onto the pyramid canvas;
        # otherwise the lines are already in plain image coordinates.
        if self.is_pyramid:
            for ii in range(0,2):
                l = l_lst[ii]
                l_lst[ii] = self.format_lines_for_image_pyr(l, lus[ii], im_sizes)
            return l_lst
        else:
            return l_lst
    def normalize_points_for_gridsampler(self, im, li):
        # Map sampled point coordinates (axis 1: 0 = x, 1 = y) into the
        # [-1, 1] grid-sampler space; returns a float copy.
        w = im.shape[2]
        h = im.shape[1]
        l = li.astype(float)
        l[:, 0, :] = (l[:, 0, :] - w / 2.0) / (w / 2.0)
        l[:, 1, :] = (l[:, 1, :] - h / 2.0) / (h / 2.0)
        # print ('min - max x before ' + str(np.min(l[0, :]))+' ' + str(np.max(l[0, :])))
        return l
    # if do_sample_lines and not line_data.shape[0] == 0:
    # def form_neg_mask(self,all_negs):
    #
    def get_multi_frame_batch(self, si, call_id, f_lst):
        """Compose a multi-frame training dict for call `call_id` of
        sequence index `si`, over the frame offsets in `f_lst`.

        Frames with no positive matches are recorded as empty slots and
        filtered out at the end. Returns a dict with keys images, lines,
        negatives, positives, distances, image_ids, pair_ids.
        NOTE(review): `get_image_id`, `add_noise` and `line_sampler` are
        module-level helpers defined elsewhere in this file.
        """
        seq_id = self.seq_inds[si]
        all_negs = []
        all_pos = []
        all_lines = []
        self.img_ids = []
        self.pair_ids = []
        ims = []
        ds = []
        for f in f_lst:
            iid, pid = get_image_id(call_id, f)
            self.img_ids.append(iid)
            self.pair_ids.append(pid)
            line_data, negs_1, pos_1 = self.compose_frame_data_w_mask(seq_id, call_id, f)
            if len(pos_1)==0:
                # no positives: keep placeholder slots so indices line up
                all_negs.append([])
                all_pos.append(None)
                all_lines.append([])
                ims.append([])
                ds.append(-1)
                continue
            im1 = self.get_image_gs(call_id, seq_id, f)
            if self.is_add_noise:
                im1 = add_noise(im1)
            im1, im_size1, lu1 = self.format_image(im1)
            if self.do_sample_points:
                # resample each line into pt_per_line points before formatting
                line_data = line_sampler.prepare_grid_numpy_vec(line_data, 1.0, self.pt_per_line)
                line_data = self.format_lines(line_data, im_size1, lu1)
                # line_data = self.normalize_points_for_gridsampler(im1, line_data)
            else:
                # line_data = self.normalize_lines_for_gridsampler(im1, line_data)
                line_data = self.format_lines(line_data, im_size1, lu1)
            all_negs.append(negs_1)
            all_pos.append(pos_1)
            all_lines.append(line_data)
            ims.append(im1)
            ds.append(f)
        # all_inds = f_lst
        # print ('we sampled '+str(len(all_inds)))
        # print(all_inds)
        # rand_inds = np.random.choice(inds, n_lim-1, replace=False)
        # print(rand_inds)
        # all_inds = list(np.concatenate([np.zeros((1), dtype=int), rand_inds]))
        # def subfilter(lst, inds):
        #     print inds
        #     return [lst[i] for i in inds]
        fin_dict = {}
        fin_dict['images'] = ims
        # print(len(fin_dict['images']))
        fin_dict['lines'] = all_lines
        fin_dict['negatives'] = all_negs
        fin_dict['positives'] = all_pos
        # if self.is_report_dist:
        fin_dict['distances'] = ds
        fin_dict['image_ids'] = self.img_ids
        fin_dict['pair_ids'] = self.pair_ids
        #filter empty images
        p_fin = []
        neg_fin = []
        lines_fin = []
        ims_fin = []
        iids_fin = []
        pids_fin = []
        ds_fin = []
        for cnt in range(0, len(fin_dict['positives'])):
            p = fin_dict['positives'][cnt]
            if p is None:
                continue
            else:
                p_fin.append(p)
                neg_fin.append(fin_dict['negatives'][cnt])
                lines_fin.append(fin_dict['lines'][cnt])
                ims_fin.append(fin_dict['images'][cnt])
                iids_fin.append(fin_dict['image_ids'][cnt])
                pids_fin.append(fin_dict['pair_ids'][cnt])
                ds_fin.append(fin_dict['distances'][cnt])
        fin_dict['positives'] = p_fin
        fin_dict['negatives'] = neg_fin
        fin_dict['lines'] = lines_fin
        fin_dict['images'] = ims_fin
        fin_dict['image_ids'] = iids_fin
        fin_dict['pair_ids'] = pids_fin
        fin_dict['distances'] = ds_fin
        return fin_dict
    #we return
    #a) width-concatenated images in the array of size 2xCxWxH
    #b) line coords in a following format
    # [L1, L2], where each Li is an array of size 5 x Ni, Ni is the number of lines, and each column is [sx, sy, ex, ey, ii],
    # where (sx, sy) and (ex, ey) and line endpoints and ii is an image index to sample the line
    #c) pos_matches - a list of pairs such as (i,j) where i is an index of a line in L1, j is an index
    # of a line in L2 and these lines should have close descriptors. If i or j empty, no match in this image
    #d) neg_matches - a list (N1, N2), each Ni is a list of negative matches, where at a a place k there is
    # a list of negative matches for a positive pair number k in the list Li
    def get_dual_frame_batch(self, seq_id, call_id, f1, f2, dwn_factor=1.0):
        """Compose a two-frame batch (see the format comments above).

        Loads detections/matches for frames f1 and f2, pairs up the
        positive index arrays, optionally downsamples images and line
        coordinates by dwn_factor, and normalizes the lines for the
        grid sampler.
        NOTE(review): t0..t3 tick counts are collected but never used --
        presumably leftover profiling.
        """
        self.call_id = call_id
        self.f1 = f1
        self.f2 = f2
        t0 = cv2.getCPUTickCount()
        lines_1, negs_1, pos_1 = self.compose_frame_data(seq_id, call_id, f1)
        lines_2, negs_2, pos_2 = self.compose_frame_data(seq_id, call_id, f2)
        t1 = cv2.getCPUTickCount()
        pos_matches = []
        for p1i in range(0, len(pos_1)):
            p1 = pos_1[p1i]
            if len(pos_2) <= p1i:
                p2 = []
            else:
                p2 = pos_2[p1i]
            pos_matches.append([p1, p2])
        if not dwn_factor == 1.0:
            lines_1 = dwn_factor * lines_1
            lines_2 = dwn_factor * lines_2
        lines = [lines_1, lines_2]
        neg_matches = [negs_1, negs_2]
        t2 = cv2.getCPUTickCount()
        self.img_ids = []
        self.pair_ids = []
        self.seq_id = seq_id
        im1 = self.get_image_gs(call_id, seq_id, f1)
        im2 = self.get_image_gs(call_id, seq_id, f2)
        t3 = cv2.getCPUTickCount()
        dsize = (0,0)
        im1 = cv2.resize(im1, dsize, fx=dwn_factor, fy=dwn_factor, interpolation=cv2.INTER_LINEAR)
        im2 = cv2.resize(im2, dsize, fx=dwn_factor, fy=dwn_factor, interpolation=cv2.INTER_LINEAR)
        # ims, im_sizes, lus = self.format_images(im1, im2)
        ims1, im_size1, lu1 = self.format_image(im1)
        ims2, im_size2, lu2 = self.format_image(im2)
        ims = np.stack((ims1, ims2), axis=0)
        im_sizes = im_size1
        lus = [lu1, lu2]
        lines = self.format_lines(lines, im_sizes, lus)
        lines = [normalize_lines_for_gridsampler(ims[0], lines[0]),
                 normalize_lines_for_gridsampler(ims[1], lines[1])]
        if self.is_report_dist:
            return ims, lines, neg_matches, pos_matches, f2-f1, self.img_ids, self.pair_ids
        return ims, lines, neg_matches, pos_matches
class DatasetBatch(Dataset):
    """Torch-style dataset facade over DatasetBatchIndexer.

    Two modes:
      * batch_mode=True: each item is a multi-frame batch; with
        full_pass=True every predefined frame partition of every call is
        visited, otherwise frame subsets are sampled randomly.
      * batch_mode=False: each item is a (frame 0, frame f2) dual-frame
        pair inside a 22-frame call.

    NOTE(review): Python 2 code -- `sample_multiframe_randomly` relies on
    integer `/` division for `slotsize` and slice bounds.
    """
    def __init__(self, kitti_root_dir, root_dir, seq_inds =[], test_mode=False, is_color=False, is_pyramid=False,
                 is_careful=True, dwn_factor=1.0, is_rep_dist=False, batch_mode=False, full_pass=False, pt_per_line=5,
                 n_lim=6, is_noisy=False):
        self.indexer = DatasetBatchIndexer(kitti_root_dir, root_dir, seq_inds=seq_inds, is_color=is_color, is_pyramid=is_pyramid,
                                           is_careful=is_careful, is_report_dist=is_rep_dist, pt_per_line=pt_per_line, is_add_noise = is_noisy)
        self.dwn_factor = dwn_factor
        # each "call" holds 22 frames; frame 0 pairs with the other 21
        self.bs = 22-1
        self.exp_nums = {}
        self.exp_num = 0
        self.seq_ids = []
        self.test_mode = test_mode
        self.batch_mode = batch_mode
        self.full_pass = full_pass
        if batch_mode:
            self.n_lim = n_lim
            # pre-split the 22 frame indices into consecutive n_lim-sized
            # partitions used by the full_pass mode
            all_inds = np.arange(0, 22)
            n = int(len(all_inds)/self.n_lim) + 1
            self.part_inds = []
            for i in range(0, n):
                curr_ind = i * self.n_lim
                i_max = curr_ind+self.n_lim
                if i_max >= len(all_inds):
                    i_max = len(all_inds)
                self.part_inds.append(all_inds[curr_ind:i_max])
        # per-sequence example counts, scaled by how many items each
        # underlying call expands into for the chosen mode
        for i in self.indexer.exp_nums:
            n = self.indexer.exp_nums[i]
            # if test_mode:
            self.exp_nums[i] = n * self.bs #* (self.bs - 1) / 2
            if self.batch_mode:
                self.exp_nums[i] = n
                if self.full_pass:
                    self.exp_nums[i] = n * len(self.part_inds)
            # else:
            #     self.exp_nums[i] = n
            self.exp_num += self.exp_nums[i]
            self.seq_ids.append(i)
    def __len__(self):
        # test_mode caps an epoch at 100 items
        if self.test_mode:
            return 100
        return self.exp_num
    def sample_multiframe_randomly(self):
        """Pick n_lim frame indices: always frame 0, plus one random index
        from each of n_lim-1 consecutive slots of [1, bs].
        NOTE(review): slotsize/si/ei use Python 2 integer division; under
        Python 3 they would be floats and break the slicing.
        """
        n_lim = self.n_lim
        inds = list(np.arange(1, self.bs+1))
        all_inds = [0]
        slotsize = len(inds) / (n_lim - 1) + 1
        # print(slotsize)
        for i in range(0, n_lim - 1):
            if len(all_inds) == n_lim:
                continue
            si = i * slotsize
            ei = (i + 1) * slotsize
            ei = min(ei, len(inds))
            cur_inds = inds[si:ei]
            # print(si)
            # print(ei)
            # print(cur_inds)
            new_ind = np.random.choice(cur_inds, 1)
            all_inds.append(new_ind[0])
        return all_inds
    def sample_multiframe_predefined(self, part_id):
        # Deterministic counterpart of the random sampler (full_pass mode).
        return self.part_inds[part_id]
    def __getitem__(self, idx):
        # 1) locate which sequence the global idx falls into, and rebase
        #    idx to be local to that sequence
        seq_cnt = 0
        seq_id = self.seq_ids[seq_cnt]
        agg_sum = 0
        while idx >= agg_sum:
            agg_sum += self.exp_nums[seq_id]
            seq_cnt += 1
            if seq_cnt < len(self.seq_ids):
                seq_id = self.seq_ids[seq_cnt]
        seq_cnt -= 1
        seq_id = self.seq_ids[seq_cnt]
        agg_sum -= self.exp_nums[seq_id]
        idx = idx - agg_sum
        if self.batch_mode:
            if self.full_pass:
                # idx encodes (call, partition) pairs
                idx0 = int(idx /len(self.part_inds))
                part_id = idx - idx0 * len(self.part_inds)
                inds = self.sample_multiframe_predefined(part_id)
                mfb = self.indexer.get_multi_frame_batch(seq_id, idx0, inds)
            else:
                inds = self.sample_multiframe_randomly()
                mfb = self.indexer.get_multi_frame_batch(seq_id, idx, inds)
            # empty batch (all frames filtered): recurse to the next index
            if len(mfb['images']) == 0:
                if idx < len(self) - 1:
                    return self.__getitem__(idx + 1)
                else:
                    return self.__getitem__(0)
            else:
                return mfb
        else:
            # if self.test_mode:
            # dual-frame mode: pair frame 0 with frame pair_id+1 of the call
            batch_data_len = self.bs # * (self.bs - 1) / 2
            call_id = int(np.floor(idx / batch_data_len))
            pair_id = idx - call_id * batch_data_len
            f1 = 0
            f2 = pair_id + 1
            return self.indexer.get_dual_frame_batch(seq_id, call_id, f1, f2, self.dwn_factor)
            # else:
            #     call_id = idx
            #     all_inds = list(np.arange(0, self.bs))
            #     f1, f2 = random.sample(all_inds, 2)
            #     return self.indexer.get_dual_frame_batch(seq_id, call_id, f1, f2)
def get_combined_training_v2(pt_per_line=5, n_lim=6, is_noisy=False):
    """Build the train / test / second-test DataLoaders over the batched data.

    Sequences 0-9, 12, 13, 20, 21 form the shuffled (optionally noisy)
    training set; sequence 10 and sequence 11 are full-pass evaluation
    loaders, sequence 11 always without added noise.
    """
    kitti_path = '../kittieuroc/'
    data_path = '../batched/'
    is_pyramid = False
    is_careful = True
    downsample_factor = 1.0
    kwargs = {'num_workers': 4, 'pin_memory': True}

    def make_loader(dataset, shuffle):
        # every loader shares batch_size=1 and the custom collate function
        return torch.utils.data.DataLoader(dataset,
                                           batch_size=1,
                                           shuffle=shuffle,
                                           collate_fn=compose_batch,
                                           **kwargs)

    train_dataset = DatasetBatch(kitti_path, data_path,
                                 seq_inds=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 13, 20, 21],
                                 test_mode=False, is_color=False,
                                 is_pyramid=is_pyramid, is_careful=is_careful,
                                 dwn_factor=downsample_factor, batch_mode=True,
                                 pt_per_line=pt_per_line, n_lim=n_lim,
                                 is_noisy=is_noisy)  # 3,0
    test_dataset = DatasetBatch(kitti_path, data_path, [10],
                                test_mode=False, is_color=False,
                                is_pyramid=is_pyramid, is_careful=is_careful,
                                dwn_factor=downsample_factor, batch_mode=True,
                                pt_per_line=pt_per_line, n_lim=n_lim,
                                full_pass=True, is_noisy=is_noisy)  # 3,0
    # second evaluation split: noise is deliberately left at its default (off)
    test_dataset_2 = DatasetBatch(kitti_path, data_path, [11],
                                  test_mode=False, is_color=False,
                                  is_pyramid=is_pyramid, is_careful=is_careful,
                                  dwn_factor=downsample_factor, batch_mode=True,
                                  pt_per_line=pt_per_line, n_lim=n_lim,
                                  full_pass=True)  # 3,0
    train_loader = make_loader(train_dataset, True)
    test_loader = make_loader(test_dataset, False)
    test_loader_2 = make_loader(test_dataset_2, False)
    return train_loader, test_loader, test_loader_2
def get_combined_test(seq_id, pt_per_line=5, n_lim=6):
    """Build a full-pass, non-shuffled evaluation DataLoader for one sequence."""
    kitti_path = '/media/hpc2_storage/avakhitov/kittieuroc/'
    data_path = '/media/hpc2_storage/avakhitov/kittieuroc/batched/'
    # fixed evaluation configuration: no pyramid, careful negatives, no downsampling
    test_dataset = DatasetBatch(kitti_path, data_path, [seq_id],
                                test_mode=False, is_color=False,
                                is_pyramid=False, is_careful=True,
                                dwn_factor=1.0, batch_mode=True,
                                pt_per_line=pt_per_line, n_lim=n_lim,
                                full_pass=True)  # 3,0
    loader_opts = {'num_workers': 4, 'pin_memory': True}
    return torch.utils.data.DataLoader(test_dataset,
                                       batch_size=1,
                                       shuffle=False,
                                       collate_fn=compose_batch,
                                       **loader_opts)
class InferenceDataset(Dataset):
    """Serves single images plus resampled line detections for inference.

    `datadict` is a sequence of tuples:
    (image_path, detections_path, image_id, pair_id).
    """

    def __init__(self, datadict, pt_per_line=5):
        self.datadict = datadict
        self.pt_per_line = pt_per_line

    def __len__(self):
        return len(self.datadict)

    def __getitem__(self, idx):
        img_fullpath, dets_fullpath, id, pair_id = self.datadict[idx]
        # grayscale image reshaped to a 1xHxW array
        image = cv2.imread(img_fullpath, 0)
        image = np.reshape(image, (1, image.shape[0], image.shape[1]))
        # raw line detections resampled into pt_per_line points per line
        detections = cv2.imread(dets_fullpath, -1)
        detections = line_sampler.prepare_grid_numpy_vec(detections, 1.0, self.pt_per_line)
        return {
            'images': image,
            'lines': detections,
            'image_ids': id,
            'pair_ids': pair_id,
        }
| 28,142 | 36.22619 | 177 | py |
lld-public | lld-public-master/data/__init__.py | 0 | 0 | 0 | py | |
DocBank | DocBank-master/scripts/pdf_process.py | import multiprocessing
import argparse
import pdfplumber
import os
from tqdm import tqdm
from pdfminer.layout import LTChar, LTLine
import re
from collections import Counter
import pdf2image
import numpy as np
from PIL import Image
def within_bbox(bbox_bound, bbox_in, threshold=0.95):
    """Return True if at least `threshold` of bbox_in's area lies inside bbox_bound.

    Boxes are (x0, y0, x1, y1) with x0 <= x1 and y0 <= y1 (asserted).
    Despite the original local name `iou`, the ratio computed here is
    intersection over bbox_in's OWN area (a containment measure), not IoU.
    A degenerate (zero-area) bbox_in always yields False.

    The cutoff used to be hard-coded at 0.95; it is now a keyword
    parameter with the same default, so existing callers are unaffected.
    """
    assert bbox_bound[0] <= bbox_bound[2]
    assert bbox_bound[1] <= bbox_bound[3]
    assert bbox_in[0] <= bbox_in[2]
    assert bbox_in[1] <= bbox_in[3]

    x_left = max(bbox_bound[0], bbox_in[0])
    y_top = max(bbox_bound[1], bbox_in[1])
    x_right = min(bbox_bound[2], bbox_in[2])
    y_bottom = min(bbox_bound[3], bbox_in[3])
    if x_right < x_left or y_bottom < y_top:
        return False  # boxes do not overlap at all

    intersection_area = (x_right - x_left) * (y_bottom - y_top)
    bbox_in_area = (bbox_in[2] - bbox_in[0]) * (bbox_in[3] - bbox_in[1])
    if bbox_in_area == 0:
        return False  # zero-area box: containment is undefined

    # fraction of the inner box covered by the bounding box
    overlap = intersection_area / float(bbox_in_area)
    return overlap > threshold
def _scale_bbox_to_1000(bbox, width, height):
    """Clamp-scale an absolute-coordinate bbox into the 0-1000 DocBank space."""
    f_x0 = min(1000, max(0, int(bbox[0] / width * 1000)))
    f_y0 = min(1000, max(0, int(bbox[1] / height * 1000)))
    f_x1 = min(1000, max(0, int(bbox[2] / width * 1000)))
    f_y1 = min(1000, max(0, int(bbox[3] / height * 1000)))
    return (f_x0, f_y0, f_x1, f_y1)


def _paint_bbox(anno_img, bbox, width, height, inclusive=False):
    """Paint a 0-1000-space bbox black onto `anno_img` (indexed [x, y, rgb]).

    Vectorized slice assignment replacing the original O(area) Python
    pixel loops. With inclusive=True the x1 column / y1 row are painted
    too (the original used closed ranges for LTLine boxes, so
    zero-thickness rules stay visible).
    """
    x0, y0, x1, y1 = bbox
    x0, y0 = int(x0 * width / 1000), int(y0 * height / 1000)
    x1, y1 = int(x1 * width / 1000), int(y1 * height / 1000)
    pad = 1 if inclusive else 0
    anno_img[x0:x1 + pad, y0:y1 + pad] = [0, 0, 0]


def worker(pdf_file, data_dir, output_dir):
    """Convert one PDF into DocBank-style per-page outputs.

    For every page, writes three files into output_dir:
      <stem>_<page>_ori.jpg  -- rendered page image
      <stem>_<page>_ann.jpg  -- black-box annotation mask
      <stem>_<page>.txt      -- tab-separated tokens:
                                text, x0, y0, x1, y1, fontname
    Word/figure/line coordinates are clamped into the 0-1000 DocBank
    space. PDFs that fail to render or parse are skipped silently
    (best-effort behavior kept from the original).

    Refactor notes: the triplicated scale/paint code now lives in
    `_scale_bbox_to_1000` / `_paint_bbox`; the original's unused
    accumulation of LTLine objects into a local `lines` list was dropped.
    """
    try:
        pdf_images = pdf2image.convert_from_path(os.path.join(data_dir, pdf_file))
    except Exception:
        return
    page_tokens = []  # collected but never returned; kept for parity with original
    try:
        pdf = pdfplumber.open(os.path.join(data_dir, pdf_file))
    except Exception:
        return
    stem = pdf_file.replace('.pdf', '')
    for page_id in tqdm(range(len(pdf.pages))):
        tokens = []
        this_page = pdf.pages[page_id]
        width = int(this_page.width)
        height = int(this_page.height)
        # white canvas indexed [x, y]; axes are swapped to [y, x] before saving
        anno_img = np.ones([width, height] + [3], dtype=np.uint8) * 255
        words = this_page.extract_words(x_tolerance=1.5)
        for word in words:
            word_bbox = (float(word['x0']), float(word['top']),
                         float(word['x1']), float(word['bottom']))
            # majority-vote the font over the LTChar objects inside the word box
            # (char bboxes are flipped from PDF bottom-up to top-down coords)
            fontname = [obj.fontname
                        for obj in this_page.layout._objs
                        if isinstance(obj, LTChar)
                        and within_bbox(word_bbox,
                                        (obj.bbox[0], float(this_page.height) - obj.bbox[3],
                                         obj.bbox[2], float(this_page.height) - obj.bbox[1]))]
            if len(fontname) != 0:
                fontname, _ = Counter(fontname).most_common(1)[0]
            else:
                fontname = 'default'
            word_bbox = _scale_bbox_to_1000(word_bbox, width, height)
            _paint_bbox(anno_img, word_bbox, width, height)
            word_text = re.sub(r"\s+", "", word['text'])
            tokens.append((word_text,) + tuple(str(t) for t in word_bbox) + (fontname,))
        for figure in this_page.figures:
            figure_bbox = (float(figure['x0']), float(figure['top']),
                           float(figure['x1']), float(figure['bottom']))
            figure_bbox = _scale_bbox_to_1000(figure_bbox, width, height)
            _paint_bbox(anno_img, figure_bbox, width, height)
            tokens.append(('##LTFigure##',) + tuple(str(t) for t in figure_bbox) + ('default',))
        for line in this_page.lines:
            line_bbox = (float(line['x0']), float(line['top']),
                         float(line['x1']), float(line['bottom']))
            line_bbox = _scale_bbox_to_1000(line_bbox, width, height)
            # closed pixel range so horizontal/vertical rules keep visible width
            _paint_bbox(anno_img, line_bbox, width, height, inclusive=True)
            tokens.append(('##LTLine##',) + tuple(str(t) for t in line_bbox) + ('default',))
        anno_img = np.swapaxes(anno_img, 0, 1)
        anno_img = Image.fromarray(anno_img, mode='RGB')
        page_tokens.append((page_id, tokens, anno_img))
        pdf_images[page_id].save(
            os.path.join(output_dir, stem + '_{}_ori.jpg'.format(str(page_id))))
        anno_img.save(
            os.path.join(output_dir, stem + '_{}_ann.jpg'.format(str(page_id))))
        with open(os.path.join(output_dir, stem + '_{}.txt'.format(str(page_id))),
                  'w',
                  encoding='utf8') as fp:
            for token in tokens:
                fp.write('\t'.join(token) + '\n')
if __name__ == '__main__':
    # CLI entry point: convert every PDF in --data_dir into per-page
    # token/annotation files under --output_dir (see worker()).
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the pdf files.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the output data will be written.",
    )
    args = parser.parse_args()
    # Keep only PDF files from the input directory.
    pdf_files = list(os.listdir(args.data_dir))
    pdf_files = [t for t in pdf_files if t.endswith('.pdf')]
    # pool = multiprocessing.Pool(processes=1)
    # Sequential processing; the multiprocessing-pool variant is kept
    # commented out for reference.
    for pdf_file in tqdm(pdf_files):
        # pool.apply_async(worker, (pdf_file, args.data_dir, args.output_dir))
        worker(pdf_file, args.data_dir, args.output_dir)
    # pool.close()
    # pool.join()
| 7,516 | 36.029557 | 115 | py |
DocBank | DocBank-master/scripts/coco_format_scripts/DocbankToCOCO.py | import os
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
import json
from pprint import PrettyPrinter as pprint
from IPython.display import JSON
import re
from tqdm import tqdm
import traceback
class COCOData:
    """
    COCOData class allows DocBank dataset to be converted to COCO Format.
    Functions Available:
    1. read_src_folder(src_path, dest_path): Reads all the DocBank JSON label files from the provided parent path and stores the destination path for later use to save the converted labels.
    2. create_dict_layout(): Creates a basic layout for COCO format with basic static information.
    3. set_image_properties(file_name, image_id): Sets Image Properties. Used in convert_to_coco() function.
    4. set_caption_properties(object_dict, doc_object): Set Caption Properties. Used in set_object_properties() function.
    5. set_object_properties(doc_object, doc_object_id, image_id): Set the Object properties. Used in convert_to_coco() function.
    6. convert_to_coco(): Convert the source dataset to COCO format and store the converted data in coco_dictionary.
    7. save_coco_dataset(): Saves the converted dataset into the destination folder (Destination Folder was provided in read_src_folder function).
    """
    def __init__(self):
        # parallel lists, one entry per source label file
        self.src_file_path = []
        self.coco_file_path = []
        self.src_dictionary = []
        self.coco_dictionary = []
    def read_src_folder(self, src_path, dest_path):
        """
        Stores the full path of the JSON files into self.src_file_path
        Stores the content of the source JSON files into self.coco_dictionary
        Stores the full path to the new files (in COCO format)
        """
        # NOTE(review): `i` is assigned but never used.
        i=0
        # Fetch each text file from the folders
        for path in tqdm(Path(src_path).rglob('*.txt'), desc="Loading Source Files"):
            # Open the file and read the content in JSON datatype
            file = pd.read_table(path, header=None, names=["token", "x0","y0", "x1", "y1", "R", "G","B", "name", "label"])
            # Prepare string for coco format json file
            coco_file_path = str(path).replace(".txt", ".json")
            coco_file_path = coco_file_path.replace(src_path, dest_path)
            self.src_file_path.append(str(path))
            self.src_dictionary.append(file)
            self.coco_file_path.append(str(coco_file_path))
    def create_dict_layout(self):
        # Build an empty COCO skeleton: info/licenses/categories plus
        # empty images and annotations lists to be filled by convert_to_coco().
        temp_dict = {}
        temp_dict["info"] = {
            "year": "",
            "version": "1",
            "description": "",
            "contributor": "",
            "url": "",
            "date_created": "",
        }
        temp_dict["licenses"] = []
        # NOTE(review): this bare list literal is a no-op expression --
        # it is evaluated and discarded (looks like a leftover reference
        # list of the DocBank label names).
        ['abstract',
         'author',
         'caption',
         'equation',
         'figure',
         'footer',
         'list',
         'paragraph',
         'reference',
         'section',
         'table',
         'title']
        temp_dict["categories"] = [{"id": 0,"name": "Abstract","supercategory": ""},{"id": 1,"name": "Author","supercategory": ""},
                                   {"id": 2,"name": "Caption","supercategory": ""},{"id": 3,"name": "Equation","supercategory": ""},
                                   {"id": 4,"name": "Figure","supercategory": ""},{"id": 5,"name": "Footer","supercategory": ""},
                                   {"id": 6,"name": "List","supercategory": ""},{"id": 7,"name": "Paragraph","supercategory": ""},
                                   {"id": 8,"name": "Reference","supercategory": ""},{"id": 9,"name": "Section","supercategory": ""},
                                   {"id": 10,"name": "Table","supercategory": ""},{"id": 11,"name": "Title","supercategory": ""},
                                   {"id": 12,"name": "Date","supercategory": ""}]
        temp_dict["images"] = []
        temp_dict["annotations"] = []
        return temp_dict
    # Image denotes the image of a page where a set of objects exist
    def set_image_properties(self, file_name, image_id):
        # Get parent folder and the json file name separately.
        # NOTE(review): image_path is unused; height/width are left empty.
        image_path, image_name = os.path.split(file_name)
        image_dict = {
            "id": image_id,
            "license": "",
            "file_name": image_name,
            "height": "",
            "width": "",
            "date_captured": "",
        }
        return image_dict
    # Object denotes either a Table or Figure
    def set_object_properties(self, doc_object, doc_object_id, image_id):
        # Build one COCO annotation entry from a DocBank row
        # (doc_object columns: token, x0, y0, x1, y1, R, G, B, name, label).
        object_dict = {}
        object_dict["id"] = doc_object_id
        object_dict["image_id"] = image_id
        object_dict["iscrowd"] = 0
        object_dict["segmentation"] = []
        category_list = {
            'abstract': 0, 'author': 1, 'caption': 2, 'equation': 3, 'figure': 4, 'footer': 5,
            'list': 6, 'paragraph': 7, 'reference': 8, 'section': 9, 'table': 10, 'title': 11, "date": 12}
        object_dict["category_id"] = category_list[doc_object[9]]
        # NOTE(review): the trailing commas make these 1-tuples, hence the
        # [0] indexing below.
        object_width = doc_object[3] - doc_object[1],
        object_height = doc_object[4] - doc_object[2],
        # COCO bbox format: [x, y, width, height]
        object_dict["bbox"] = [
            int(doc_object[1]),
            int(doc_object[2]),
            int(object_width[0]),
            int(object_height[0])
        ]
        object_dict["area"] = int(object_width[0] * object_height[0])
        return object_dict
    def set_caption_properties(self, object_dict, doc_object):
        # NOTE(review): expects a 'caption_text' key; not called anywhere
        # in this class despite the class docstring -- verify before use.
        object_dict["caption"] = doc_object["caption_text"]
    def convert_to_coco(self):
        # Convert every loaded source file into a COCO dict; image ids are
        # global across files, as are annotation ids.
        try:
            # Init Image ID
            image_id = 0
            # Init Object ID
            doc_object_id = 0
            # Fetch each JSON file present in the folders
            for i in tqdm(range(len(self.src_file_path)), desc="Convering Source JSON to COCO JSON"):
                json_dict = self.create_dict_layout()
                image_dict = self.set_image_properties(os.path.split(self.coco_file_path[i])[1].replace(".json", ".jpg"), image_id)
                # Each Image present in the file is fetched and added to a cocoData object
                for doc_object in self.src_dictionary[i].values:
                    object_dict = self.set_object_properties(doc_object, doc_object_id, image_id)
                    # Add the object properties to the annotations key in COCO
                    json_dict["annotations"].append(object_dict)
                    # Increment the object ID for next annotated object in the file
                    doc_object_id += 1
                # Increment the Image ID for the next Image in the file
                image_id+=1
                # Extract Image width and height if annotations exist. There has to be atleast one annotation for an image to have the dimension attributes.
                json_dict["images"].append(image_dict)
                self.coco_dictionary.append(json_dict)
        except:
            traceback.print_exc()
    # Converts final dictionary in COCO format for storing into file.
    def save_coco_dataset(self):
        # Write each converted dict next to its intended destination path,
        # creating directories as needed. Errors are printed, not raised.
        try:
            for i in tqdm(range(len(self.coco_file_path))):
                coco_file_dir = os.path.split(self.coco_file_path[i])[0]
                if not os.path.exists(coco_file_dir):
                    # Creates the parent folder and all the subfolders for the file.
                    # Does not throw an error if parent or any subfolders already exists.
                    Path(coco_file_dir).mkdir(parents=True, exist_ok=True)
                output_file = open(self.coco_file_path[i], mode="w")
                output_file.writelines(json.dumps(self.coco_dictionary[i], indent=4))
        except:
            traceback.print_exc()
        # finally:
        #     # output_file.close()
| 7,813 | 42.653631 | 193 | py |
xcos | xcos-master/src/main.py | import os
import argparse
# import warnings
import torch
from utils.logging_config import logger
from pipeline import TrainingPipeline, TestingPipeline, EvaluationPipeline
def main(args):
    """Entry point: restore an optional checkpoint, pin the visible GPUs,
    then build and run the pipeline selected by ``args.mode``."""
    # Load the checkpoint (carries epoch / optimizer state) when resuming.
    resumed_checkpoint = None
    if args.resume is not None:
        logger.info(f"Resuming checkpoint: {args.resume} ...")
        resumed_checkpoint = torch.load(args.resume)
    args.resumed_checkpoint = resumed_checkpoint

    # Restrict visible GPUs before any CUDA context is created.
    if args.device:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device

    # Dispatch table replacing the original if/elif chain.
    pipeline_classes = {
        'train': TrainingPipeline,
        'test': TestingPipeline,
        'eval': EvaluationPipeline,
    }
    if args.mode not in pipeline_classes:
        raise NotImplementedError(f'Mode {args.mode} not defined.')
    pipeline = pipeline_classes[args.mode](args)
    pipeline.run()
def parse_args():
    """Build and parse the command-line arguments for the template runner.

    Returns the parsed Namespace; when ``--template_config`` is not
    supplied, a per-mode default path is filled in.

    Fix: corrected typos in the user-facing help text
    ('configuraion' -> 'configuration', 'experiemnt' -> 'experiment').
    """
    parser = argparse.ArgumentParser(description='PyTorch Template')
    parser.add_argument(
        '-tc', '--template_config', default=None, type=str,
        help=('Template configuration file. It should contain all default configuration '
              'and will be overwritten by specified config.')
    )
    parser.add_argument(
        '-sc', '--specified_configs', default=None, type=str, nargs='+',
        help=('Specified configuration files. They serve as experiment controls and will '
              'overwrite template configs.')
    )
    parser.add_argument('-r', '--resume', default=None, type=str,
                        help='path to latest checkpoint (default: None)')
    parser.add_argument('-p', '--pretrained', default=None, type=str,
                        help='path to pretrained checkpoint (default: None)')
    parser.add_argument('-d', '--device', default=None, type=str,
                        help='indices of GPUs to enable (default: all)')
    parser.add_argument('--mode', type=str, choices=['train', 'test', 'eval'], default='train')
    parser.add_argument('--saved_keys', default=['data_target', 'model_output'], type=str, nargs='+',
                        help='Specify the keys to save at testing mode.')
    parser.add_argument('--ckpts_subdir', type=str, default='ckpts', help='Subdir name for ckpts saving.')
    parser.add_argument('--outputs_subdir', type=str, default='outputs', help='Subdir name for outputs saving.')
    args = parser.parse_args()

    # Set template config to default if not given
    if args.template_config is None:
        args.template_config = f'configs/template_{args.mode}_config.json'
    return args
if __name__ == '__main__':
    # with warnings.catch_warnings():
    #     warnings.simplefilter('error')
    # Script entry point: parse CLI arguments and hand off to main().
    args = parse_args()
    main(args)
| 2,920 | 35.5125 | 112 | py |
xcos | xcos-master/src/GradCam.py | from PIL import Image
import cv2
import numpy as np
import torch.nn.functional as F
from model.xcos_modules import l2normalize
class GradientExtractor:
    """Runs images through the backbone while hooking the trunk activation
    (output of ``model.body``) so its gradient is recorded on backward.
    """

    def __init__(self, model):
        self.model = model

    def save_gradient(self, grad):
        # Backward-hook target: gradients accumulate in call order.
        self.gradients.append(grad)

    def forward(self, x):
        # input layer -> trunk (hooked) -> output head
        trunk_out = self.model.body(self.model.input_layer(x))
        trunk_out.register_hook(self.save_gradient)
        embedding = self.model.output_layer(trunk_out)
        return [trunk_out], embedding

    def __call__(self, img1, img2):
        self.gradients = []  # reset recorded gradients for this pair
        feats_a, emb_a = self.forward(img1)
        feats_b, emb_b = self.forward(img2)
        return feats_a, feats_b, emb_a, emb_b
class ModelOutputs:
    """ Making a forward pass, and getting:
        1. The intermediate activations (with gradient hooks attached).
        2. The cosine similarity between the two L2-normalized embeddings.
    """

    def __init__(self, model):
        self.model = model
        self.extractor = GradientExtractor(self.model)

    def get_grads(self):
        # Gradients recorded by the extractor's hooks during backward().
        return self.extractor.gradients

    def __call__(self, img1, img2):
        feat1, feat2, emb1, emb2 = self.extractor(img1, img2)
        cos = F.cosine_similarity(
            l2normalize(emb1), l2normalize(emb2), dim=1, eps=1e-6)
        return feat1, feat2, cos
class FaceGradCam:
    """Grad-CAM for face verification: highlights the spatial regions that
    drive the cosine similarity between two face embeddings.
    """

    def __init__(self, model):
        self.model = model
        self.extractor = ModelOutputs(self.model)

    def __call__(self, img1, img2):
        # Backprop the similarity score, then turn the captured gradients
        # and activations into one heatmap per image.
        feat1, feat2, output = self.extractor(img1, img2)
        self.model.zero_grad()
        output.backward(retain_graph=True)
        grads = self.extractor.get_grads()
        pairs = ((grads[0], feat1[0]), (grads[1], feat2[0]))
        heatmaps = [
            self.make_heatmap(g.cpu().data.numpy(), f.cpu().data.numpy())
            for g, f in pairs
        ]
        return heatmaps[0], heatmaps[1]

    def make_heatmap(self, grad, feat):
        """Batch operation supported"""
        # Grad-CAM: channel weights = spatially averaged gradients,
        # weighted activation sum -> ReLU -> per-sample min/max normalize.
        channel_weights = np.mean(grad, axis=(-2, -1), keepdims=True)
        cam = (channel_weights * feat).sum(axis=1)
        cam = np.maximum(0, cam)
        cam = cam - np.min(cam, axis=(-2, -1), keepdims=True)
        cam = cam / np.max(cam, axis=(-2, -1), keepdims=True)
        # Invert so low-similarity regions stand out.
        return 1. - cam

    def make_img(self, heatmap, size, ori_img=None):
        """Batch operation NOT suppored"""
        colored = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
        colored = cv2.resize(colored, size)
        if ori_img is not None:
            # Blend the colormap with the (CHW, [-1, 1]) original image.
            colored = (np.float32(colored) / 255
                       + np.transpose(ori_img.numpy(), (1, 2, 0)) * 0.5 + 0.5)
            colored /= np.max(colored)
        colored = np.uint8(255 * colored)
        return Image.fromarray(colored)
| 2,847 | 27.48 | 92 | py |
xcos | xcos-master/src/pipeline/base_pipeline.py | import os
import json
import datetime
import logging
from abc import ABC, abstractmethod
import torch
import pandas as pd
from utils.util import get_instance
from utils.visualization import WriterTensorboard
from utils.logging_config import logger
from utils.global_config import global_config
import data_loader.data_loaders as module_data
import model.metric as module_metric
import model.model as module_arch
class BasePipeline(ABC):
    """
    Base pipeline for training/validation/testing process.

    Owns the objects shared by all workers (device, model, data loaders,
    metrics, Tensorboard writer) and drives config setup, checkpoint
    resuming, and epoch-level logging. Subclasses must implement
    _setup_config() and _create_workers().
    """
    def __init__(
        self, args
    ):
        # NOTE(review): this reads args.resumed_checkpoint while the
        # _create_saving_dir overrides read args.resume — confirm the CLI
        # layer provides both attributes on `args`.
        global_config.setup(args.template_config, args.specified_configs, args.resumed_checkpoint)
        # Timestamp used to namespace checkpoint and Tensorboard directories.
        self.start_time = datetime.datetime.now().strftime('%m%d_%H%M%S')
        self.saving_dir = self._create_saving_dir(args)
        self._add_logging_file_handler()
        self._save_config_file()
        self._print_config_messages()
        self.device, self.device_ids = self._setup_device()
        self.data_loader = self._setup_data_loader()
        self.valid_data_loaders = self._setup_valid_data_loaders()
        self.test_data_loaders = self._setup_test_data_loaders()
        self.optimize_strategy = global_config.get('optimize_strategy', 'normal')
        self.validation_strategy = global_config.get('validation_strategy', self.optimize_strategy)
        self._setup_model()
        self._setup_data_parallel()
        self._setup_writer()
        self.evaluation_metrics = self._setup_evaluation_metrics()
        self._setup_pipeline_specific_attributes()
        self._setup_config()
        # Resume/pretrained loading happens after all attributes exist so the
        # checkpoint can overwrite optimizer states and iteration counters.
        if args.resumed_checkpoint is not None:
            self._resume_checkpoint(args.resumed_checkpoint)
        if args.pretrained is not None:
            self._load_pretrained(args.pretrained)
        self.worker_outputs = {}
        self.workers = self._create_workers()

    @abstractmethod
    def _setup_config(self):
        """Read pipeline-specific entries from global_config."""
        pass

    def _setup_pipeline_specific_attributes(self):
        # Hook for subclasses (e.g. losses/optimizers in TrainingPipeline).
        pass

    @abstractmethod
    def _create_workers(self):
        """Return the list of workers this pipeline runs each epoch."""
        return []

    # =============== functions for setting up attributes (start) ================

    @abstractmethod
    def _create_saving_dir(self, resume_path):
        """ Create directory to save ckpt, config, and logged messages. Return the created path """
        pass

    def _save_config_file(self):
        # Save configuration file into checkpoint directory
        config_save_path = os.path.join(self.saving_dir, 'config.json')
        with open(config_save_path, 'w') as handle:
            json.dump(global_config, handle, indent=4, sort_keys=False)

    def _add_logging_file_handler(self):
        # Mirror all logger output into <saving_dir>/log.txt.
        fileHandler = logging.FileHandler(os.path.join(self.saving_dir, 'log.txt'))
        logger.addHandler(fileHandler)

    def _print_config_messages(self):
        global_config.print_changed()
        logger.info(f'Experiment name: {global_config["name"]}')

    def _setup_device(self):
        def prepare_device(n_gpu_use):
            """
            setup GPU device if available, move model into configured device
            """
            n_gpu = torch.cuda.device_count()
            if n_gpu_use > 0 and n_gpu == 0:
                logger.warning(
                    "Warning: There\'s no GPU available on this machine, training will be performed on CPU.")
                n_gpu_use = 0
            if n_gpu_use > n_gpu:
                msg = (f"Warning: The number of GPU\'s configured to use is {n_gpu_use} "
                       f"but only {n_gpu} are available on this machine.")
                logger.warning(msg)
                n_gpu_use = n_gpu
            device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
            list_ids = list(range(n_gpu_use))
            return device, list_ids
        device, device_ids = prepare_device(global_config['n_gpu'])
        return device, device_ids

    def _setup_model(self):
        """ Setup model and print summary """
        model = get_instance(
            module_arch, 'arch', global_config,
        )
        # Print out the model architecture and number of parameters
        model.summary()
        self.model = model.to(self.device)

    def _setup_data_parallel(self):
        if len(self.device_ids) > 1:
            self.model = torch.nn.DataParallel(self.model, device_ids=self.device_ids)

    def _get_non_parallel_model(self):
        # Unwrap DataParallel so state_dict keys are not prefixed with 'module.'.
        model = self.model.module if isinstance(self.model, torch.nn.DataParallel) else self.model
        return model

    def _setup_data_loader(self, key='data_loader'):
        return get_instance(module_data, key, global_config)

    def _setup_data_loaders(self, key):
        # Instantiate every loader listed under global_config[key].
        data_loaders = [
            getattr(module_data, entry['type'])(**entry['args'])
            for entry in global_config[key].values()
        ]
        return data_loaders

    def _setup_valid_data_loaders(self):
        # Explicit valid_data_loaders and a validation split of the training
        # loader are mutually exclusive.
        if 'valid_data_loaders' in global_config.keys():
            valid_data_loaders = self._setup_data_loaders('valid_data_loaders')
            if self.data_loader.validation_split > 0:
                raise ValueError(f'Split ratio should not > 0 when other validation loaders are specified.')
        elif self.data_loader.validation_split > 0:
            valid_data_loaders = [self.data_loader.split_validation()]
        else:
            valid_data_loaders = []
        return valid_data_loaders

    def _setup_test_data_loaders(self):
        # Only the testing pipeline provides test loaders.
        return None

    def _setup_evaluation_metrics(self):
        evaluation_metrics = [
            getattr(module_metric, entry['type'])(**entry['args']).to(self.device)
            for entry in global_config['metrics'].values()
        ]
        return evaluation_metrics

    def _setup_optimizers(self):
        """ Setup optimizers according to configuration.
        Each optimizer has its corresponding network(s) to train, specified by 'target_network' in configuration.
        If no `target_network` is specified, all parameters of self.model will be included.
        """
        self.optimizers = {}
        for name, entry in global_config['optimizers'].items():
            model = self._get_non_parallel_model()
            if 'target_network' in entry.keys():
                network = getattr(model, entry['target_network'])
            else:
                network = model
                logger.warning(f'Target network of optimizer "{name}" not specified. '
                               f'All params of self.model will be included.')
            # Only optimize parameters that require gradients (frozen layers excluded).
            trainable_params = filter(lambda p: p.requires_grad, network.parameters())
            self.optimizers[name] = getattr(torch.optim, entry['type'])(trainable_params, **entry['args'])

    def _setup_writer(self):
        # setup visualization writer instance
        writer_dir = os.path.join(global_config['visualization']['log_dir'], global_config['name'], self.start_time)
        self.writer = WriterTensorboard(writer_dir, logger, global_config['visualization']['tensorboardX'])
        # Iteration counters; may be overwritten by _resume_training_state().
        self.start_epoch = 1
        self.train_iteration_count = 0
        self.valid_iteration_counts = [0] * len(self.valid_data_loaders)

    # =============== functions for setting up attributes (end) ================

    def _load_pretrained(self, pretrained_path):
        """ Load pretrained model not strictly """
        logger.info(f"Loading pretrained checkpoint: {pretrained_path} ...")
        checkpoint = torch.load(pretrained_path)
        model = self._get_non_parallel_model()
        # strict=False: tolerate missing/unexpected keys from the checkpoint.
        model.load_state_dict(checkpoint['state_dict'], strict=False)

    def _resume_checkpoint(self, resumed_checkpoint):
        """
        Resume from saved resumed_checkpoints
        :param resume_path: resumed_checkpoint path to be resumed
        """
        self._resume_model_params(resumed_checkpoint)
        # Local import to avoid a circular dependency with training_pipeline.
        from .training_pipeline import TrainingPipeline
        if isinstance(self, TrainingPipeline):
            self._resume_training_state(resumed_checkpoint)
        logger.info(f"resumed_checkpoint (trained epoch {self.start_epoch - 1}) loaded")

    def _resume_training_state(self, resumed_checkpoint):
        """ States only for training pipeline like iteration counts, optimizers,
        and lr_schedulers are resumed in this function """
        self.start_epoch = resumed_checkpoint['epoch'] + 1
        self.monitor_best = resumed_checkpoint['monitor_best']
        # Estimated iteration_count is based on length of the current data loader,
        # which will be wrong if the batch sizes between the two training processes are different.
        self.train_iteration_count = resumed_checkpoint.get('train_iteration_count', 0)
        self.valid_iteration_counts = resumed_checkpoint.get(
            'valid_iteration_counts', [0] * len(self.valid_data_loaders))
        self.valid_iteration_counts = list(self.valid_iteration_counts)
        # load optimizer state from resumed_checkpoint only when optimizer type is not changed.
        optimizers_ckpt = resumed_checkpoint['optimizers']
        for key in global_config['optimizers'].keys():
            if key not in optimizers_ckpt.keys():
                logger.warning(f'Optimizer name {key} in config file is not in checkpoint (not resumed)')
            elif resumed_checkpoint['config']['optimizers'][key]['type'] != global_config['optimizers'][key]['type']:
                logger.warning(f'Optimizer type in config file is different from that of checkpoint (not resumed)')
            else:
                self.optimizers[key].load_state_dict(optimizers_ckpt[key])

    def _resume_model_params(self, resumed_checkpoint):
        """ Load model parameters from resumed checkpoint """
        # load architecture params from resumed_checkpoint.
        if resumed_checkpoint['config']['arch'] != global_config['arch']:
            logger.warning(
                'Warning: Architecture config given in config file is different from that of resumed_checkpoint. '
                'This may yield an exception while state_dict is being loaded.'
            )
        model = self._get_non_parallel_model()
        model.load_state_dict(resumed_checkpoint['state_dict'])

    def _print_and_write_log(self, epoch, worker_outputs, write=True):
        # This function is to print out epoch summary of workers
        # and append these summary values on the summary csv file.
        if write:
            self.writer.set_step(epoch, 'epoch_average') # TODO: See if we can use tree-structured tensorboard logging
        logger.info(f' epoch: {epoch:d}')
        epoch_record = {'epoch': epoch}
        # print the logged info for each loader (corresponding to each worker)
        for loader_name, output in worker_outputs.items():
            log = output['log']
            if global_config.verbosity >= 1:
                logger.info(f' {loader_name}:')
            for key, value in log.items():
                if global_config.verbosity >= 1:
                    logger.info(f' {str(key):20s}: {value:.4f}')
                # Elapsed-time entries are logged but not written to csv/Tensorboard.
                if 'elapsed_time' not in key and write:
                    value = value.item() if isinstance(value, torch.Tensor) else value
                    epoch_record[f'{loader_name}_{key}'] = [value]
                    # TODO: See if we can use tree-structured tensorboard logging
                    self.writer.add_scalar(f'{loader_name}_{key}', value)
        # concatenate summary of this epoch into 'epochs_summary.csv'
        new_df = pd.DataFrame(epoch_record)
        csv_file = os.path.join(self.saving_dir, 'epochs_summary.csv')
        df = pd.concat([pd.read_csv(csv_file), new_df]) if os.path.exists(csv_file) else new_df
        df.to_csv(csv_file, index=False)
| 11,688 | 43.109434 | 119 | py |
xcos | xcos-master/src/pipeline/testing_pipeline.py | import os
import numpy as np
from .base_pipeline import BasePipeline
from worker.tester import Tester
from utils.global_config import global_config
from utils.util import ensure_dir
from utils.logging_config import logger
class TestingPipeline(BasePipeline):
    """Pipeline that runs pure inference using one Tester per test data loader."""
    def __init__(self, args):
        """
        # You may need this line to solve the error described in https://github.com/pytorch/pytorch/issues/973
        torch.multiprocessing.set_sharing_strategy('file_system')
        """
        super().__init__(args)

    def _create_saving_dir(self, args):
        # Outputs go to <save_dir>/<outputs_subdir>/<experiment name>.
        saving_dir = os.path.join(global_config['trainer']['save_dir'], args.outputs_subdir,
                                  global_config['name'])
        if os.path.exists(saving_dir):
            # Interactive confirmation before reusing a directory, since
            # existing output files may be overwritten.
            logger.warning(f'The saving directory "{saving_dir}" already exists. '
                           f'If continued, some files might be overwriten.')
            response = input('Proceed? [y/N] ')
            if response != 'y':
                logger.info('Exit.')
                exit()
        ensure_dir(saving_dir)
        if args.resume is not None:
            link = os.path.join(saving_dir, 'resumed_ckpt.pth')
            if os.path.exists(link):
                os.remove(link)
            # Mark the used resume path by a symbolic link
            os.symlink(os.path.abspath(args.resume), link)
        return saving_dir

    def _setup_data_loader(self):
        # No training loader in testing mode.
        return None

    def _setup_valid_data_loaders(self):
        # No validation loaders in testing mode.
        return []

    def _setup_config(self):
        pass

    def _create_workers(self):
        workers = []
        # Add a tester for each data loader
        for test_data_loader in self.test_data_loaders:
            tester = Tester(pipeline=self, test_data_loader=test_data_loader)
            workers += [tester]
        return workers

    def _save_inference_results(self, name: str, worker_output: dict):
        # Bundle all saved keys of one loader into a single .npz archive.
        path = os.path.join(self.saving_dir, f'{name}_output.npz')
        logger.info(f'Saving {path} ...')
        np.savez(path, **worker_output)

    def _setup_test_data_loaders(self):
        if 'test_data_loaders' in global_config.keys():
            test_data_loaders = self._setup_data_loaders('test_data_loaders')
            return test_data_loaders
        else:
            raise ValueError(f"No test_data_loaders key in config")

    def run(self):
        """
        Full testing pipeline logic
        """
        for worker in self.workers:
            worker_output = worker.run(0)
            # When save_while_infer is set, the Tester already wrote
            # per-sample files, so skip the bulk .npz dump.
            if not global_config.save_while_infer:
                self._save_inference_results(worker.data_loader.name, worker_output['saved'])
            self.worker_outputs[worker.data_loader.name] = worker_output
        self._print_and_write_log(0, self.worker_outputs, write=True)
| 2,805 | 34.518987 | 110 | py |
xcos | xcos-master/src/pipeline/evaluation_pipeline.py | from .base_pipeline import BasePipeline
from worker.evaluator import Evaluator
from utils.global_config import global_config
class EvaluationPipeline(BasePipeline):
    """Compare precomputed results against ground truth with the configured
    metrics. Needs neither a model nor a saving directory."""

    def __init__(self, args):
        # Deliberately does not call BasePipeline.__init__: only config,
        # loaders, device and metrics are required for evaluation.
        global_config.setup(args.template_config, args.specified_configs, args.resumed_checkpoint)
        self._print_config_messages()
        self.gt_data_loaders = self._setup_data_loaders('gt_data_loaders')
        self.result_data_loaders = self._setup_data_loaders('result_data_loaders')
        self.device, self.device_ids = self._setup_device()
        self.evaluation_metrics = self._setup_evaluation_metrics()
        self.workers = self._create_workers()
        self.worker_outputs = {}

    def _setup_config(self):
        pass

    def _setup_saving_dir(self, resume_path):
        pass

    def _create_saving_dir(self, resume_path):
        pass

    def _create_workers(self):
        # One Evaluator per (ground-truth, result) loader pair.
        return [
            Evaluator(self, gt_loader, result_loader)
            for gt_loader, result_loader in zip(self.gt_data_loaders, self.result_data_loaders)
        ]

    def run(self):
        """
        Full evaluation pipeline logic
        """
        for evaluator in self.workers:
            self.worker_outputs[evaluator.result_data_loader.name] = evaluator.run(0)
        self._print_and_write_log(0, self.worker_outputs, write=False)
| 1,511 | 35 | 102 | py |
xcos | xcos-master/src/pipeline/__init__.py | from .testing_pipeline import TestingPipeline # NOQA
from .training_pipeline import TrainingPipeline # NOQA
from .evaluation_pipeline import EvaluationPipeline # NOQA
| 170 | 41.75 | 59 | py |
xcos | xcos-master/src/pipeline/training_pipeline.py | import math
import os
import torch
from .base_pipeline import BasePipeline
from worker.trainer import Trainer
from worker.validator import Validator
import model.loss as module_loss
from utils.global_config import global_config
from utils.logging_config import logger
from utils.util import ensure_dir
class TrainingPipeline(BasePipeline):
    """Pipeline driving the train/validate loop: sets up losses, optimizers
    and LR schedulers, runs all workers each epoch, and checkpoints the
    best/periodic states."""
    def __init__(self, args):
        super().__init__(args)

    def _setup_pipeline_specific_attributes(self):
        self._setup_loss_functions()
        if self.optimize_strategy == 'GAN':
            self._setup_gan_loss_functions()
        self._setup_optimizers()
        self._setup_lr_schedulers()

    def _create_saving_dir(self, args):
        # Checkpoints go to <save_dir>/<ckpts_subdir>/<experiment>/<timestamp>.
        saving_dir = os.path.join(global_config['trainer']['save_dir'], args.ckpts_subdir,
                                  global_config['name'], self.start_time)
        ensure_dir(saving_dir)
        # create a link to the resumed checkpoint as a reference
        if args.resume is not None:
            link = os.path.join(saving_dir, 'resumed_ckpt.pth')
            os.symlink(os.path.abspath(args.resume), link)
        return saving_dir

    def _setup_loss_functions(self):
        self.loss_functions = [
            getattr(module_loss, entry['type'])(**entry['args']).to(self.device)
            for key, entry in global_config['losses'].items()
        ]

    def _setup_gan_loss_functions(self):
        """ Setup GAN loss functions. Will only be called when self.optimize_strategy == 'GAN'
        The keys of gan_losses in config should have strict one-to-one mapping with names of optimizers. """
        self.gan_loss_functions = {
            key: getattr(module_loss, entry['type'])(**entry['args']).to(self.device)
            for key, entry in global_config['gan_losses'].items()
        }

    def _setup_lr_schedulers(self):
        """ Setup learning rate schedulers according to configuration. Note that the naming of
        optimizers and lr_schedulers in configuration should have a strict one-to-one mapping.
        """
        self.lr_schedulers = {}
        for optimizer_name, optimizer in self.optimizers.items():
            entry = global_config['lr_schedulers'][optimizer_name]
            self.lr_schedulers[optimizer_name] = getattr(torch.optim.lr_scheduler, entry['type'])(
                optimizer, **entry['args'])

    def _create_workers(self):
        # One Trainer for the training loader plus one Validator per
        # validation loader; iteration counters may come from a resume.
        trainer = Trainer(
            self, self.data_loader, self.train_iteration_count
        )
        workers = [trainer]
        for i, valid_data_loader in enumerate(self.valid_data_loaders):
            workers.append(
                Validator(
                    self, valid_data_loader, self.valid_iteration_counts[i]
                )
            )
        return workers

    def _setup_config(self):
        self.epochs = global_config['trainer']['epochs']
        self.save_freq = global_config['trainer']['save_freq']
        # configuration to monitor model performance and save best
        self.monitored_loader = global_config['trainer']['monitored_loader']
        valid_loader_names = [loader.name for loader in self.valid_data_loaders]
        assert self.monitored_loader in valid_loader_names, \
            f"Config monitored loader '{self.monitored_loader}' is not in validation data loaders {valid_loader_names}"
        self.monitored_metric = global_config['trainer']['monitored_metric']
        valid_metric_names = [f"avg_{metric.nickname}" for metric in self.evaluation_metrics] + ["avg_loss"]
        assert self.monitored_metric in valid_metric_names, \
            f"Config monitored metric '{self.monitored_metric}' is not in valid evaluation metrics {valid_metric_names}"
        self.monitor_mode = global_config['trainer']['monitor_mode']
        assert self.monitor_mode in ['min', 'max', 'off']
        self.monitor_best = math.inf if self.monitor_mode == 'min' else -math.inf
        self.do_validation = len(self.valid_data_loaders) > 0

    def _save_checkpoint(self, epoch, save_best=False):
        """
        Saving checkpoints
        :param epoch: current epoch number
        :param save_best: if True, '-best' is appended to the checkpoint filename
        """
        arch = type(self.model).__name__
        # assure that we save the model state without DataParallel module
        if isinstance(self.model, torch.nn.DataParallel):
            # get the original state out from DataParallel module
            model_state = self.model.module.state_dict()
        else:
            model_state = self.model.state_dict()
        state = {
            'arch': arch,
            'epoch': epoch,
            'state_dict': model_state,
            'optimizers': {key: optimizer.state_dict() for key, optimizer in self.optimizers.items()},
            'monitor_best': self.monitor_best,
            'config': global_config,
            'train_iteration_count': self.train_iteration_count,
            'valid_iteration_counts': self.valid_iteration_counts,
        }
        best_str = '-best' if save_best else ''
        monitored_name = f'{self.monitored_loader}_{self.monitored_metric}'
        filename = os.path.join(
            self.saving_dir, f'ckpt-ep{epoch:04d}-{monitored_name}{self.monitor_best:.4f}{best_str}.pth'
        )
        torch.save(state, filename)
        # Fix: log the actual checkpoint path (previously logged a
        # literal "(unknown)" placeholder).
        logger.info(f"Saving checkpoint: {filename} ...")

    def _check_and_save_best(self, epoch, worker_outputs):
        """
        Evaluate model performance according to configured metric, save best checkpoint as model_best
        """
        best = False
        if self.monitor_mode != 'off':
            try:
                metric_value = worker_outputs[self.monitored_loader]['log'][self.monitored_metric]
                if (self.monitor_mode == 'min' and metric_value < self.monitor_best) or\
                   (self.monitor_mode == 'max' and metric_value > self.monitor_best):
                    self.monitor_best = metric_value
                    best = True
            except KeyError:
                # Warn only once (first epoch) to avoid log spam.
                if epoch == 1:
                    msg = f"Warning: Can\'t recognize metric '{self.monitored_metric}' in '{self.monitored_loader}' "\
                        + f"for performance monitoring. model_best checkpoint won\'t be updated."
                    logger.warning(msg)
        if epoch % self.save_freq == 0 or best:
            self._save_checkpoint(epoch, save_best=best)

    def _after_epoch(self, epoch, worker_outputs):
        # Log/record the epoch, checkpoint if due, then step all schedulers.
        self._print_and_write_log(epoch, worker_outputs)
        self._check_and_save_best(epoch, worker_outputs)
        if self.lr_schedulers is not None:
            for scheduler in self.lr_schedulers.values():
                scheduler.step()

    def run(self):
        """
        Full training pipeline logic
        """
        for epoch in range(self.start_epoch, self.epochs + 1):
            for worker in self.workers:
                worker_output = worker.run(epoch)
                self.worker_outputs[worker.data_loader.name] = worker_output
            self._after_epoch(epoch, self.worker_outputs)
| 7,092 | 41.728916 | 120 | py |
xcos | xcos-master/src/worker/tester.py | import os
import time
import torch
from torchvision.utils import save_image
from .worker_template import WorkerTemplate
from data_loader.base_data_loader import BaseDataLoader
from pipeline.base_pipeline import BasePipeline
from utils.global_config import global_config
from utils.logging_config import logger
from utils.verification import checkTFPN
class Tester(WorkerTemplate):
    """
    Tester class: gradient-free inference over a test data loader,
    collecting outputs listed in global_config.saved_keys and optionally
    writing qualitative xCos visualizations per pair.
    Note:
        Inherited from WorkerTemplate.
    """
    def __init__(self, pipeline: BasePipeline, test_data_loader: BaseDataLoader):
        super().__init__(pipeline=pipeline, data_loader=test_data_loader, step=0)
        # Share the pipeline's output directory for per-sample saving.
        for attr_name in ['saving_dir']:
            setattr(self, attr_name, getattr(pipeline, attr_name))

    @property
    def enable_grad(self):
        return False

    def _run_and_optimize_model(self, data):
        """ Inference only: no loss computation and no optimizer step. """
        with torch.no_grad():
            model_output = self.model(data, scenario='get_feature_and_xcos')
        return model_output, None

    def _setup_model(self):
        self.model.eval()

    def _to_log(self, epoch_stats):
        return {}

    def _init_output(self):
        """ Initialize a dictionary structure to save inferenced results. """
        for metric in self.evaluation_metrics:
            metric.clear()
        return {
            'epoch_start_time': time.time(),
            'saved': {k: [] for k in global_config.saved_keys}
        }

    def _update_output(self, epoch_output, products, write_metric=False):
        """ Update the dictionary saver: extend entries """
        self._update_all_metrics(products['data'], products['model_output'], write=write_metric)

        def update_epoch_output_from_dict(dictionary):
            # Collect only the configured keys, moving tensors to numpy.
            for key in dictionary.keys():
                if key not in global_config.saved_keys:
                    continue
                value = dictionary[key]
                saved_value = value.cpu().numpy() if torch.is_tensor(value) else value
                epoch_output['saved'][key].extend([v for v in saved_value])
        data, model_output = products['data'], products['model_output']
        if global_config.save_while_infer:
            # Clean previous results: in streaming mode only the current
            # batch is kept in memory.
            epoch_output['saved'] = {k: [] for k in global_config.saved_keys}
        for d in [data, model_output]:
            update_epoch_output_from_dict(d)
        if global_config.save_while_infer and global_config.arch.type == "xCosModel":
            if global_config.arch.args.draw_qualitative_result:
                # Save one visualization image per pair, named by its
                # TP/FP/TN/FN category and xCos score.
                name = self.data_loader.name
                for i in range(len(epoch_output['saved']['xcos_visualizations'])):
                    index = epoch_output['saved']['index'][i]
                    visualization = epoch_output['saved']['xcos_visualizations'][i]
                    xcos = epoch_output['saved']['x_coses'][i]
                    is_same_label = epoch_output['saved']['is_same_labels'][i]
                    TFPN = checkTFPN(xcos, is_same_label)
                    output_path = os.path.join(self.saving_dir, f'{name}_{TFPN}_xcos_{xcos:.4f}_pair_{index:06d}.png')
                    save_image(visualization, output_path)
                    if index % 1000 == 0:
                        logger.info(f'Saving output {output_path} ...')
        return epoch_output

    def _finalize_output(self, epoch_output):
        """ Return saved inference results along with log messages """
        # Fix: key renamed from the misspelled 'elasped_time (s)' so that
        # BasePipeline._print_and_write_log's 'elapsed_time' filter excludes
        # it from the csv/Tensorboard summary, as with the other workers.
        log = {'elapsed_time (s)': time.time() - epoch_output['epoch_start_time']}
        avg_metrics = {metric.nickname: metric.finalize() for metric in self.evaluation_metrics}
        for key, value in avg_metrics.items():
            log[f"avg_{key}"] = value
        return {'saved': epoch_output['saved'], 'log': log}

    def _print_log(self, epoch, batch_idx, batch_start_time, loss):
        logger.info(f"Batch {batch_idx}, saving output ..")
| 4,707 | 42.192661 | 118 | py |
xcos | xcos-master/src/worker/training_worker.py | import time
from .worker_template import WorkerTemplate
class TrainingWorker(WorkerTemplate):
    """
    Shared epoch bookkeeping for Trainer and Validator: accumulating the
    loss, averaging metrics, and assembling the per-epoch log dictionary.
    Note:
        Inherited from WorkerTemplate.
    """
    def _init_output(self):
        """Start the epoch clock and reset every metric.

        Returns a (start_time, running_loss) pair used as the accumulator."""
        started_at = time.time()
        for m in self.evaluation_metrics:
            m.clear()
        return started_at, 0

    def _update_output(self, output: dict, products: dict, write_metric=True):
        """Fold one batch's loss into the running total and update metrics."""
        started_at, running_loss = output
        running_loss += products['loss'].item()
        self._update_all_metrics(products['data'], products['model_output'], write=write_metric)
        return started_at, running_loss

    def _average_stats(self, total_loss):
        """Average the accumulated loss over batches and finalize all metrics."""
        mean_loss = total_loss / len(self.data_loader)
        finalized = {m.nickname: m.finalize() for m in self.evaluation_metrics}
        return mean_loss, finalized

    def _finalize_output(self, output):
        """Build the epoch log dictionary from the accumulated statistics."""
        started_at, running_loss = output
        mean_loss, finalized = self._average_stats(running_loss)
        log = {
            'elapsed_time (s)': time.time() - started_at,
            'avg_loss': mean_loss,
        }
        log.update({f"avg_{name}": value for name, value in finalized.items()})
        return {'log': log}

    def _get_and_write_gan_loss(self, data, model_output, optimize_name):
        """Compute the weighted GAN loss for one optimizer and log it to Tensorboard."""
        criterion = self.gan_loss_functions[optimize_name]
        weighted = criterion(data, model_output) * criterion.weight
        self.writer.add_scalar(f'{criterion.nickname}', weighted.item())
        return weighted

    def _filter_evaluation_metrics(self, metrics, scenario):
        """Keep only the metrics configured for the given scenario."""
        assert scenario in ['training', 'validation']
        return [m for m in metrics if m.scenario == scenario]
| 2,310 | 37.516667 | 96 | py |
xcos | xcos-master/src/worker/evaluator.py | import time
from .worker_template import WorkerTemplate
from pipeline.base_pipeline import BasePipeline
from data_loader.base_data_loader import BaseDataLoader
from utils.logging_config import logger
from utils.global_config import global_config
class Evaluator(WorkerTemplate):
    """
    Evaluator class: walks a ground-truth loader and a result loader in
    lockstep and feeds each pair to the evaluation metrics. No model runs.
    Note:
        Inherited from WorkerTemplate.
    """
    def __init__(
        self,
        pipeline: BasePipeline,
        gt_data_loader: BaseDataLoader,
        result_data_loader: BaseDataLoader,
        *args
    ):
        # Deliberately skips WorkerTemplate.__init__: only the device and
        # metrics are borrowed from the pipeline.
        for shared in ['device', 'evaluation_metrics']:
            setattr(self, shared, getattr(pipeline, shared))
        self.gt_data_loader = gt_data_loader
        self.result_data_loader = result_data_loader

    @property
    def enable_grad(self):
        return False

    def _run_and_optimize_model(self, data_input):
        # Nothing to run: the results are precomputed.
        return {}, None, []

    def _setup_model(self):
        pass

    def _init_output(self):
        """Reset all metrics and return the epoch start time as accumulator."""
        started_at = time.time()
        for metric in self.evaluation_metrics:
            metric.clear()
        return started_at

    def _update_output(self, output, batch_products, write_metric=True):
        self._update_all_metrics(batch_products['gt'], batch_products['result'], write=write_metric)
        return output

    def _finalize_output(self, output):
        """Finalize every metric and assemble the log dictionary."""
        started_at = output
        finalized = {metric.nickname: metric.finalize() for metric in self.evaluation_metrics}
        log = {
            'elapsed_time (s)': time.time() - started_at,
        }
        for name, value in finalized.items():
            log[f"avg_{name}"] = value
        return {'log': log}

    def _to_log(self, epoch_stats):
        return {}

    def _print_log(self, epoch, batch_idx, batch_start_time, loss):
        current_sample_idx = batch_idx * self.gt_data_loader.batch_size
        total_sample_num = self.gt_data_loader.n_samples
        sample_percentage = 100.0 * batch_idx / len(self.gt_data_loader)
        batch_time = time.time() - batch_start_time
        logger.info(
            f'Epoch: {epoch} [{current_sample_idx}/{total_sample_num} '
            f' ({sample_percentage:.0f}%)] '
            f'BT: {batch_time:.2f}s'
        )

    def _iter_data(self, epoch):
        """Iterate (gt, result) batch pairs, updating metrics per batch."""
        output = self._init_output()
        paired_loaders = zip(self.gt_data_loader, self.result_data_loader)
        for batch_idx, (gt, result) in enumerate(paired_loaders):
            tic = time.time()
            batch_products = {
                'gt': self._data_to_device(gt),
                'result': self._data_to_device(result),
            }
            output = self._update_output(output, batch_products, write_metric=False)
            if batch_idx % global_config.log_step == 0 and global_config.verbosity >= 2:
                self._print_log(epoch, batch_idx, tic, 0)
        return output
| 3,040 | 33.556818 | 100 | py |
xcos | xcos-master/src/worker/worker_template.py | import time
from abc import ABC, abstractmethod
import torch
from torchvision.utils import make_grid
from data_loader.base_data_loader import BaseDataLoader
from pipeline.base_pipeline import BasePipeline
from utils.global_config import global_config
from utils.util import batch_visualize_xcos
class WorkerTemplate(ABC):
""" Worker template, base class for trainer, validator and tester.
Child class need to implement at least the _run_and_optimize_model() method
that deals with the main optimization & model inference.
"""
def __init__(
self, pipeline: BasePipeline, data_loader: BaseDataLoader, step: int
):
# Attributes listed below are shared from pipeline among all different workers.
for attr_name in ['device', 'model', 'evaluation_metrics', 'writer', 'optimize_strategy']:
setattr(self, attr_name, getattr(pipeline, attr_name))
self.data_loader = data_loader
self.step = step # Tensorboard log step
# ============ Implement the following functions ==============
    @property
    @abstractmethod
    def enable_grad(self):
        # True when this worker should run with gradients enabled
        # (presumably consumed via torch.set_grad_enabled in the run loop —
        # confirm; testing/evaluation workers in this codebase return False).
        pass
    @abstractmethod
    def _run_and_optimize_model(self, data):
        """ Put data into model and optimize the model"""
        # NOTE(review): subclasses here return (model_output, loss) but
        # Evaluator returns a 3-tuple — confirm callers handle both shapes.
        return {}, None
    def _print_log(self, epoch, batch_idx, batch_start_time, loss):
        """ Print messages on terminal. """
        # Default: silent; subclasses override to report batch progress.
        pass
    @abstractmethod
    def _setup_model(self):
        """ Set random seed and self.model.eval() or self.model.train() """
        # Called before iterating data so the model is in the right mode.
        pass
    @abstractmethod
    def _init_output(self):
        """Create and return the empty per-epoch accumulator that is passed
        through _update_output and consumed by _finalize_output."""
        pass
    @abstractmethod
    def _update_output(self, output: dict, products: dict):
        """Fold one batch's products into the running epoch output and return it."""
        return output
    @abstractmethod
    def _finalize_output(self, epoch_output) -> dict:
        """ The final output of worker.run() will be processed by this
        function, whose responsibility is to create a dictionary containing
        log messages and/or saved inference outputs. """
        pass
# ============ Implement the above functions ==============
def _update_all_metrics(self, data_input, model_output, write=True):
for metric in self.evaluation_metrics:
with torch.no_grad():
value = metric.update(data_input, model_output)
# some metrics do not have per-batch evaluation (e.g. FID), then value would be None
if write and value is not None:
self.writer.add_scalar(metric.nickname, value)
# Generally, the following function should not be changed.
def _write_data_to_tensorboard(self, data, model_output):
""" Write images to Tensorboard """
img_tensors = data["data_input"]
if not isinstance(img_tensors, torch.Tensor):
img_tensors = torch.cat(img_tensors)
if global_config.arch.type == "xCosModel":
img1s, img2s = data['data_input']
img1s = img1s.cpu().numpy()
img2s = img2s.cpu().numpy()
grid_cos_maps = model_output['grid_cos_maps'].squeeze().detach().cpu().numpy()
attention_maps = model_output['attention_maps'].squeeze().detach().cpu().numpy()
visualizations = batch_visualize_xcos(img1s, img2s, grid_cos_maps, attention_maps)
if len(visualizations) > 10:
visualizations = visualizations[:10]
self.writer.add_image("xcos_visualization", make_grid(torch.cat(visualizations), nrow=1))
if self.optimize_strategy == 'GAN':
self.writer.add_image("G_z", make_grid(model_output["G_z"], nrow=4, normalize=True))
self.writer.add_histogram("dist_G_z", model_output["G_z"])
self.writer.add_histogram("dist_x", img_tensors)
def _setup_writer(self):
""" Setup Tensorboard writer for each iteration """
self.writer.set_step(self.step, self.data_loader.name)
self.step += 1
def _get_and_write_losses(self, data, model_output):
""" Calculate losses and write them to Tensorboard
Losses (dict: nickname -> loss tensor) and total loss (tensor) will be returned.
"""
losses = {}
for loss_function in self.loss_functions:
if loss_function.weight <= 0.0:
continue
loss = loss_function(data, model_output) * loss_function.weight
losses[loss_function.nickname] = loss
self.writer.add_scalar(f'{loss_function.nickname}', loss.item())
if len(self.loss_functions) == 0:
total_loss = torch.zeros([1])
else:
total_loss = torch.stack(list(losses.values()), dim=0).sum(dim=0)
self.writer.add_scalar('total_loss', total_loss.item())
return losses, total_loss
def _data_to_device(self, data):
""" Put data into CPU/GPU """
for key in data.keys():
# Dataloader yeilds something that's not tensor, e.g data['video_id']
if torch.is_tensor(data[key]):
data[key] = data[key].to(self.device)
elif isinstance(data[key], list):
for i, elem in enumerate(data[key]):
data[key][i] = elem.to(self.device)
return data
def _iter_data(self, epoch):
"""
Iterate through the dataset and do inference.
Output of this worker will be init and updated(after a batch) here using
`self._output_init` and `self._output_update`.
"""
output = self._init_output()
for batch_idx, data in enumerate(self.data_loader):
batch_start_time = time.time()
self._setup_writer()
data = self._data_to_device(data)
data['batch_idx'] = batch_idx
model_output, loss = self._run_and_optimize_model(data)
products = {
'data': data,
'model_output': model_output,
'loss': loss,
}
if batch_idx % global_config.log_step == 0:
self._write_data_to_tensorboard(data, model_output)
if global_config.verbosity >= 2:
self._print_log(epoch, batch_idx, batch_start_time, loss)
output = self._update_output(output, products)
return output
def run(self, epoch):
self._setup_model()
with torch.set_grad_enabled(self.enable_grad):
epoch_output = self._iter_data(epoch)
output = self._finalize_output(epoch_output)
return output
| 6,516 | 38.981595 | 101 | py |
xcos | xcos-master/src/worker/validator.py | import torch
from .training_worker import TrainingWorker
from pipeline.base_pipeline import BasePipeline
class Validator(TrainingWorker):
    """Validation worker.

    Runs the model over the validation data loader with gradients disabled.
    Note:
        Inherited from WorkerTemplate (via TrainingWorker).
    """
    def __init__(self, pipeline: BasePipeline, *args):
        super().__init__(pipeline, *args)
        # Some shared attributes are validator exclusive and therefore is initialized here
        for attr_name in ['loss_functions', 'optimize_strategy', 'validation_strategy']:
            setattr(self, attr_name, getattr(pipeline, attr_name))
        if self.optimize_strategy == 'GAN':
            attr_name = 'gan_loss_functions'
            setattr(self, attr_name, getattr(pipeline, attr_name))
        # Keep only the metrics meant to run during validation.
        self.evaluation_metrics = self._filter_evaluation_metrics(self.evaluation_metrics, scenario='validation')
    @property
    def enable_grad(self):
        # Validation never needs gradients.
        return False
    def _run_and_optimize_model(self, data):
        """Run one validation forward pass (no optimization).

        Returns:
            (model_output, total_loss); total_loss is a zero tensor when loss
            calculation is bypassed.

        Raises:
            ValueError: on an unsupported strategy combination. Previously an
                unhandled combination fell through both branches and crashed
                with a confusing NameError on `model_output`.
        """
        if self.validation_strategy == self.optimize_strategy:
            if self.optimize_strategy == 'normal':
                model_output = self.model(data)
                _, total_loss = self._get_and_write_losses(data, model_output)
            elif self.optimize_strategy == 'GAN':
                model_output = self.model(data, scenario='generator_only')
                _, total_loss = self._get_and_write_losses(data, model_output)
            else:
                raise ValueError(
                    f"Unsupported optimize_strategy for validation: {self.optimize_strategy}")
        elif self.validation_strategy == "bypass_loss_calculation":
            model_output = self.model(data, scenario='get_feature_and_xcos')
            total_loss = torch.zeros([1])
        else:
            raise ValueError(
                f"Unsupported validation_strategy: {self.validation_strategy}")
        return model_output, total_loss
    def _setup_model(self):
        # Switch to eval mode (disables dropout, uses running BN statistics).
        self.model.eval()
| 1,686 | 39.166667 | 113 | py |
xcos | xcos-master/src/worker/trainer.py | import time
import numpy as np
from .training_worker import TrainingWorker
from utils.logging_config import logger
from utils.util import get_lr
from utils.global_config import global_config
from pipeline.base_pipeline import BasePipeline
class Trainer(TrainingWorker):
    """
    Trainer class
    Note:
        Inherited from WorkerTemplate.
    """
    def __init__(self, pipeline: BasePipeline, *args):
        super().__init__(pipeline, *args)
        # Some shared attributes are trainer exclusive and therefore is initialized here
        shared_attrs = ['optimizers', 'loss_functions']
        shared_attrs += ['gan_loss_functions'] if self.optimize_strategy == 'GAN' else []
        for attr_name in shared_attrs:
            setattr(self, attr_name, getattr(pipeline, attr_name))
        # Keep only the metrics meant to run during training.
        self.evaluation_metrics = self._filter_evaluation_metrics(self.evaluation_metrics, scenario='training')
    @property
    def enable_grad(self):
        # Training always runs with autograd enabled.
        return True
    def _print_log(self, epoch, batch_idx, batch_start_time, loss):
        # Per-batch progress line: epoch, sample progress, total loss, batch time.
        current_sample_idx = batch_idx * self.data_loader.batch_size
        total_sample_num = self.data_loader.n_samples
        sample_percentage = 100.0 * batch_idx / len(self.data_loader)
        batch_time = time.time() - batch_start_time
        logger.info(
            f'Epoch: {epoch} [{current_sample_idx}/{total_sample_num} '
            f' ({sample_percentage:.0f}%)] '
            f'loss_total: {loss.item():.6f}, '
            f'BT: {batch_time:.2f}s'
        )
    def _run_and_optimize_model(self, data):
        # Forward + backward + optimizer step(s) for one batch. The strategy
        # decides how many optimizers participate and how losses are combined.
        if self.optimize_strategy == 'normal':
            # Single optimizer named 'default'; one combined loss.
            self.optimizers['default'].zero_grad()
            model_output = self.model(data)
            _, total_loss = self._get_and_write_losses(data, model_output)
            total_loss.backward()
            self.optimizers['default'].step()
        elif self.optimize_strategy == 'multitasking':
            # All optimizers zeroed first, one shared backward, then all step.
            for optimizer_name in self.optimizers.keys():
                self.optimizers[optimizer_name].zero_grad()
            model_output = self.model(data, 'normal')
            _, total_loss = self._get_and_write_losses(data, model_output)
            total_loss.backward()
            for optimizer_name in self.optimizers.keys():
                self.optimizers[optimizer_name].step()
        elif self.optimize_strategy == 'GAN':
            # Each optimizer (e.g. G and D) gets its own forward scenario
            # (configured per-optimizer in global_config) and its own backward.
            # NOTE: model_output returned is the output of the LAST scenario run.
            total_loss = 0
            for optimizer_name in self.optimizers.keys():
                self.optimizers[optimizer_name].zero_grad()
                forward_scenario = global_config['optimizers'][optimizer_name]['forward_scenario']
                model_output = self.model(data, forward_scenario)
                loss = self._get_and_write_gan_loss(data, model_output, optimizer_name)
                loss.backward()
                total_loss += loss
                self.optimizers[optimizer_name].step()
        # NOTE(review): an unknown optimize_strategy would leave model_output /
        # total_loss unbound and raise NameError here — confirm the config layer
        # validates the strategy upstream.
        return model_output, total_loss
    def _setup_model(self):
        # Re-seed numpy from OS entropy each epoch so epochs don't repeat the
        # same augmentation randomness; then switch the model to train mode.
        np.random.seed()
        self.model.train()
        for key, optimizer in self.optimizers.items():
            logger.info(f'Current lr of optimizer {key}: {get_lr(optimizer)}')
| 3,137 | 36.357143 | 111 | py |
xcos | xcos-master/src/data_loader/data_loaders.py | import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) # noqa
from torchvision import transforms
from .base_data_loader import BaseDataLoader
from .mnist import MnistDataset
from .mnist_result import MnistResultDataset
from .face_datasets import SiameseImageFolder, InsightFaceBinaryImg, ARFaceDataset, GeneGANDataset
class FaceDataLoader(BaseDataLoader):
    """Data loader for siamese face-pair training.

    Wraps SiameseImageFolder; each batch is a dictionary produced by the
    underlying dataset.
    """
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0,
                 num_workers=1, name=None,
                 norm_mean=(0.5, 0.5, 0.5), norm_std=(0.5, 0.5, 0.5)):
        # Augmentation + normalization applied to every image.
        pipeline = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=norm_mean, std=norm_std),
        ])
        self.data_dir = data_dir
        self.dataset = SiameseImageFolder(data_dir, pipeline)
        if name is None:
            name = type(self).__name__
        self.name = name
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class FaceBinDataLoader(BaseDataLoader):
    """Face verification loader reading InsightFace-style .bin validation sets.

    Each batch is a dictionary produced by InsightFaceBinaryImg. When
    ``use_bgr`` is set, images are only converted to tensors (no
    normalization), matching the stored BGR preprocessing.
    """
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0,
                 num_workers=1, name="lfw", nickname=None, mask_dir=None,
                 norm_mean=(0.5, 0.5, 0.5), norm_std=(0.5, 0.5, 0.5),
                 use_bgr=True):
        # Build the transform pipeline incrementally: normalization is applied
        # only for the RGB path.
        steps = [transforms.ToTensor()]
        if not use_bgr:
            steps.append(transforms.Normalize(mean=norm_mean, std=norm_std))
        trsfm = transforms.Compose(steps)
        self.data_dir = data_dir
        self.dataset = InsightFaceBinaryImg(data_dir, name, trsfm, mask_dir, use_bgr)
        # Display name priority: nickname > name > class name.
        if nickname is not None:
            self.name = nickname
        elif name is not None:
            self.name = name
        else:
            self.name = self.__class__.__name__
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class MnistDataLoader(BaseDataLoader):
    """
    Customized MNIST data loader demo
    Returned data will be in dictionary
    """
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0,
                 num_workers=1, training=True, name=None,
                 img_size=28, norm_mean=(0.1307,), norm_std=(0.3081,)):
        # `transforms.Scale` was deprecated and removed from torchvision;
        # `transforms.Resize` is its drop-in replacement with identical behavior.
        trsfm = transforms.Compose([
            transforms.Resize(img_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=norm_mean, std=norm_std)
        ])
        self.data_dir = data_dir
        self.dataset = MnistDataset(self.data_dir, train=training, download=True, transform=trsfm)
        self.name = self.__class__.__name__ if name is None else name
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class MnistResultDataLoader(BaseDataLoader):
    """Loader over previously saved MNIST inference results.

    Iterates deterministically (no shuffling, no validation split) so results
    can be inspected in order; batches are dictionaries from MnistResultDataset.
    """
    def __init__(self, dataset_args, batch_size, num_workers=1, training=True, name=None):
        self.dataset = MnistResultDataset(**dataset_args)
        if name is None:
            name = type(self).__name__
        self.name = name
        # shuffle=False and validation_split=0 — result sets are read-only.
        super().__init__(self.dataset, batch_size, False, 0, num_workers)
class ARFaceDataLoader(BaseDataLoader):
    """Validation loader for the AR Face dataset.

    Images are resized to 112x112 and normalized; batches are dictionaries
    produced by ARFaceDataset.
    """
    def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0,
                 num_workers=1, name=None, norm_mean=(0.5, 0.5, 0.5), norm_std=(0.5, 0.5, 0.5)):
        preprocessing = transforms.Compose([
            transforms.Resize([112, 112]),
            transforms.ToTensor(),
            transforms.Normalize(mean=norm_mean, std=norm_std),
        ])
        self.data_dir = data_dir
        self.dataset = ARFaceDataset(data_dir, preprocessing)
        if name is None:
            name = type(self).__name__
        self.name = name
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
class GeneGANDataLoader(BaseDataLoader):
    """Loader over GeneGAN-augmented face data.

    ``identity_txt`` maps augmented images to identities; batches are
    dictionaries produced by GeneGANDataset.
    """
    def __init__(self, data_dir, batch_size, identity_txt, shuffle=True, validation_split=0.0,
                 num_workers=1, name=None,
                 norm_mean=(0.5, 0.5, 0.5), norm_std=(0.5, 0.5, 0.5)):
        augmentation = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean=norm_mean, std=norm_std),
        ])
        self.data_dir = data_dir
        self.dataset = GeneGANDataset(data_dir, identity_txt, augmentation)
        if name is None:
            name = type(self).__name__
        self.name = name
        super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)
| 5,081 | 40.655738 | 98 | py |
xcos | xcos-master/src/data_loader/base_data_loader.py | import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import SubsetRandomSampler
# Add this to initialize workers of dataloader to avoid fixed numpy random
# seeds for each training epoch. For a clearer explanation please refer to:
# https://github.com/pytorch/pytorch/issues/5059
def worker_init_fn(worker_id):
    """Re-seed numpy's RNG per dataloader worker.

    Without this, every worker process inherits the parent's RNG state and all
    workers (and every epoch) draw identical "random" augmentations. See
    https://github.com/pytorch/pytorch/issues/5059.
    """
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
class BaseDataLoader(DataLoader):
    """
    Base class for all data loaders

    Optionally carves a validation subset out of the dataset: when
    `validation_split > 0`, indices are shuffled with a FIXED seed (so the
    split is reproducible across runs) and two SubsetRandomSamplers are built.
    """
    def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate):
        self.validation_split = validation_split
        self.shuffle = shuffle
        self.batch_idx = 0
        self.n_samples = len(dataset)
        # NOTE: _split_sampler mutates self.shuffle and self.n_samples when a
        # split is requested; it must run before init_kwargs is captured.
        self.sampler, self.valid_sampler = self._split_sampler(self.validation_split)
        self.init_kwargs = {
            'dataset': dataset,
            'batch_size': batch_size,
            'shuffle': self.shuffle,
            'collate_fn': collate_fn,
            'num_workers': num_workers
        }
        super(BaseDataLoader, self).__init__(sampler=self.sampler, **self.init_kwargs,
                                             worker_init_fn=worker_init_fn)
    def _split_sampler(self, split):
        # Returns (train_sampler, valid_sampler); (None, None) disables splitting.
        if split == 0.0:
            return None, None
        idx_full = np.arange(self.n_samples)
        # Fixed seed: the train/valid partition is identical on every run.
        # NOTE(review): this also resets the GLOBAL numpy RNG state as a side
        # effect — confirm callers don't rely on prior seeding.
        np.random.seed(0)
        np.random.shuffle(idx_full)
        len_valid = int(self.n_samples * split)
        valid_idx = idx_full[0:len_valid]
        train_idx = np.delete(idx_full, np.arange(0, len_valid))
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)
        # turn off shuffle option which is mutually exclusive with sampler
        self.shuffle = False
        self.n_samples = len(train_idx)
        return train_sampler, valid_sampler
    def split_validation(self):
        # Build a sibling DataLoader over the held-out indices, reusing the
        # same dataset/batch-size/collate settings.
        if self.valid_sampler is None:
            return None
        else:
            valid_data_loader = DataLoader(sampler=self.valid_sampler, **self.init_kwargs)
            valid_data_loader.name = 'valid_' + self.name
            return valid_data_loader
| 2,242 | 32.477612 | 112 | py |
xcos | xcos-master/src/data_loader/face_datasets.py | import cv2
import os
import os.path as op
import warnings
from glob import glob
import numpy as np
import pandas as pd
from PIL import Image
import bcolz
import torch
import torch.nn as nn
from torchvision import transforms, datasets
from torch.utils.data import Dataset
from torch.utils.data.sampler import BatchSampler
from torchvision.datasets import ImageFolder
import random
from PIL import ImageFile
from utils.align import Alignment
from utils.util_python import read_lines_into_list
cos = nn.CosineSimilarity(dim=0, eps=1e-6)
# ImageFile is useless?
ImageFile.LOAD_TRUNCATED_IMAGES = True
class myImageFolder(ImageFolder):
    """ImageFolder variant exposing a deprecated ``train_labels`` alias for ``targets``."""
    def __init__(self, root, transform=None, target_transform=None):
        super().__init__(root, transform, target_transform)
    @property
    def train_labels(self):
        # Backward-compatibility shim for code still using the old attribute name.
        warnings.warn("train_labels has been renamed targets")
        return self.targets
class InsightFaceBinaryImg(Dataset):
    """Face-verification pairs stored as an InsightFace bcolz carray.

    Images are stored pairwise (indices 2i, 2i+1 form pair i) with a boolean
    is-same label per pair. Optionally applies a random occlusion mask to one
    image of each pair.
    """
    def __init__(self, root_folder, dataset_name, transform=None, mask_dir=None, use_bgr=True):
        self.root = root_folder
        self.name = dataset_name
        self.transform = transform
        self.img_arr, self.is_same_arr = self.get_val_pair(self.root, self.name)
        self.mask_dir = mask_dir
        self.use_bgr = use_bgr
        if self.mask_dir is not None:
            assert op.isdir(self.mask_dir)
            self.mask_files = glob(op.join(self.mask_dir, '*.png'))
    def __getitem__(self, index):
        # Consecutive slots [2*index, 2*index+1] hold the two images of pair `index`.
        img_pair = self.img_arr[index * 2: (index + 1) * 2]
        if not self.use_bgr:
            # Shape: from [2, c, h, w] to [2, h, w, c]
            img_pair = np.transpose(img_pair, (0, 2, 3, 1))
            # Range: [-1, +1] --> [0, 255]
            img_pair = ((img_pair + 1) * 0.5 * 255).astype(np.uint8)
        if self.mask_dir is not None:
            # Randomly choose one profile from the pair.
            mask_img_idx = np.random.choice(2)
            mask_file = np.random.choice(self.mask_files)
            # NOTE(review): apply_mask expands the mask on axis 2 (HWC layout);
            # when use_bgr is True the image is still CHW here — confirm the
            # mask path is only used with use_bgr=False.
            img_pair[mask_img_idx] = self.apply_mask(img_pair[mask_img_idx], mask_file)
        # BGR2RGB
        img_pair_tmp = []
        for img in img_pair:
            if not self.use_bgr:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                if self.transform is not None:
                    img = self.transform(img)
                else:
                    raise NotImplementedError
            else:
                # BGR path: stored values are used as-is, just wrapped in a tensor.
                img = torch.tensor(img)
            img_pair_tmp.append(img)
        # img_pair = torch.stack(img_pair_tmp)
        is_same_label = self.is_same_arr[index]
        return {
            "data_input": (img_pair_tmp[0], img_pair_tmp[1]),
            "is_same_labels": is_same_label,
            "index": index
        }
    def __len__(self):
        # One item per pair, not per image.
        return len(self.is_same_arr)
    def get_val_pair(self, path, name):
        # Loads the on-disk bcolz carray of images plus the numpy is-same labels.
        carray = bcolz.carray(rootdir=op.join(path, name), mode="r")
        issame = np.load(op.join(path, "{}_list.npy".format(name)))
        return carray, issame
    def apply_mask(self, image, mask_path):
        """Apply the binary mask to one image.
        Arguments:
            image {np.array} -- of shape (h, w, c)
            mask_path {str} -- file path for one mask
        Returns:
            np.array -- masked image
        """
        mask = Image.open(mask_path)
        masked = np.array(image) * np.expand_dims(np.array(mask), 2)
        return masked
class SiameseDFWImageFolder(Dataset):
    """
    Train: For each sample creates randomly a positive or a negative pair
    Test: Creates fixed pairs for testing

    DFW (Disguised Faces in the Wild) variant: pairing candidates come from a
    precomputed mask matrix rather than label equality.
    """
    def __init__(self, imgs_folder_dir, transform, dataset_type="training"):
        assert dataset_type in ["training", "testing"]
        print(">>> In SIFolder, imgfolderdir=", imgs_folder_dir)
        self.root = imgs_folder_dir
        self.dataset_type = dataset_type
        # Pairing-candidate matrix: entry [i][j] encodes the relation between
        # images i and j (codes 1..4 — see get_siamese_path).
        matrix_txt_path = os.path.join(
            self.root,
            "Mask_matrices",
            dataset_type,
            f"{dataset_type}_data_mask_matrix.txt",
        )
        self.data_mask_matrix = np.loadtxt(matrix_txt_path)
        img_path_list_path = os.path.join(
            self.root, f"{dataset_type.capitalize()}_data_face_name.txt"
        )
        self.img_path_list = read_lines_into_list(img_path_list_path)
        self.img_label_list, self.name2label = self.img_path_to_label_list(
            self.img_path_list
        )
        self.transform = transform
        # ############################################
        # self.wFace_dataset = ImageFolder(imgs_folder_dir, transform)
        self.class_num = len(self.name2label)
        # ##################################
        # # self.memoryAll = False
        # self.train_labels = np.array(self.wFace_dataset.targets, dtype=int)
        # print('>>> self.train_labels:', self.train_labels[1000:1010])
        # self.train_data = self.wFace_dataset
        # self.labels_set = set(self.train_labels)
        # self.label_to_indices = {label:
        #                          np.where(self.train_labels
        #                                   == label)[0]
        #                          for label in self.labels_set}
        # print('>>> Init SiameseDFWImageFolder done!')
    def __getitem__(self, idx):
        """
        img1 = (feat_fc, feat_grid)
        """
        # print('>>> In getItem, idx = ', idx)
        # Sample the 1-st image
        img1_path = os.path.join(self.root, self.img_path_list[idx])
        img1 = self.load_transformed_img_tensor(img1_path)
        label1 = self.img_label_list[idx]
        # Sample the 2-nd image
        # is_the_same_id is a bool that determines whether returning one pair with the same identity.
        is_the_same_id = np.random.randint(0, 2)
        ############
        img2_path = self.get_siamese_path(idx, is_the_same_id)
        img2_path = os.path.join(self.root, img2_path)
        # print("In getitem, img2_path: ", img2_path)
        # print("In getitem, img1_path: ", img1_path)
        img2 = self.load_transformed_img_tensor(img2_path)
        label2 = self.img_path_to_label(img2_path)
        ###################################
        # img1, label1 = self.train_data[index]  # , self.train_labels[index].item()
        # if target == 1:
        #     siamese_index = index
        #     while siamese_index == index:
        #         siamese_index = np.random.choice(self.label_to_indices[label1])
        # else:
        #     siamese_label = np.random.choice(
        #         list(self.labels_set - set([label1])))
        #     siamese_index = np.random.choice(
        #         self.label_to_indices[siamese_label])
        # img2, label2 = self.train_data[siamese_index]
        return img1, img2, label1, label2
    def __len__(self):
        return len(self.img_path_list)
    def img_path_to_label_list(self, path_list):
        # Builds integer labels from relative paths and a name -> label map.
        label_list = []
        name_list = []
        name2label = {}
        for path in path_list:
            # path e.g. Training_data/Matthew_McConaughey/Matthew_McConaughey_h_002.jpg
            # Assume that Imposter Impersonator is one unique identity
            if "_I_" in path:
                name = path.split("/")[-1][:-8]
            else:
                name = path.split("/")[1]
            if name not in name_list:
                name_list.append(name)
                name2label[name] = len(name_list) - 1
            label = name2label[name]
            label_list.append(label)
        return label_list, name2label
    def img_path_to_label(self, path):
        # path e.g. data/dfw/Training_data/Matthew_McConaughey/Matthew_McConaughey_h_003.jpg
        # NOTE(review): hard-coded path components ([-1][:-8] and index 3) tie
        # this to the exact on-disk layout above — verify if the data root moves.
        if "_I_" in path:
            name = path.split("/")[-1][:-8]
        else:
            name = path.split("/")[3]
        return self.name2label[name]
    def load_transformed_img_tensor(self, path):
        # Loads an image from disk, resizes to 112x112, then applies transform.
        img = datasets.folder.default_loader(path)
        # XXX
        t = transforms.Resize([112, 112])
        img = t(img)
        # print(img)
        # print('>>>>> In load_tr, img.size =', img.size())
        if self.transform is not None:
            img = self.transform(img)
        else:
            raise NotImplementedError
        return img
    def get_siamese_path(self, idx, is_the_same_id):
        """
        Return the relative path of a pairing partner for image `idx`.

        Mask-matrix codes (inferred from usage — TODO confirm against the DFW
        docs): 1/2 = same identity, 3 = fallback / impersonator-related,
        4 = different identity.
        """
        candidate = self.data_mask_matrix[idx]
        positions = []
        # print(">>>> Is the same", is_the_same_id)
        if is_the_same_id:
            targets = [1, 2]
            for target in targets:
                pos = np.where(candidate == target)[0]
                pos = list(pos)
                # print(">>>> candidate=", candidate)
                # print(">>>> pos= ", pos)
                positions += pos
            # _I.jpg case (no identical id)
            if len(positions) == 0:
                pos3 = np.where(candidate == 3)[0]
                pos3 = list(pos3)
                positions += pos3
        else:
            pos3 = np.where(candidate == 3)[0]
            pos4 = np.where(candidate == 4)[0]
            pos3 = list(pos3)
            pos4 = list(pos4)
            # print(">>>> candidate=", candidate)
            # print(">>>> pos3= ", pos3)
            # print(">>>> pos4= ", pos4)
            # _I.jpg case
            if len(pos4) > 0:
                pos4 = random.sample(pos4, max(len(pos3), 1))  # at least take 1 sample
            positions += pos4
            positions += pos3
        assert len(positions) > 0
        siamese_idx = random.choice(positions)
        return self.img_path_list[siamese_idx]
class SiameseImageFolder(Dataset):
    """
    Train: For each sample creates randomly a positive or a negative pair
    Test: Creates fixed pairs for testing

    Wraps a torchvision ImageFolder; for each anchor image a partner is drawn
    at random with a 50/50 chance of sharing the anchor's identity.
    """
    def __init__(self, imgs_folder_dir, transform):
        print(">>> In SIFolder, imgfolderdir=", imgs_folder_dir)
        self.root = imgs_folder_dir
        self.wFace_dataset = ImageFolder(imgs_folder_dir, transform)
        self.class_num = len(self.wFace_dataset.classes)
        print(">>> self.class_num = ", self.class_num)
        self.train_labels = np.array(self.wFace_dataset.targets, dtype=int)
        print(">>> self.train_labels:", self.train_labels[1000:1010])
        self.train_data = self.wFace_dataset
        self.labels_set = set(self.train_labels)
        # label -> indices of all samples with that label, for O(1) pair sampling.
        self.label_to_indices = {
            label: np.where(self.train_labels == label)[0] for label in self.labels_set
        }
        print(">>> Init SiameseImageFolder done!")
    def __getitem__(self, index):
        """
        img1 = (feat_fc, feat_grid)
        """
        # target == 1 -> positive (same-identity) pair; 0 -> negative pair.
        target = np.random.randint(0, 2)
        img1, label1 = self.train_data[index]  # , self.train_labels[index].item()
        if target == 1:
            siamese_index = index
            # Re-draw until the partner is a different image of the same identity.
            while siamese_index == index:
                siamese_index = np.random.choice(self.label_to_indices[label1])
        else:
            siamese_label = np.random.choice(list(self.labels_set - set([label1])))
            siamese_index = np.random.choice(self.label_to_indices[siamese_label])
        img2, label2 = self.train_data[siamese_index]
        return {"data_input": (img1, img2), "targeted_id_labels": (label1, label2)}
    def __len__(self):
        return len(self.wFace_dataset)
class SiameseWholeFace(Dataset):
    """
    Train: For each sample creates randomly a positive or a negative pair
    Test: Creates fixed pairs for testing

    Wraps a whole-face feature dataset whose items are (feat_grid, feat_fc)
    tuples. Test pairs are generated once with a fixed seed so evaluation is
    reproducible.
    """
    # @property
    # def train_data(self):
    #     warnings.warn("train_data has been renamed data")
    #     return self.wFace_dataset
    # @property
    # def test_data(self):
    #     warnings.warn("test_data has been renamed data")
    #     return self.wFace_dataset
    def __init__(self, wFace_dataset):
        self.wFace_dataset = wFace_dataset
        self.train = self.wFace_dataset.train
        self.memoryAll = self.wFace_dataset.memoryAll
        if self.train:
            self.train_labels = self.wFace_dataset.train_labels
            self.train_data = self.wFace_dataset
            if self.memoryAll:
                # Dataset already holds everything in memory; index it directly.
                self.train_data = self.wFace_dataset.train_data
            self.labels_set = set(self.train_labels.numpy())
            self.label_to_indices = {
                label: np.where(self.train_labels.numpy() == label)[0]
                for label in self.labels_set
            }
        else:
            # generate fixed pairs for testing
            # TODO: @property like MNIST
            self.test_labels = self.wFace_dataset.test_labels
            self.test_data = self.wFace_dataset
            if self.memoryAll:
                self.test_data = self.wFace_dataset.test_data
            self.labels_set = set(self.test_labels.numpy())
            self.label_to_indices = {
                label: np.where(self.test_labels.numpy() == label)[0]
                for label in self.labels_set
            }
            # Fixed seed -> identical test pairs on every run.
            random_state = np.random.RandomState(29)
            # Even indices become positive pairs, odd indices negative pairs.
            positive_pairs = [
                [
                    i,
                    random_state.choice(
                        self.label_to_indices[self.test_labels[i].item()]
                    ),
                    1,
                ]
                for i in range(0, len(self.test_data), 2)
            ]
            negative_pairs = [
                [
                    i,
                    random_state.choice(
                        self.label_to_indices[
                            np.random.choice(
                                list(
                                    self.labels_set - set([self.test_labels[i].item()])
                                )
                            )
                        ]
                    ),
                    0,
                ]
                for i in range(1, len(self.test_data), 2)
            ]
            self.test_pairs = positive_pairs + negative_pairs
        print(">>> Init SiameseWholeFace done!")
    def __getitem__(self, index):
        """
        img1 = (feat_fc, feat_grid)
        """
        if self.train:
            # 50/50 positive/negative pair, partner drawn on the fly.
            target = np.random.randint(0, 2)
            img1, label1 = self.train_data[index], self.train_labels[index].item()
            if target == 1:
                siamese_index = index
                while siamese_index == index:
                    siamese_index = np.random.choice(self.label_to_indices[label1])
            else:
                siamese_label = np.random.choice(list(self.labels_set - set([label1])))
                siamese_index = np.random.choice(self.label_to_indices[siamese_label])
            img2 = self.train_data[siamese_index]
        else:
            img1 = self.test_data[self.test_pairs[index][0]]
            img2 = self.test_data[self.test_pairs[index][1]]
            target = self.test_pairs[index][2]
        # [Depreciated] feat1 1 is of size [21504]
        # feat1, feat2 = img1.view(-1), img2.view(-1)
        # cosine = cos(feat1, feat2).numpy()
        # target = cosine
        feat_grid_1, feat_fc_1 = img1
        feat_grid_2, feat_fc_2 = img2
        return (feat_grid_1, feat_fc_1, feat_grid_2, feat_fc_2), target
    def __len__(self):
        return len(self.wFace_dataset)
class SiameseENM(Dataset):
    """
    Train: For each sample creates randomly a positive or a negative pair
    Test: Creates fixed pairs for testing

    Same pairing scheme as SiameseWholeFace, but items are returned as plain
    (img1, img2), target tuples.
    """
    def __init__(self, ENM_dataset):
        self.ENM_dataset = ENM_dataset
        self.train = self.ENM_dataset.train
        # self.train = False
        if self.train:
            self.train_labels = self.ENM_dataset.train_labels
            self.train_data = self.ENM_dataset.train_data
            self.labels_set = set(self.train_labels.numpy())
            self.label_to_indices = {
                label: np.where(self.train_labels.numpy() == label)[0]
                for label in self.labels_set
            }
        else:
            # generate fixed pairs for testing
            # TODO: @property like MNIST
            self.test_labels = self.ENM_dataset.test_labels
            self.test_data = self.ENM_dataset.test_data
            self.labels_set = set(self.test_labels.numpy())
            self.label_to_indices = {
                label: np.where(self.test_labels.numpy() == label)[0]
                for label in self.labels_set
            }
            # Fixed seed -> reproducible test pairs (even idx: positive, odd: negative).
            random_state = np.random.RandomState(29)
            positive_pairs = [
                [
                    i,
                    random_state.choice(
                        self.label_to_indices[self.test_labels[i].item()]
                    ),
                    1,
                ]
                for i in range(0, len(self.test_data), 2)
            ]
            negative_pairs = [
                [
                    i,
                    random_state.choice(
                        self.label_to_indices[
                            np.random.choice(
                                list(
                                    self.labels_set - set([self.test_labels[i].item()])
                                )
                            )
                        ]
                    ),
                    0,
                ]
                for i in range(1, len(self.test_data), 2)
            ]
            self.test_pairs = positive_pairs + negative_pairs
    def __getitem__(self, index):
        if self.train:
            # 50/50 positive/negative pair drawn at access time.
            target = np.random.randint(0, 2)
            img1, label1 = self.train_data[index], self.train_labels[index].item()
            if target == 1:
                siamese_index = index
                while siamese_index == index:
                    siamese_index = np.random.choice(self.label_to_indices[label1])
            else:
                siamese_label = np.random.choice(list(self.labels_set - set([label1])))
                siamese_index = np.random.choice(self.label_to_indices[siamese_label])
            img2 = self.train_data[siamese_index]
        else:
            img1 = self.test_data[self.test_pairs[index][0]]
            img2 = self.test_data[self.test_pairs[index][1]]
            target = self.test_pairs[index][2]
        return (img1, img2), target
    def __len__(self):
        return len(self.ENM_dataset)
class TripletENM(Dataset):
    """
    Train: For each sample (anchor) randomly chooses a positive and negative samples
    Test: Creates fixed triplets for testing
    """
    def __init__(self, ENM_dataset):
        self.ENM_dataset = ENM_dataset
        self.train = self.ENM_dataset.train
        if self.train:
            self.train_labels = self.ENM_dataset.train_labels
            self.train_data = self.ENM_dataset.train_data
            self.labels_set = set(self.train_labels.numpy())
            self.label_to_indices = {
                label: np.where(self.train_labels.numpy() == label)[0]
                for label in self.labels_set
            }
        else:
            self.test_labels = self.ENM_dataset.test_labels
            self.test_data = self.ENM_dataset.test_data
            # generate fixed triplets for testing
            self.labels_set = set(self.test_labels.numpy())
            self.label_to_indices = {
                label: np.where(self.test_labels.numpy() == label)[0]
                for label in self.labels_set
            }
            # Fixed seed -> reproducible (anchor, positive, negative) triplets.
            random_state = np.random.RandomState(29)
            triplets = [
                [
                    i,
                    random_state.choice(
                        self.label_to_indices[self.test_labels[i].item()]
                    ),
                    random_state.choice(
                        self.label_to_indices[
                            np.random.choice(
                                list(
                                    self.labels_set - set([self.test_labels[i].item()])
                                )
                            )
                        ]
                    ),
                ]
                for i in range(len(self.test_data))
            ]
            self.test_triplets = triplets
    def __getitem__(self, index):
        if self.train:
            # Anchor at `index`; positive re-drawn until distinct from anchor.
            img1, label1 = self.train_data[index], self.train_labels[index].item()
            positive_index = index
            while positive_index == index:
                positive_index = np.random.choice(self.label_to_indices[label1])
            negative_label = np.random.choice(list(self.labels_set - set([label1])))
            negative_index = np.random.choice(self.label_to_indices[negative_label])
            img2 = self.train_data[positive_index]
            img3 = self.train_data[negative_index]
        else:
            img1 = self.test_data[self.test_triplets[index][0]]
            img2 = self.test_data[self.test_triplets[index][1]]
            img3 = self.test_data[self.test_triplets[index][2]]
        # Second element kept as empty list for loss-API compatibility.
        return (img1, img2, img3), []
    def __len__(self):
        return len(self.ENM_dataset)
class SiameseMNIST(Dataset):
    """
    Train: For each sample creates randomly a positive or a negative pair
    Test: Creates fixed pairs for testing

    MNIST variant: raw uint8 images are converted to PIL (mode 'L') before the
    transform is applied.
    """
    def __init__(self, mnist_dataset):
        self.mnist_dataset = mnist_dataset
        self.train = self.mnist_dataset.train
        self.transform = self.mnist_dataset.transform
        if self.train:
            self.train_labels = self.mnist_dataset.train_labels
            self.train_data = self.mnist_dataset.train_data
            self.labels_set = set(self.train_labels.numpy())
            self.label_to_indices = {
                label: np.where(self.train_labels.numpy() == label)[0]
                for label in self.labels_set
            }
        else:
            # generate fixed pairs for testing
            self.test_labels = self.mnist_dataset.test_labels
            self.test_data = self.mnist_dataset.test_data
            self.labels_set = set(self.test_labels.numpy())
            self.label_to_indices = {
                label: np.where(self.test_labels.numpy() == label)[0]
                for label in self.labels_set
            }
            # Fixed seed -> reproducible test pairs (even idx: positive, odd: negative).
            random_state = np.random.RandomState(29)
            positive_pairs = [
                [
                    i,
                    random_state.choice(
                        self.label_to_indices[self.test_labels[i].item()]
                    ),
                    1,
                ]
                for i in range(0, len(self.test_data), 2)
            ]
            negative_pairs = [
                [
                    i,
                    random_state.choice(
                        self.label_to_indices[
                            np.random.choice(
                                list(
                                    self.labels_set - set([self.test_labels[i].item()])
                                )
                            )
                        ]
                    ),
                    0,
                ]
                for i in range(1, len(self.test_data), 2)
            ]
            self.test_pairs = positive_pairs + negative_pairs
    def __getitem__(self, index):
        if self.train:
            # 50/50 positive/negative pair drawn at access time.
            target = np.random.randint(0, 2)
            img1, label1 = self.train_data[index], self.train_labels[index].item()
            if target == 1:
                siamese_index = index
                while siamese_index == index:
                    siamese_index = np.random.choice(self.label_to_indices[label1])
            else:
                siamese_label = np.random.choice(list(self.labels_set - set([label1])))
                siamese_index = np.random.choice(self.label_to_indices[siamese_label])
            img2 = self.train_data[siamese_index]
        else:
            img1 = self.test_data[self.test_pairs[index][0]]
            img2 = self.test_data[self.test_pairs[index][1]]
            target = self.test_pairs[index][2]
        # Raw tensors -> grayscale PIL images so torchvision transforms apply.
        img1 = Image.fromarray(img1.numpy(), mode="L")
        img2 = Image.fromarray(img2.numpy(), mode="L")
        if self.transform is not None:
            img1 = self.transform(img1)
            img2 = self.transform(img2)
        return (img1, img2), target
    def __len__(self):
        return len(self.mnist_dataset)
class TripletMNIST(Dataset):
    """
    Train: For each sample (anchor) randomly chooses a positive and negative samples
    Test: Creates fixed triplets for testing
    """

    def __init__(self, mnist_dataset):
        """Wrap an MNIST-style dataset and prepare triplet sampling.

        Args:
            mnist_dataset: dataset exposing ``train``, ``transform`` and the
                ``train_*`` / ``test_*`` data and label tensors.
        """
        self.mnist_dataset = mnist_dataset
        self.train = self.mnist_dataset.train
        self.transform = self.mnist_dataset.transform

        if self.train:
            self.train_labels = self.mnist_dataset.train_labels
            self.train_data = self.mnist_dataset.train_data
            self.labels_set = set(self.train_labels.numpy())
            # Map each label to the indices of all samples carrying it.
            self.label_to_indices = {
                label: np.where(self.train_labels.numpy() == label)[0]
                for label in self.labels_set
            }
        else:
            self.test_labels = self.mnist_dataset.test_labels
            self.test_data = self.mnist_dataset.test_data
            # generate fixed triplets for testing
            self.labels_set = set(self.test_labels.numpy())
            self.label_to_indices = {
                label: np.where(self.test_labels.numpy() == label)[0]
                for label in self.labels_set
            }
            # BUGFIX: use the seeded RandomState for *every* draw, including
            # the negative-label pick. The original drew the negative label
            # via the global ``np.random`` generator, so the "fixed" triplets
            # silently depended on outside RNG state and were not reproducible.
            random_state = np.random.RandomState(29)
            triplets = [
                [
                    i,
                    random_state.choice(
                        self.label_to_indices[self.test_labels[i].item()]
                    ),
                    random_state.choice(
                        self.label_to_indices[
                            random_state.choice(
                                list(
                                    self.labels_set - set([self.test_labels[i].item()])
                                )
                            )
                        ]
                    ),
                ]
                for i in range(len(self.test_data))
            ]
            self.test_triplets = triplets

    def __getitem__(self, index):
        """Return ((anchor, positive, negative), []) as transformed PIL images."""
        if self.train:
            img1, label1 = self.train_data[index], self.train_labels[index].item()
            # Positive: same class, but never the anchor index itself.
            positive_index = index
            while positive_index == index:
                positive_index = np.random.choice(self.label_to_indices[label1])
            # Negative: any sample from a different class.
            negative_label = np.random.choice(list(self.labels_set - set([label1])))
            negative_index = np.random.choice(self.label_to_indices[negative_label])
            img2 = self.train_data[positive_index]
            img3 = self.train_data[negative_index]
        else:
            img1 = self.test_data[self.test_triplets[index][0]]
            img2 = self.test_data[self.test_triplets[index][1]]
            img3 = self.test_data[self.test_triplets[index][2]]
        img1 = Image.fromarray(img1.numpy(), mode="L")
        img2 = Image.fromarray(img2.numpy(), mode="L")
        img3 = Image.fromarray(img3.numpy(), mode="L")
        if self.transform is not None:
            img1 = self.transform(img1)
            img2 = self.transform(img2)
            img3 = self.transform(img3)
        return (img1, img2, img3), []

    def __len__(self):
        return len(self.mnist_dataset)
class BalancedBatchSampler(BatchSampler):
    """
    BatchSampler - from a MNIST-like dataset, samples n_classes and within these classes samples n_samples.
    Returns batches of size n_classes * n_samples
    """

    def __init__(self, labels, n_classes, n_samples):
        """
        Args:
            labels: 1-D tensor of integer class labels, one per sample.
            n_classes: number of distinct classes drawn for each batch.
            n_samples: number of samples drawn per chosen class.
        """
        self.labels = labels
        self.labels_set = list(set(self.labels.numpy()))
        # Per-class pools of sample indices, shuffled so batches vary.
        self.label_to_indices = {
            label: np.where(self.labels.numpy() == label)[0]
            for label in self.labels_set
        }
        for l in self.labels_set:
            np.random.shuffle(self.label_to_indices[l])
        # Cursor into each class pool; rewound (with a reshuffle) on exhaustion.
        self.used_label_indices_count = {label: 0 for label in self.labels_set}
        self.count = 0
        self.n_classes = n_classes
        self.n_samples = n_samples
        self.n_dataset = len(self.labels)
        self.batch_size = self.n_samples * self.n_classes

    def __iter__(self):
        self.count = 0
        # BUGFIX: use `<=` so the last full batch is yielded when n_dataset is
        # an exact multiple of batch_size; the original `<` dropped it, making
        # the iterator disagree with __len__.
        while self.count + self.batch_size <= self.n_dataset:
            classes = np.random.choice(self.labels_set, self.n_classes, replace=False)
            indices = []
            for class_ in classes:
                start = self.used_label_indices_count[class_]
                indices.extend(self.label_to_indices[class_][start: start + self.n_samples])
                self.used_label_indices_count[class_] += self.n_samples
                # Reshuffle and rewind once the pool cannot serve another draw.
                if self.used_label_indices_count[class_] + self.n_samples > len(
                    self.label_to_indices[class_]
                ):
                    np.random.shuffle(self.label_to_indices[class_])
                    self.used_label_indices_count[class_] = 0
            yield indices
            self.count += self.n_classes * self.n_samples

    def __len__(self):
        return self.n_dataset // self.batch_size
class IJBCVerificationBaseDataset(Dataset):
    """Common plumbing for IJB-C verification datasets.

    Loads the metadata, enroll/verif template lists and the match list from
    the csv files under the protocol folder, and offers shared helpers.
    """

    def __init__(self, ijbc_data_root, leave_ratio=1.0):
        """
        Args:
            ijbc_data_root: root folder of the IJB-C data.
            leave_ratio: keep only this fraction of the match pairs when
                < 1.0 (subsampled with a fixed seed for reproducibility).
        """
        self.ijbc_data_root = ijbc_data_root
        id_dtypes = {"SUBJECT_ID": str, "TEMPLATE_ID": str}
        self.metadata = pd.read_csv(
            op.join(ijbc_data_root, "protocols", "ijbc_metadata_with_age.csv"),
            dtype=id_dtypes,
        )
        test1_dir = op.join(ijbc_data_root, "protocols", "test1")
        self.enroll_templates = pd.read_csv(
            op.join(test1_dir, "enroll_templates.csv"), dtype=id_dtypes
        )
        self.verif_templates = pd.read_csv(
            op.join(test1_dir, "verif_templates.csv"), dtype=id_dtypes
        )
        self.match = pd.read_csv(op.join(test1_dir, "match.csv"), dtype=str)
        if leave_ratio < 1.0:
            # Shrink the verification list reproducibly (fixed seed).
            order = np.arange(len(self.match))
            np.random.seed(0)
            np.random.shuffle(order)
            n_keep = int(len(self.match) * leave_ratio)
            self.match = self.match.iloc[order[:n_keep]]

    def _get_both_entries(self, idx):
        """Return (enroll_entries, verif_entries) rows for the idx-th match."""
        match_row = self.match.iloc[idx]
        enroll_entries = self.enroll_templates[
            self.enroll_templates.TEMPLATE_ID == match_row["ENROLL_TEMPLATE_ID"]
        ]
        verif_entries = self.verif_templates[
            self.verif_templates.TEMPLATE_ID == match_row["VERIF_TEMPLATE_ID"]
        ]
        return enroll_entries, verif_entries

    def _get_cropped_path_suffix(self, entry):
        """Build '<img|frames>/<subject>_<file-stem>.jpg' for one template row."""
        sid = entry["SUBJECT_ID"]
        img_or_frames, fname = op.split(entry["FILENAME"])
        fname_index, _ = op.splitext(fname)
        return op.join(img_or_frames, f"{sid}_{fname_index}.jpg")

    def __len__(self):
        return len(self.match)
class IJBCVerificationDataset(IJBCVerificationBaseDataset):
    """
    IJB-C verification dataset (`test1` in the folder) who transforms
    the cropped faces into tensors.
    Note that entries in this verification dataset contains lots of
    repeated faces. A better way to evaluate a model's score is to
    precompute all faces features and store them into disks. (
    see `IJBCAllCroppedFacesDataset` and `IJBCVerificationPathDataset`)
    """

    def __init__(self, ijbc_data_root):
        super().__init__(ijbc_data_root)
        # Standard 112x112 face preprocessing, normalized to [-1, 1].
        self.transforms = transforms.Compose(
            [
                transforms.Resize([112, 112]),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
            ]
        )

    def _get_cropped_face_image_by_entry(self, entry):
        """Open the pre-cropped face image referenced by one template row."""
        suffix = self._get_cropped_path_suffix(entry)
        return Image.open(op.join(self.ijbc_data_root, "cropped_faces", suffix))

    def _get_tensor_by_entries(self, entries):
        """Stack every face of a template into a single (N, C, H, W) tensor."""
        face_tensors = [
            self.transforms(self._get_cropped_face_image_by_entry(entry))
            for _, entry in entries.iterrows()
        ]
        return torch.stack(face_tensors, dim=0)

    def __getitem__(self, idx):
        enroll_entries, verif_entries = self._get_both_entries(idx)
        return {
            "enroll_faces_tensor": self._get_tensor_by_entries(enroll_entries),
            "verif_faces_tensor": self._get_tensor_by_entries(verif_entries),
        }
class IJBCVerificationPathDataset(IJBCVerificationBaseDataset):
    """
    This dataset read the match file of verification set in IJB-C
    (in the `test1` directory) and output the cropped faces' paths
    of both enroll_template and verif_template for each match.
    Models outside can use the path information to read their stored
    features and compute the similarity score of enroll_template and
    verif_template.
    """

    def __init__(self, ijbc_data_root, occlusion_lower_bound=0, leave_ratio=1.0):
        """
        Args:
            ijbc_data_root: root folder of the IJB-C data.
            occlusion_lower_bound: keep only faces whose summed occlusion
                score (OCC1..OCC18) reaches this bound; 0 keeps everything.
            leave_ratio: fraction of match pairs to keep (see base class).
        """
        super().__init__(ijbc_data_root, leave_ratio=leave_ratio)
        self.occlusion_lower_bound = occlusion_lower_bound
        # Total occlusion over the 18 annotated face regions.
        self.metadata["OCC_sum"] = self.metadata[[f"OCC{i}" for i in range(1, 19)]].sum(
            axis=1
        )
        # (SUBJECT_ID, FILENAME)-indexed view for occlusion lookups.
        self.reindexed_meta = self.metadata.set_index(["SUBJECT_ID", "FILENAME"])

    def _filter_out_occlusion_insufficient_entries(self, entries):
        """Drop entries whose occlusion sum is below the configured bound."""
        if self.occlusion_lower_bound == 0:
            return [entry for _, entry in entries.iterrows()]
        out = []
        for _, entry in entries.iterrows():
            occlusion_sum = self.reindexed_meta.loc[
                (entry["SUBJECT_ID"], entry["FILENAME"]), "OCC_sum"
            ]
            if occlusion_sum.values[0] >= self.occlusion_lower_bound:
                out.append(entry)
        return out

    def __getitem__(self, idx):
        enroll_entries, verif_entries = self._get_both_entries(idx)
        is_same = (
            enroll_entries["SUBJECT_ID"].iloc[0] == verif_entries["SUBJECT_ID"].iloc[0]
        )
        is_same = 1 if is_same else 0
        # BUGFIX: the original had trailing commas here which wrapped both ids
        # into accidental 1-tuples; return plain ids, consistent with the
        # sibling IJBVerificationPathDataset.
        enroll_template_id = enroll_entries["TEMPLATE_ID"].iloc[0]
        verif_template_id = verif_entries["TEMPLATE_ID"].iloc[0]
        enroll_entries = self._filter_out_occlusion_insufficient_entries(enroll_entries)
        verif_entries = self._filter_out_occlusion_insufficient_entries(verif_entries)

        def path_suffixes(entries):
            return [self._get_cropped_path_suffix(entry) for entry in entries]

        return {
            "enroll_template_id": enroll_template_id,
            "verif_template_id": verif_template_id,
            "enroll_path_suffixes": path_suffixes(enroll_entries),
            "verif_path_suffixes": path_suffixes(verif_entries),
            "is_same": is_same,
        }
class IJBVerificationPathDataset(Dataset):
    """
    This dataset read the match file of verification set in ijb_dataset_root
    (in the `meta` directory, the filename is sth. like
    "ijbc_template_pair_label.txt") and output the cropped faces'
    paths of both enroll_template and verif_template for each match.
    Models outside can use the path information to read their stored
    features and compute the similarity score of enroll_template and
    verif_template.
    """

    def __init__(self, ijb_dataset_root, leave_ratio=1.0, dataset_type="IJBB"):
        # TODO implement the leave_ratio method
        match_files = {
            "IJBB": "ijbb_template_pair_label.txt",
            "IJBC": "ijbc_template_pair_label.txt",
        }
        if dataset_type not in match_files:
            raise NotImplementedError
        match_filename = op.join(ijb_dataset_root, "meta", match_files[dataset_type])
        # Whitespace-separated: template_id1 template_id2 is_same
        self.match = pd.read_csv(
            match_filename,
            delim_whitespace=True,
            header=None,
            dtype=str,
            names=["TEMPLATE_ID1", "TEMPLATE_ID2", "IS_SAME"],
        )
        if leave_ratio < 1.0:
            # Shrink the verification list reproducibly (fixed seed).
            order = np.arange(len(self.match))
            np.random.seed(0)
            np.random.shuffle(order)
            n_keep = int(len(self.match) * leave_ratio)
            self.match = self.match.iloc[order[:n_keep]]

    def __getitem__(self, idx):
        row = self.match.iloc[idx]
        id1 = row["TEMPLATE_ID1"]
        id2 = row["TEMPLATE_ID2"]
        return {
            "enroll_template_id": id1,
            "verif_template_id": id2,
            "enroll_path_suffixes": [f"{id1}.jpg"],
            "verif_path_suffixes": [f"{id2}.jpg"],
            "is_same": row["IS_SAME"],
        }

    def __len__(self):
        return len(self.match)
class IJBCAllCroppedFacesDataset(Dataset):
    """
    This dataset loads all faces available in IJB-C and transform
    them into tensors. The path for that face is output along with
    its tensor.
    This is for models to compute all faces' features and store them
    into disks, otherwise the verification testing set contains too many
    repeated faces that should not be computed again and again.
    """

    def __init__(self, ijbc_data_root):
        self.ijbc_data_root = ijbc_data_root
        self.transforms = transforms.Compose(
            [
                transforms.Resize([112, 112]),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
            ]
        )
        # Still images come first, frame crops follow.
        faces_root = op.join(self.ijbc_data_root, "cropped_faces")
        self.all_cropped_paths_img = sorted(glob(op.join(faces_root, "img", "*.jpg")))
        self.len_set1 = len(self.all_cropped_paths_img)
        self.all_cropped_paths_frames = sorted(
            glob(op.join(faces_root, "frames", "*.jpg"))
        )

    def __getitem__(self, idx):
        # Indices below len_set1 address still images; the rest address frames.
        if idx < self.len_set1:
            path = self.all_cropped_paths_img[idx]
        else:
            path = self.all_cropped_paths_frames[idx - self.len_set1]
        tensor = self.transforms(Image.open(path).convert("RGB"))
        return {
            "tensor": tensor,
            "path": path,
        }

    def __len__(self):
        return len(self.all_cropped_paths_frames) + len(self.all_cropped_paths_img)
class IJBCroppedFacesDataset(Dataset):
    """
    This dataset loads all faces available in IJB-B/C, align them,
    and transform them into tensors.
    The path for that face is output along with its tensor.
    This is for models to compute all faces' features and store them
    into disks, otherwise the verification testing set contains too many
    repeated faces that should not be computed again and again.
    """

    def __init__(self, ijbc_data_root, is_ijbb=True):
        self.ijbc_data_root = ijbc_data_root
        self.transforms = transforms.Compose(
            [
                transforms.Resize([112, 112]),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
            ]
        )
        self.img_dir = op.join(self.ijbc_data_root, "loose_crop")
        # The landmark file lists one face per line: name, 5 points, score.
        if is_ijbb:
            landmark_txt = "ijbb_name_5pts_score.txt"
        else:
            landmark_txt = "ijbc_name_5pts_score.txt"
        landmark_path = op.join(self.ijbc_data_root, "meta", landmark_txt)
        self.imgs_list, self.landmarks_list = self.loadImgPathAndLandmarks(
            landmark_path
        )
        self.alignment = Alignment()

    def loadImgPathAndLandmarks(self, path):
        """Parse '<name> x1 y1 ... x5 y5 score' lines into (paths, (N,5,2) landmarks)."""
        imgs_list = []
        landmarks_list = []
        with open(path) as img_list:
            for line in img_list.readlines():
                name_lmk_score = line.strip().split(" ")
                imgs_list.append(os.path.join(self.img_dir, name_lmk_score[0]))
                # Middle fields are the 5 landmark coordinates; last is a score.
                lmk = np.array(
                    [float(x) for x in name_lmk_score[1:-1]], dtype=np.float32
                )
                landmarks_list.append(lmk.reshape((5, 2)))
        return imgs_list, np.array(landmarks_list)

    def __getitem__(self, idx):
        img_path = self.imgs_list[idx]
        # XXX cv2.cvtColor(img, cv2.COLOR_BGR2RGB) in the align function
        aligned = self.alignment.align(cv2.imread(img_path), self.landmarks_list[idx])
        tensor = self.transforms(Image.fromarray(aligned))
        return {
            "tensor": tensor,
            "path": img_path,
        }

    def __len__(self):
        return len(self.imgs_list)
def make_square_box(box):
    """Pad the shorter side of a [left, top, right, bottom] box (in place)
    so the box becomes square, keeping it centered.

    Args:
        box: mutable sequence [x1, y1, x2, y2]; modified in place.

    Returns:
        The same (mutated) box, now exactly square.
    """
    width = box[2] - box[0]
    height = box[3] - box[1]
    if width > height:
        diff = width - height
        # Split the padding over both sides; the extra pixel of an odd diff
        # goes to the bottom/right. The original used `diff // 2` on both
        # sides, which left odd differences one pixel short of square.
        box[1] -= diff // 2
        box[3] += diff - diff // 2
    elif height > width:
        diff = height - width
        box[0] -= diff // 2
        box[2] += diff - diff // 2
    return box
class IJBAVerificationDataset(Dataset):
    """IJB-A 1:1 verification split.

    Each item is one template-to-template comparison: the (cropped) face
    tensors of both templates plus a same-subject label.
    """

    def __init__(
        self,
        ijba_data_root="/tmp3/zhe2325138/IJB/IJB-A/",
        split_name="split1",
        only_first_image=False,
        aligned_facial_3points=False,
        crop_face=True,
    ):
        """
        Args:
            ijba_data_root: root folder of the IJB-A data.
            split_name: protocol split folder name, e.g. "split1".
            only_first_image: use only the first image of each template.
            aligned_facial_3points: landmark alignment (not implemented).
            crop_face: crop the annotated (squared) face box before transform.
        """
        self.ijba_data_root = ijba_data_root
        split_root = op.join(ijba_data_root, "IJB-A_11_sets", split_name)
        self.only_first_image = only_first_image
        # Per-image template metadata, indexed by template id; one template
        # may own several rows (images).
        self.metadata = pd.read_csv(
            op.join(split_root, f"verify_metadata_{split_name[5:]}.csv")
        )
        self.metadata = self.metadata.set_index("TEMPLATE_ID")
        # Each row of the comparisons csv is one (template_id, template_id) pair.
        self.comparisons = pd.read_csv(
            op.join(split_root, f"verify_comparisons_{split_name[5:]}.csv"), header=None
        )
        # Standard 112x112 face preprocessing, normalized to [-1, 1].
        self.transform = transforms.Compose(
            [
                transforms.Resize([112, 112]),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
            ]
        )
        self.aligned_facial_3points = aligned_facial_3points
        self.src_facial_3_points = self._get_source_facial_3points()
        self.crop_face = crop_face

    def _get_source_facial_3points(self, output_size=(112, 112)):
        """Return reference eye/nose landmarks scaled to *output_size*."""
        # set source landmarks based on 96x112 size
        src = np.array(
            [
                [30.2946, 51.6963],  # left eye
                [65.5318, 51.5014],  # right eye
                [48.0252, 71.7366],  # nose
                # [33.5493, 92.3655], # left mouth
                # [62.7299, 92.2041], # right mouth
            ],
            dtype=np.float32,
        )
        # scale landmarkS to match output size
        src[:, 0] *= output_size[0] / 96
        src[:, 1] *= output_size[1] / 112
        return src

    def _get_face_img_from_entry(self, entry, square=True):
        """Open the image of one metadata row; optionally crop a square face box."""
        fname = entry["FILE"]
        if fname[:5] == "frame":
            fname = "frames" + fname[5:]  # to fix error in annotation =_=
        img = Image.open(op.join(self.ijba_data_root, "images", fname)).convert("RGB")
        if self.aligned_facial_3points:
            raise NotImplementedError
        else:
            if self.crop_face:
                # left, upper, right, lower
                face_box = [
                    entry["FACE_X"],
                    entry["FACE_Y"],
                    entry["FACE_X"] + entry["FACE_WIDTH"],
                    entry["FACE_Y"] + entry["FACE_HEIGHT"],
                ]
                face_box = make_square_box(face_box) if square else face_box
                face_img = img.crop(face_box)
            else:
                face_img = img
        return face_img

    def _get_tensor_from_entries(self, entries):
        """Stack all images of one template into a single (N, C, H, W) tensor."""
        imgs = [self._get_face_img_from_entry(entry) for _, entry in entries.iterrows()]
        tensors = torch.stack([self.transform(img) for img in imgs])
        return tensors

    def __getitem__(self, idx):
        """Return both templates' tensors and a same-subject (0/1) label."""
        t1, t2 = self.comparisons.iloc[idx]
        # .loc[[t]] keeps DataFrame shape even for single-row templates.
        t1_entries, t2_entries = self.metadata.loc[[t1]], self.metadata.loc[[t2]]
        if self.only_first_image:
            t1_entries, t2_entries = t1_entries.iloc[:1], t2_entries.iloc[:1]
        t1_tensors = self._get_tensor_from_entries(t1_entries)
        t2_tensors = self._get_tensor_from_entries(t2_entries)
        if self.only_first_image:
            # Drop the singleton image dimension when only one image is used.
            t1_tensors, t2_tensors = t1_tensors.squeeze(0), t2_tensors.squeeze(0)
        s1, s2 = t1_entries["SUBJECT_ID"].iloc[0], t2_entries["SUBJECT_ID"].iloc[0]
        is_same = 1 if (s1 == s2) else 0
        return {
            "comparison_idx": idx,
            "t1_tensors": t1_tensors,
            "t2_tensors": t2_tensors,
            "is_same": is_same,
        }

    def __len__(self):
        return len(self.comparisons)
class ARVerificationAllPathDataset(Dataset):
    """AR Face verification helper: yields each cropped face tensor together
    with its file stem (e.g. data under /tmp3/biolin/datasets/face/ARFace/test2).
    """

    def __init__(
        self, dataset_root="/tmp2/zhe2325138/dataset/ARFace/mtcnn_aligned_and_cropped/"
    ):
        self.dataset_root = dataset_root
        self.face_image_paths = sorted(glob(op.join(self.dataset_root, "*.png")))
        self.transforms = transforms.Compose(
            [
                transforms.Resize([112, 112]),
                transforms.ToTensor(),
                transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
            ]
        )

    def __getitem__(self, idx):
        fpath = self.face_image_paths[idx]
        # File stem (name without directory or extension) identifies the face.
        fname, _ = op.splitext(op.basename(fpath))
        image_tensor = self.transforms(Image.open(fpath))
        return {"image_tensor": image_tensor, "fname": fname}

    def __len__(self):
        return len(self.face_image_paths)
class ARFaceDataset(Dataset):
    """AR Face pair dataset: each item pairs the indexed image with a randomly
    drawn same-person or different-person image from the same folder."""

    def __init__(self, root_folder, transform=None):
        self.root = root_folder
        self.transform = transform
        # Collect every .bmp file; the person id is the second dash-separated
        # token of the filename (e.g. 'm-001-1.bmp' -> 1).
        with os.scandir(root_folder) as entries:
            records = [
                [entry.name, int(entry.name.split('-')[1])]
                for entry in entries
                if entry.name.endswith('.bmp')
            ]
        self.img_arr = pd.DataFrame(records, columns=['image_id', 'person_id'])

    def __getitem__(self, index):
        target = np.random.randint(0, 2)  # 0: same person, 1: different person
        row1 = self.img_arr.iloc[index]
        same_person = self.img_arr['person_id'] == row1['person_id']
        candidates = self.img_arr[same_person] if target == 0 else self.img_arr[~same_person]
        row2 = candidates.sample().iloc[0]
        img1 = Image.open(os.path.join(self.root, row1['image_id']))
        img2 = Image.open(os.path.join(self.root, row2['image_id']))
        return {
            'data_input': (self.transform(img1), self.transform(img2)),
            'is_same_labels': row1['person_id'] == row2['person_id'],
            'index': index
        }

    def __len__(self):
        return self.img_arr.shape[0]
class GeneGANDataset(Dataset):
    """Image-pair dataset driven by an identity list file ('<name> <id>' per line)."""

    def __init__(self, root_folder, identity_txt, transform=None):
        self.root = root_folder
        self.img_arr = pd.read_csv(identity_txt, sep=' ', header=None)
        self.img_arr.columns = ['image_id', 'person_id']
        # Fail fast if any listed image is missing on disk.
        for name in self.img_arr['image_id']:
            if not os.path.exists(os.path.join(self.root, name)):
                raise FileNotFoundError(f'{os.path.join(self.root, name)} does not exists.')
        self.transform = transform

    def __getitem__(self, index):
        target = np.random.randint(0, 2)  # 0: same person, 1: different person
        row1 = self.img_arr.iloc[index]
        same_person = self.img_arr['person_id'] == row1['person_id']
        candidates = self.img_arr[same_person] if target == 0 else self.img_arr[~same_person]
        row2 = candidates.sample().iloc[0]
        img1 = Image.open(os.path.join(self.root, row1['image_id']))
        img2 = Image.open(os.path.join(self.root, row2['image_id']))
        return {
            'data_input': (self.transform(img1), self.transform(img2)),
            'targeted_id_labels': (row1['person_id'], row2['person_id'])
        }

    def __len__(self):
        return self.img_arr.shape[0]
| 49,157 | 36.212718 | 107 | py |
xcos | xcos-master/src/data_loader/mnist_result.py | import numpy as np
from torch.utils.data import Dataset
class MnistResultDataset(Dataset):
    """
    Customized MNIST result dataset demo
    """

    def __init__(self, result_filename, key='model_output'):
        """Load the array stored under *key* in a saved .npz result file."""
        self.key = key
        self.results = self._load_data(result_filename, key)

    def _load_data(self, result_filename, key):
        # np.load returns a mapping for .npz archives; extract one array.
        return np.load(result_filename)[key]

    def __getitem__(self, index):
        """ Overwrite __getitem__ to return dictionary """
        return {
            "index": index,
            self.key: self.results[index],
        }

    def __len__(self):
        return len(self.results)
| 661 | 24.461538 | 60 | py |
xcos | xcos-master/src/data_loader/__init__.py | 0 | 0 | 0 | py | |
xcos | xcos-master/src/data_loader/mnist.py | from torchvision import datasets
class MnistDataset(datasets.MNIST):
    """
    Customized MNIST dataset demo
    """

    def __init__(self, data_dir, train, download, transform):
        super().__init__(data_dir, train=train, download=download, transform=transform)

    def __getitem__(self, index):
        """Wrap torchvision's (image, label) pair into a dictionary."""
        image, label = super().__getitem__(index)
        return {
            "index": index,
            "data_input": image,
            "data_target": label,
        }
| 532 | 27.052632 | 87 | py |
xcos | xcos-master/src/scripts/generate_masked_training_dataset.py | import os
import errno
import argparse
import numpy as np
from glob import glob
from PIL import Image
from tqdm import tqdm
from joblib import Parallel, delayed
def apply_single_mask(image_file, random_state):
    """Multiply *image_file* by a randomly chosen binary mask and save it.

    Relies on the module-level ``mask_dir`` (list of mask image paths) and
    ``out_dir`` (output root) that are set in ``__main__``.

    Args:
        image_file: path of the face image to mask.
        random_state: seed (or None) used to pick the mask.

    Returns:
        (relative output path, relative mask path) for the csv manifest.
    """
    random_state = np.random.RandomState(random_state)
    mask_file = random_state.choice(mask_dir)
    image = Image.open(image_file)
    mask = Image.open(mask_file)
    # Broadcast the single-channel mask over the image's color channels.
    masked = np.array(image) * np.expand_dims(np.array(mask), 2)
    name = os.path.basename(image_file)
    out_subdir = os.path.basename(os.path.dirname(image_file))
    # exist_ok=True is race-safe under multiprocessing. The original
    # try/except raised a *new* bare OSError on failure, losing the errno
    # and original message.
    os.makedirs(os.path.join(out_dir, out_subdir), exist_ok=True)
    out_file = os.path.join(out_dir, out_subdir, name)
    Image.fromarray(masked).save(out_file)
    mask_number = os.path.join(os.path.basename(os.path.dirname(mask_file)), os.path.basename(mask_file))
    output_name = os.path.join(os.path.basename(out_dir), out_subdir, name)
    return (output_name, mask_number)
def apply_mask(img_dir, mask_dir, out_dir, csv_path, n_jobs):
    """Mask every image in *img_dir* in parallel and write a manifest csv."""
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    print(f'>>> Img_dir[0]:{img_dir[0]}')
    print(f'>>> mask_dir[0]:{mask_dir[0]}')
    print(f'>>> out_dir:{out_dir}')
    print(f'>>> csv_path:{csv_path}')
    # Fan the masking work out over n_jobs worker processes.
    csv_content = Parallel(n_jobs=n_jobs, backend="multiprocessing")(
        delayed(apply_single_mask)(image_file, None)
        for image_file in tqdm(img_dir, total=len(img_dir))
    )
    # One "<output_path>,<mask_path>" line per processed image.
    with open(csv_path, 'w') as manifest:
        for output_name, mask_number in csv_content:
            manifest.write(f'{output_name},{mask_number}\n')
def parse_args():
    """Command-line options for the mask-generation script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--mask_ratio', type=int, default=25)
    parser.add_argument('-j', '--n_jobs', type=int, default=4)
    parser.add_argument('-md', '--mask_dir', type=str,
                        default='../../datasets/face/masks')
    parser.add_argument('-id', '--image_dir', type=str,
                        default='../../datasets/face/faces_emore/imgs')
    parser.add_argument('-od', '--output_dir', type=str,
                        default='../../datasets/face/masked_ms1m')
    return parser.parse_args()
if __name__ == '__main__':
    args = parse_args()
    ratio = args.mask_ratio
    # All candidate binary masks for the requested occlusion ratio.
    mask_dir = glob(os.path.join(args.mask_dir, f'{ratio}/*'))
    # Every source face image (one sub-directory per identity).
    img_dir = glob(os.path.join(args.image_dir, '**/*.jpg'))
    out_dir = os.path.join(args.output_dir, f'imgs_masked{ratio}')
    # Manifest csv sits next to the output folder.
    csv_path = out_dir + '.csv'
    # print(img_dir)
    print(len(img_dir))
    # np.random.seed(9487)
    apply_mask(img_dir, mask_dir, out_dir, csv_path, args.n_jobs)
| 3,083 | 30.793814 | 106 | py |
xcos | xcos-master/src/scripts/make_dataset_list.py | '''
Make a list of files for a large dataset.
Example:
python scripts/make_dataset_list.py -p '../datasets/mnist/*/*' -o ../datasets/mnist_list.txt
'''
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # NOQA
from glob import glob
import argparse
from utils.logging_config import logger
def parse_args():
    """CLI options: glob pattern of files to list and the output txt path."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-p', '--pattern', type=str, required=True,
        help='The root dataset directory pattern for glob (e.g. "../datasets/*")'
    )
    parser.add_argument(
        '-o', '--output_filename', type=str, required=True,
        help='Output txt file path'
    )
    return parser.parse_args()
def main(args):
    """List every path matching args.pattern and write one path per line."""
    resolved_pattern = os.path.abspath(args.pattern)
    logger.info(resolved_pattern)
    matched_paths = sorted(glob(resolved_pattern))
    logger.info(f"There are totally {len(matched_paths)} files")
    with open(args.output_filename, 'w') as fout:
        fout.writelines([f"{p}\n" for p in matched_paths])
    logger.info(f"{args.output_filename} written")
# Script entry point: parse CLI arguments, then build and write the file list.
if __name__ == '__main__':
    args = parse_args()
    main(args)
| 1,155 | 24.688889 | 96 | py |
xcos | xcos-master/src/scripts/clean_saved.py | import shutil
import os
from os.path import basename, dirname, abspath
from glob import glob
import argparse
def num_checkpoints(path):
    """Count the saved .pth checkpoint files directly under *path*."""
    return len(glob(os.path.join(path, '*.pth')))
def collect_satisfied(args):
    """Gather experiment directories holding fewer than *args.at_least* checkpoints."""
    collected = []
    if args.exact_path is not None:
        # A single experiment path (exp_name/exp_time) was given explicitly.
        matches = sorted(glob(os.path.join(args.root_dir_ckpts, args.exact_path)))
        if len(matches) > 0:
            assert len(matches) == 1
            candidate = matches[0]
            if num_checkpoints(candidate) < args.at_least:
                collected.append(candidate)
    else:
        # Otherwise walk every architecture directory matching the pattern.
        for arch_path in sorted(glob(os.path.join(args.root_dir_ckpts, args.pattern))):
            assert os.path.isdir(arch_path)
            assert os.path.basename(arch_path) != 'runs'
            exp_paths = sorted(glob(os.path.join(arch_path, '*')))
            assert all(os.path.isdir(exp_path) for exp_path in exp_paths)
            collected.extend(
                exp_path for exp_path in exp_paths
                if num_checkpoints(exp_path) < args.at_least
            )
    return collected
def ask_one_by_one(args, collected):
    """Interactively confirm and delete each collected experiment directory
    (together with its matching tensorboard runs directory, if any)."""
    for ckpt_dir in collected:
        exp_name = basename(dirname(ckpt_dir))
        exp_time = basename(ckpt_dir)
        runs_dir = os.path.join(args.root_dir_runs, exp_name, exp_time)
        print('\nDelete the following directories?')
        print(ckpt_dir)
        if os.path.exists(runs_dir):
            print(runs_dir)
        response = input('[y/N]? ')
        if response == 'y':
            shutil.rmtree(ckpt_dir)
            if os.path.exists(runs_dir):
                shutil.rmtree(runs_dir)
            print('Deleted.')
        else:
            print('No deletion performed.')
def clean_empty_exp(args):
    """Remove experiment directories that contain no files at all."""
    def _remove_empty_children(root):
        # Every direct child of a save root is expected to be a directory.
        for exp_path in glob(os.path.join(root, '*')):
            assert os.path.isdir(exp_path)
            if not os.listdir(exp_path):
                os.rmdir(exp_path)

    _remove_empty_children(args.root_dir_ckpts)
    _remove_empty_children(args.root_dir_runs)
def ask_all_in_once(args, collected):
    """Show everything scheduled for deletion, then ask a single confirmation."""
    to_delete = []
    print('The following directories will be deleted.')
    for ckpt_dir in collected:
        exp_name = basename(dirname(ckpt_dir))
        exp_time = basename(ckpt_dir)
        to_delete.append(ckpt_dir)
        print(ckpt_dir)
        # Matching tensorboard runs directory, if present.
        runs_dir = os.path.join(args.root_dir_runs, exp_name, exp_time)
        if os.path.exists(runs_dir):
            to_delete.append(runs_dir)
            print(runs_dir)
    print()
    response = input('[y/N]? ')
    if response == 'y':
        for path in to_delete:
            shutil.rmtree(path)
        print('Deleted')
    else:
        print('No deletion performed.')
# Command-line interface. NOTE: parsed at import time so `args` is available
# as a module-level global to the functions above.
parser = argparse.ArgumentParser()
parser.add_argument(
    '--at_least', type=int, default=1,
    help='The number of saved checkpoints required for not being deleted.'
    'In other words, the saved files whose #checkpoints are less than this number will be deleted (Default: 1)')
parser.add_argument(
    '-p', '--pattern', type=str,
    help='Those saved files fit this re pattern will be deleted.')
parser.add_argument(
    '--root_dir_ckpts', type=str,
    default=os.path.join(dirname(dirname(abspath(__file__))), 'saved', 'ckpts'),
    help='The save directory'
)
parser.add_argument(
    '--root_dir_runs', type=str,
    default=os.path.join(dirname(dirname(abspath(__file__))), 'saved', 'runs'),
    help='The save directory'
)
parser.add_argument(
    '-all', '--all_in_once', action='store_true',
    help='Set this to disable the one-by-one comformation'
)
parser.add_argument(
    '--exact_path', type=str, default=None,
    help='Set this to a specific experiment path (exp_name/exp_time) to delete.'
)
args = parser.parse_args()
# Exactly one selection mechanism (pattern or exact path) must be usable.
assert args.pattern is not None or args.exact_path is not None, 'A pattern or an exact exp path must be provided.'
assert args.pattern != '', 'Pattern can not be empty.'
if __name__ == '__main__':
    # Find under-populated experiment dirs, confirm deletion (one-by-one or
    # all at once), then sweep away any directories left empty.
    collected = collect_satisfied(args)
    if len(collected) == 0:
        print('No satisfied directory.')
        exit()
    if args.all_in_once:
        ask_all_in_once(args, collected)
    else:
        ask_one_by_one(args, collected)
    clean_empty_exp(args)
| 4,275 | 30.441176 | 117 | py |
xcos | xcos-master/src/utils/global_config.py | '''
global_config.py
Global configuration module with a global var "global_config" for other modules to access
all configuration.
'''
from copy import deepcopy
import json
from attrdict import AttrDict
from .logging_config import logger
def flatten_nested_dict(nested_dict: dict, root_path: str, flattened_dict: dict):
    """Recursively flatten a nested dictionary into slash-joined key paths.

    Args:
        nested_dict (dict): the nested dictionary to flatten
        root_path (str): path of the current node ("" at the top level)
        flattened_dict (dict): accumulator carried through recursive calls

    Returns:
        flattened_dict (dict): mapping of 'root/leaf/...' paths to leaf values
    """
    for key, value in nested_dict.items():
        node_path = key if root_path == "" else f"{root_path}/{key}"
        # NOTE: exact-type check — dict subclasses are treated as leaves,
        # matching the original behavior.
        if type(value) != dict:
            flattened_dict[node_path] = value
        else:
            flattened_dict = flatten_nested_dict(value, node_path, flattened_dict)
    return flattened_dict
def get_value_in_nested_dict(nested_dict: dict, keys: list):
    """Fetch the value reached by following *keys* through a nested dict.

    Returns None for an empty key list (matching the original behavior).
    """
    node = nested_dict
    for depth, key in enumerate(keys):
        node = node[key]
        if depth == len(keys) - 1:
            return node
def get_changed_and_added_config(template_config: dict, specified_config: dict):
    """Diff *specified_config* against *template_config*.

    Returns:
        (flattened_changed_config, added_config): every changed-or-added
        entry, keyed by flat 'a/b/c' paths, plus the subset that is brand new.
    """
    flattened_changed_config = {}
    added_config = {}
    # Flatten both sides so leaves can be compared path-by-path.
    flat_template = flatten_nested_dict(template_config, "", dict())
    flat_specified = flatten_nested_dict(specified_config, "", dict())
    for path, value in flat_specified.items():
        if path == 'name' and 'name' in specified_config:
            # Experiment names are concatenated instead of overridden.
            flattened_changed_config['name'] = f"{template_config['name']}+{specified_config['name']}"
        elif path not in flat_template:
            # Brand-new entry: recorded in both outputs.
            flattened_changed_config[path] = value
            added_config[path] = value
        elif value != flat_template[path]:
            flattened_changed_config[path] = value
    return flattened_changed_config, added_config
def merge_template_and_flattened_changed_config(template_config, flattened_changed_config):
    """Apply flat 'a/b/c' overrides onto a deep copy of the template config.

    Missing intermediate dictionaries are created on the fly; the template
    itself is never mutated.
    """
    merged_config = deepcopy(template_config)
    for flat_key, value in flattened_changed_config.items():
        *parents, leaf = flat_key.split('/')
        node = merged_config
        for part in parents:
            # Create intermediate dicts for keys that were added.
            node = node.setdefault(part, {})
        node[leaf] = value
    return merged_config
class SingleGlobalConfig(AttrDict):
    """ The global config object for all module configuration.
    It needs to be setup first by main.py
    It is a AttrDict with additional functions for template/specified config settings.
    One could use either global_config.some_attribute or global_config['some_attribute']
    to access the config
    Note that since this class is inherited from AttrDict, attributes without starting with a _ will
    be put into the dictionary. Attrbutes starting with a _ would be viewed as invalid.
    To setup template_config and specified_config, here the '_allow_invalid_attributes' is set to
    be True first to allow this invalid self attributes and avoid putting them into the self dict.
    """
    def setup(self, template_config_filename: list, specified_config_filenames: list, resumed_checkpoint: dict = None):
        """ Setup the global_config.

        Loads the template config (or the config stored in a resumed
        checkpoint), overlays the specified config file(s), then exposes the
        merged result through this AttrDict.
        """
        # NOTE: this function needs to be called by main.py before imported by modules unless it is resumed
        # This is to set self._template_config, self._specified_config etc as they are classified as invalid attributes
        # to be put into self. See https://github.com/bcj/AttrDict/blob/9f672997bf/attrdict/mixins.py#L169
        self._setattr('_allow_invalid_attributes', True)
        if resumed_checkpoint is not None:
            # Resuming: reuse the config saved inside the checkpoint.
            self._template_config = resumed_checkpoint['config']
        else:
            self._template_config = self._load_template_config(template_config_filename)
        self._specified_config = self._load_specified_configs(specified_config_filenames)
        # Compare specified_config and template_config to get changed_config/merged_config
        self._flattened_changed_config, self._added_config = \
            get_changed_and_added_config(self._template_config, self._specified_config)
        self._merged_config = merge_template_and_flattened_changed_config(
            self._template_config, self._flattened_changed_config)
        self._setattr('_allow_invalid_attributes', False)
        self.set_config(self._merged_config)
    def set_config(self, config: dict):
        """ Set the config (copy every key/value pair into this AttrDict). """
        for k, v in config.items():
            self[k] = v
    def __print__(self):
        """ Print all key & value pairs. """
        for k, v in self.items():
            logger.info(f"{k}: {v}")
    def print_changed(self):
        """ Print all changed/added values relative to the template config. """
        for k, v in self._flattened_changed_config.items():
            if k in self._added_config:
                logger.info(f"Added key: {k} ({v})")
            else:
                # Show old -> new for entries that override the template.
                original_value = get_value_in_nested_dict(self._template_config, k.split('/'))
                logger.warning(f"Changed key: {k} ({original_value} -> {v})")
    def _load_template_config(self, config_filename: str):
        """ Load the template config from a json file. """
        # Note that since this class is inherited from AttrDict, attributes without starting with a _ will
        # be put into the dictionary.
        with open(config_filename) as fin:
            logger.info(f"===== Using {config_filename} as template config =====")
            return json.load(fin)
    def _load_specified_configs(self, config_filenames: list):
        """ Load specified config(s) and merge them into one dict. """
        # Note that since this class is inherited from AttrDict, attributes without starting with a _ will
        # be put into the dictionary.
        return self._extend_configs({}, config_filenames)
    def _extend_configs(self, config: dict, config_filenames: list):
        """ Extend a dict config with several config files. """
        if config_filenames is None:
            return {}
        # load config files, the overlapped entries will be overwriten
        for config_filename in config_filenames:
            with open(config_filename) as fin:
                added_config = json.load(fin)
            config = self._extend_config(config, added_config)
        return config
    def _extend_config(self, config: dict, added_config: dict):
        """ Extend a dict config with an dict added_config.

        Overlapping keys are overwritten, except 'name' which is concatenated.
        """
        for key, value in added_config.items():
            if key in config.keys():
                if key == 'name':
                    value = f"{config[key]}_{value}"
                else:
                    logger.warning(f"Overriding '{key}' in config")
                del config[key]
            config[key] = value
        return config
# Module-level singleton: main.py must call global_config.setup(...) once at
# startup; every other module then simply imports this `global_config` object
# to read configuration values.
global_config = SingleGlobalConfig()
| 7,905 | 40.39267 | 119 | py |
xcos | xcos-master/src/utils/visualization.py | try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
print("Using tensorboardX instead of built-in tensorboard (need PyTorch 1.2+ with Tensorboard 1.14+)")
from tensorboardX import SummaryWriter
class WriterTensorboard():
    """Thin wrapper around a tensorboard SummaryWriter.

    Prefixes every tag with the current mode ('train'/'valid'...) and injects
    the current global step, so callers just do `writer.add_scalar(tag, value)`.
    When `enable` is False all add_* calls become no-ops.
    """
    def __init__(self, writer_dir, logger, enable):
        self.writer = None
        if enable:
            log_path = writer_dir
            self.writer = SummaryWriter(log_path)
        self.step = 0
        self.mode = ''
        # SummaryWriter methods that are proxied through __getattr__.
        self.tensorboard_writer_ftns = ['add_scalar', 'add_scalars', 'add_image', 'add_video',
                                        'add_audio', 'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding']

    def set_step(self, step, mode='train'):
        """Set the global step and the mode used to prefix subsequent tags."""
        self.mode = mode
        self.step = step

    def __getattr__(self, name):
        """
        If visualization is configured to use:
            return add_data() methods of tensorboard with additional information (step, tag) added.
        Otherwise:
            return blank function handle that does nothing
        """
        if name in self.tensorboard_writer_ftns:
            add_data = getattr(self.writer, name, None)

            def wrapper(tag, data, *args, **kwargs):
                # add_data is None when the writer is disabled -> no-op.
                if add_data is not None:
                    add_data(f'{self.mode}/{tag}', data, self.step, *args, **kwargs)
            return wrapper
        # Fix: the previous fallback called the nonexistent object.__getattr__()
        # (always raising) and then re-raised with the wrong class name
        # ('WriterTensorboardX'). Raise a correct AttributeError directly.
        raise AttributeError(f"type object '{type(self).__name__}' has no attribute '{name}'")
| 1,723 | 37.311111 | 114 | py |
xcos | xcos-master/src/utils/align.py | import cv2
import numpy as np
from skimage import transform as trans
class Alignment:
    """Align a face crop to the canonical 112x112 five-point template
    (ArcFace convention) with a similarity transform."""

    def __init__(self):
        self.image_size = (112, 112)
        # Canonical 5-point landmark template: eyes, nose tip, mouth corners,
        # shifted 8px to the right for the 112x112 crop.
        template = np.array([
            [30.2946, 51.6963],
            [65.5318, 51.5014],
            [48.0252, 71.7366],
            [33.5493, 92.3655],
            [62.7299, 92.2041]], dtype=np.float32)
        template[:, 0] += 8.0
        self.src = template

    def align(self, rimg, landmark):
        """Warp `rimg` so that `landmark` (68- or 5-point, (N, 2)) matches the
        template; returns the aligned crop converted BGR -> RGB."""
        assert landmark.shape[0] in (68, 5)
        assert landmark.shape[1] == 2
        if landmark.shape[0] == 68:
            # Reduce the 68-point annotation to the 5 canonical points:
            # eye centers, nose tip and the two mouth corners.
            pts = np.zeros((5, 2), dtype=np.float32)
            pts[0] = (landmark[36] + landmark[39]) / 2
            pts[1] = (landmark[42] + landmark[45]) / 2
            pts[2] = landmark[30]
            pts[3] = landmark[48]
            pts[4] = landmark[54]
        else:
            pts = landmark
        tform = trans.SimilarityTransform()
        tform.estimate(pts, self.src)
        M = tform.params[0:2, :]
        warped = cv2.warpAffine(rimg, M,
                                (self.image_size[1], self.image_size[0]),
                                borderValue=0.0)
        # BGR2RGB
        return cv2.cvtColor(warped, cv2.COLOR_BGR2RGB)
| 1,439 | 32.488372 | 64 | py |
xcos | xcos-master/src/utils/logging_config.py | import logging
import sys
# Module import has side effects: it configures the root logger exactly once
# with a stdout handler and a uniform "[time] {file:line} LEVEL - msg" format.
stream_handler = logging.StreamHandler(sys.stdout)
format_ = ('[%(asctime)s] {%(filename)s:%(lineno)d} '
           '%(levelname)s - %(message)s')
try:
    # use colored logs if installed
    import coloredlogs
    formatter = coloredlogs.ColoredFormatter(fmt=format_)
    stream_handler.setFormatter(formatter)
except Exception:
    # coloredlogs is optional; fall back to the plain formatter silently.
    pass
logging.basicConfig(
    level=logging.INFO,
    format=format_,
    datefmt='%m-%d %H:%M:%S',
    handlers=[stream_handler]
)
# Import this `logger` from other modules instead of re-configuring logging.
logger = logging.getLogger(__name__)
| 533 | 21.25 | 57 | py |
xcos | xcos-master/src/utils/insight2xcos.py | # from model.face_recog import Backbone_FC2Conv, Backbone
# from model.xcos_modules import XCosAttention
# backbone = Backbone_FC2Conv(50, 0.6, 'ir_se')
# attention = XCosAttention(use_softmax=True, softmax_t=1, chw2hwc=True)
# backbone_target = Backbone(50,
# 0.6,
# 'ir_se')
# backbone.load_state_dict(torch.load(backbone_weights_path), strict=True)
# attention.load_state_dict(torch.load(atten_weights_path), strict=True)
# backbone_target.load_state_dict(torch.load(backbone_target_path))
# One-off conversion script: loads backbone/attention checkpoints trained in
# the InsightFace_Pytorch repo and repackages them as a single xCosModel
# checkpoint ({'state_dict': ...}) for this repo.
import os.path as op
import torch
from collections import OrderedDict
from model.model import xCosModel
# All source checkpoints live under this InsightFace_Pytorch working dir.
insight_dir = "/home/r07944011/researches/InsightFace_Pytorch"
# --- ArcFace checkpoint set (superseded by the CosFace assignments below) ---
backbone_weights_path = 'work_space/save/model_2019-08-25-14-35_accuracy:0.9931666666666666_step:218349_None.pth'
atten_weights_path = 'work_space/save/model_attention_2019-08-25-14-35_accuracy:0.9931666666666666_step:218349_None.pth'
backbone_target_path = "work_space/save/model_ir_se50.pth"
output_name = '../pretrained_model/xcos/20200217_accu_9931_Arcface.pth'
# --- CosFace checkpoint set. NOTE(review): these reassignments override the
# ArcFace paths above, so only the CosFace conversion actually runs; comment
# out one block to choose which model to convert.
backbone_weights_path = 'work_space/save/model_2019-09-02-08-21_accuracy:0.9968333333333333_step:436692_CosFace.pth'
atten_weights_path = 'work_space/save/model_attention_2019-09-02-08-21_'\
                     'accuracy:0.9968333333333333_step:436692_CosFace.pth'
backbone_target_path = "work_space/save/model_irse50_CosFace_ms1m_9039.pth"
output_name = '../pretrained_model/xcos/20200226_accu_9968_Cosface.pth'
backbone_weights_path = op.join(insight_dir, backbone_weights_path)
atten_weights_path = op.join(insight_dir, atten_weights_path)
backbone_target_path = op.join(insight_dir, backbone_target_path)
# Load the three sub-module checkpoints into a fresh xCosModel.
xcos_model = xCosModel()
xcos_model.backbone.load_state_dict(torch.load(backbone_weights_path), strict=True)
xcos_model.attention.load_state_dict(torch.load(atten_weights_path), strict=True)
xcos_model.backbone_target.load_state_dict(torch.load(backbone_target_path))
model_state = xcos_model.state_dict()
# Drop the classification head ("head*" keys) — only embedding/attention
# weights are exported; dropped keys are printed for inspection.
model_state_tmp = OrderedDict()
for k, v in model_state.items():
    if k.startswith("head"):
        print(k)
    else:
        model_state_tmp[k] = v
model_state = model_state_tmp
state = {
    'state_dict': model_state
}
torch.save(state, output_name)
| 2,258 | 43.294118 | 120 | py |
xcos | xcos-master/src/utils/insight_to_normal_face_model.py | # from model.face_recog import Backbone_FC2Conv, Backbone
# from model.xcos_modules import XCosAttention
# backbone = Backbone_FC2Conv(50, 0.6, 'ir_se')
# attention = XCosAttention(use_softmax=True, softmax_t=1, chw2hwc=True)
# backbone_target = Backbone(50,
# 0.6,
# 'ir_se')
# backbone.load_state_dict(torch.load(backbone_weights_path), strict=True)
# attention.load_state_dict(torch.load(atten_weights_path), strict=True)
# backbone_target.load_state_dict(torch.load(backbone_target_path))
# One-off conversion script: repackages an InsightFace_Pytorch backbone
# checkpoint as a NormalFaceModel checkpoint ({'state_dict': ...}) usable as
# the baseline model in this repo.
import os.path as op
import torch
from collections import OrderedDict
from model.model import NormalFaceModel
insight_dir = "/home/r07944011/researches/InsightFace_Pytorch"
# ArcFace backbone is active; swap in the commented CosFace pair below to
# convert the CosFace baseline instead.
backbone_path = "work_space/save/model_ir_se50.pth"
output_name = '../pretrained_model/baseline/20200228_accu_9952_Arcface_backbone.pth'
# backbone_path = "work_space/save/model_irse50_CosFace_ms1m_9039.pth"
# output_name = '../pretrained_model/baseline/20200228_accu_9930_Cosface_backbone.pth'
backbone_weights_path = op.join(insight_dir, backbone_path)
model = NormalFaceModel()
model.backbone.load_state_dict(torch.load(backbone_weights_path), strict=True)
model_state = model.state_dict()
# Strip the classification head ("head*" keys) before exporting; dropped
# keys are printed for inspection.
model_state_tmp = OrderedDict()
for k, v in model_state.items():
    if k.startswith("head"):
        print(k)
    else:
        model_state_tmp[k] = v
model_state = model_state_tmp
state = {
    'state_dict': model_state
}
torch.save(state, output_name)
| 1,476 | 33.348837 | 86 | py |
xcos | xcos-master/src/utils/util.py | import os
import os.path as op
from glob import glob
import importlib.util
import torch
import numpy as np
import io
import cv2
import base64
import seaborn as sns
from PIL import Image
from torchvision.transforms import ToTensor
from matplotlib import pyplot as plt
lib_path = op.abspath(op.join(__file__, op.pardir, op.pardir, op.pardir, 'libs'))
def get_instance(module, name, config, *args, **kargs):
    """Instantiate `config[name]['type']` looked up on `module`, passing
    positional `args`, then the configured `args` dict, then `kargs`."""
    entry = config[name]
    factory = getattr(module, entry['type'])
    return factory(*args, **entry['args'], **kargs)
def ensure_dir(path):
    """Create directory `path` (including parents) if it does not exist.

    Uses exist_ok=True, which removes the check-then-create race of the
    previous os.path.exists() guard and also creates missing parents.
    """
    os.makedirs(path, exist_ok=True)
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first param group
    (None if the optimizer has no param groups)."""
    for group in optimizer.param_groups:
        return group['lr']
def get_everything_under(root_dir, pattern='*', only_dirs=False, only_files=False):
    """Return the sorted glob matches of `pattern` under `root_dir`,
    optionally restricted to directories only or files only."""
    assert not (only_dirs and only_files), 'You will get nothnig '\
        'when "only_dirs" and "only_files" are both set to True'
    matches = sorted(glob(os.path.join(root_dir, pattern)))
    if only_dirs:
        matches = [p for p in matches if os.path.isdir(p)]
    if only_files:
        matches = [p for p in matches if os.path.isfile(p)]
    return matches
def one_hot_embedding(labels, num_classes):
    """Embed integer class labels into one-hot (float) vectors.

    Based on https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/26

    Args:
        labels: (LongTensor) class labels, sized [N,].
        num_classes: (int) number of classes.
    Returns:
        (tensor) encoded labels, sized [N, #classes].
    """
    # Rows of the identity matrix are exactly the one-hot vectors.
    return torch.eye(num_classes)[labels]
class DeNormalize(object):
    """Invert a torchvision-style Normalize: out = in * std + mean, per channel."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image(s) to be de-normalized.
                Either [B, C, W, H] (a batch) or [C, W, H] (a single image).
        Returns:
            Tensor: De-normalized image(s).
        """
        ndim = len(tensor.shape)
        if ndim == 4:          # batch: channel axis is 1
            channel_dim = 1
        elif ndim == 3:        # single image: channel axis is 0
            channel_dim = 0
        else:
            raise NotImplementedError()
        channels = tensor.split(1, dim=channel_dim)
        # Undo (x - mean) / std channel by channel.
        restored = [c * s + m for c, m, s in zip(channels, self.mean, self.std)]
        return torch.cat(restored, dim=channel_dim)
def import_given_path(module_name, path):
    """Load and execute the Python file at `path`, returning it as a module
    registered under `module_name` (without touching sys.modules)."""
    spec = importlib.util.spec_from_file_location(module_name, path)
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    return loaded
def tensor_np_histogram(tensor):
return np.histogram(tensor.cpu().numpy().flatten())
def batch_visualize_xcos(img1s, img2s, grid_cos_maps, attention_maps):
    """Render the xCos visualization for every item in a batch.

    Args:
        img1s, img2s: parallel sequences of images (one pair per item).
        grid_cos_maps, attention_maps: parallel sequences of per-item maps.
    Returns:
        list of rendered results, one per batch item.
    """
    # zip over the four parallel sequences replaces the index-based
    # range(len(...)) loop; assumes all four have equal length.
    return [visualize_xcos(img1, img2, cos_map, att_map)
            for img1, img2, cos_map, att_map
            in zip(img1s, img2s, grid_cos_maps, attention_maps)]
def visualize_xcos(image1, image2, grid_cos_map, attention_map,
                   name1=None, name2=None,
                   regressed_cos=None, is_same=None, threshold=0.245,
                   return_base64=False):
    """Plot the qualitative result of xCos as a 1x4 figure:
    [face 1 | face 2 | patch-cosine heatmap | attention-weight heatmap].

    Arguments:
        image1 [np.array] -- of shape (c, h, w); normalized to roughly [-1, 1] (float32)
        image2 [np.array] -- of shape (c, h, w); same normalization as image1
        grid_cos_map [np.array] -- of shape (h, w), per-patch cosine values
        attention_map [np.array] -- of shape (h, w), attention weights
        name1, name2, regressed_cos, is_same -- currently unused (kept for API
            compatibility; the related drawing code is commented out below)
        threshold -- center value for the diverging patch-cosine colormap
        return_base64 -- if True return a base64-encoded JPEG, else a
            [1, C, H, W] float image tensor of the rendered figure
    """
    plt.gcf().clear()
    # name1, name2 = 'Left', 'Right'
    # isSame = int(isSame)
    # Unnormalize images
    image1 = ((image1 * 0.5 + 0.5) * 255).astype('uint8')
    image2 = ((image2 * 0.5 + 0.5) * 255).astype('uint8')
    # CHW2HWC
    image1 = np.transpose(image1, (1, 2, 0))
    image2 = np.transpose(image2, (1, 2, 0))
    # XXX BGR2RGB should be executed twice otherwise cv2 would complain in drawGird
    # Input img should be in PIL format (RGB).
    image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
    image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
    image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2RGB)
    image2 = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
    # same = 1 if float(cos_fr) > threshold else 0
    # title_str = getTFNPString(isSame, same)
    # Create visualization
    fig_size = (14, 3)
    fig, axs = plt.subplots(1, 4, tight_layout=True, figsize=fig_size)
    # if subtitle:
    #     fig.suptitle(title_str +
    #                  ' Cos=%.2f xCos=%.2f' % (float(cos_fr), cos_x))
    [axs[i].set_axis_off() for i in range(4)]
    axs[0].set_title('Face 1', y=-0.1)
    axs[1].set_title('Face 2', y=-0.1)
    axs[2].set_title(r'$cos_{patch}$', y=-0.1)
    axs[3].set_title(r'$weight_{attetion}$', y=-0.1)
    # Overlay the 6x6 patch grid on both faces (drawn in-place).
    drawGridLines(image1, 6, 6)
    drawGridLines(image2, 6, 6)
    axs[0].imshow(image1)
    axs[1].imshow(image2)
    # Show grid_cos_map.
    im, cbar = heatmap_seaborn(grid_cos_map, [], [], ax=axs[2],
                               cmap="RdBu", threshold=threshold)
    # Show weights_attention.
    im, cbar = heatmap(attention_map, [], [], ax=axs[3], cmap="YlGn")
    # Save file.
    # img_name = os.path.join(exPath, filename)
    # print(img_name)
    # score_log_name = os.path.splitext(img_name)[0]+'.txt'
    # with open(score_log_name, 'w') as the_file:
    #     the_file.write(f"{cos_x}")
    # plt.savefig(img_name, bbox_inches='tight')
    # return the base64 image (for demo xCos purpose)
    if return_base64:
        pic_IObytes = io.BytesIO()
        plt.savefig(pic_IObytes, format='jpg')
        pic_IObytes.seek(0)
        pic_hash = base64.b64encode(pic_IObytes.read())
        plt.close()
        return pic_hash
    else:
        # Render the figure into an in-memory JPEG, then to a [1, C, H, W] tensor.
        buf = io.BytesIO()
        plt.savefig(buf, format='jpeg', dpi=100)
        buf.seek(0)
        image = Image.open(buf)
        image = ToTensor()(image).unsqueeze(0)
        plt.close()
        return image
def drawGridLines(image_t, w_lines=5, h_lines=6,
                  colorRGB=(255, 0, 0)):
    '''
    Draw an evenly spaced grid over image_t (HWC uint8 array), in place.
    colorRGB: default red (255, 0, 0); pass e.g. (128, 128, 128) for gray.
    '''
    # Fix: the colorRGB parameter used to be unconditionally overwritten with
    # red inside the function, so callers could never choose a colour. The
    # default is now red to keep existing output unchanged, and an explicit
    # argument is honoured.
    w_lines += 1
    h_lines += 1
    h, w, _ = image_t.shape
    w_unit = int(w // w_lines)
    # w_start = int(w_unit // 2)
    w_start = w_unit
    h_unit = int(h // h_lines)
    # h_start = int(h_unit // 2)
    h_start = h_unit
    # Draw vertical grid lines
    for step in range(w_lines):
        start_pt = (w_start + w_unit * step, 0)
        end_pt = (w_start + w_unit * step, h)
        cv2.line(image_t, start_pt, end_pt, colorRGB, 1, 1)
    # Draw horizontal grid lines
    for step in range(h_lines):
        start_pt = (0, h_start + h_unit * step)
        end_pt = (w, h_start + h_unit * step)
        cv2.line(image_t, start_pt, end_pt, colorRGB, 1, 1)
def heatmap(data, row_labels, col_labels, ax=None,
            cbar_kw=None, cbarlabel="", **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels.

    Parameters
    ----------
    data
        A 2D numpy array of shape (N, M).
    row_labels
        A list or array of length N with the labels for the rows.
    col_labels
        A list or array of length M with the labels for the columns.
    ax
        A `matplotlib.axes.Axes` instance to which the heatmap is plotted.  If
        not provided, use current axes or create a new one.  Optional.
    cbar_kw
        A dictionary with arguments to `matplotlib.Figure.colorbar`.  Optional.
    cbarlabel
        The label for the colorbar.  Optional.
    **kwargs
        All other arguments are forwarded to `imshow`.

    Returns
    -------
    (im, cbar): the AxesImage and its Colorbar.
    """
    # Mutable-default avoidance: cbar_kw defaults to an empty dict per call.
    if cbar_kw is None:
        cbar_kw = {}
    if not ax:
        ax = plt.gca()
    # Plot the heatmap
    im = ax.imshow(data, **kwargs)
    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
    # We want to show all ticks...
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    # ... and label them with the respective list entries.
    ax.set_xticklabels(col_labels)
    ax.set_yticklabels(row_labels)
    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False,
                   labeltop=True, labelbottom=False)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
             rotation_mode="anchor")
    # Turn spines off and create white grid.
    for _, spine in ax.spines.items():
        spine.set_visible(False)
    # Minor ticks at cell boundaries produce the white separating grid.
    ax.set_xticks(np.arange(data.shape[1] + 1) - .5, minor=True)
    ax.set_yticks(np.arange(data.shape[0] + 1) - .5, minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)
    return im, cbar
def heatmap_seaborn(data, row_labels, col_labels, ax=None,
                    cmap=None, cbarlabel="", threshold=0.5, **kwargs):
    """
    Create a seaborn heatmap of a 2D numpy array on the given Axes.

    The diverging colormap is centered at `threshold` with the value range
    fixed to [-1, 1] (the data are cosine similarities).

    Parameters
    ----------
    data
        A 2D numpy array of shape (N, M).
    row_labels, col_labels
        Kept for API symmetry with `heatmap`; not used by this function.
    ax
        A `matplotlib.axes.Axes` instance to plot on.  Required.
    cmap
        Colormap name forwarded to seaborn.
    cbarlabel
        The label for the colorbar.  Optional.
    threshold
        Center value of the diverging colormap.
    **kwargs
        Accepted for API symmetry; not forwarded.

    Returns
    -------
    (g, cbar): the seaborn Axes and None (no separate Colorbar handle).
    """
    if ax is None:
        # Fix: this used to call exit('no ax'), which kills the whole process
        # from library code (and the plt.gca() fallback after it was
        # unreachable dead code). Raise a normal error instead.
        raise ValueError("heatmap_seaborn requires a matplotlib Axes via 'ax'")
    # Plot the heatmap
    g = sns.heatmap(data, ax=ax, center=threshold, vmin=-1, vmax=1,
                    cmap=cmap, cbar_kws={'label': cbarlabel})
    # seaborn manages its own colorbar; no separate handle to return.
    cbar = None
    # Minor ticks at cell boundaries produce the white separating grid.
    ax.set_yticks(np.arange(data.shape[0] + 1), minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)
    return g, cbar
| 11,115 | 31.127168 | 90 | py |
xcos | xcos-master/src/utils/verification.py | import numpy as np
from sklearn.model_selection import KFold
import matplotlib.pyplot as plt
import io
from PIL import Image
from torchvision import transforms
def calculate_accuracy(threshold, dist, actual_issame, useCos=False):
    '''
    Compute (tpr, fpr, accuracy) of a same/different prediction at `threshold`.
    if useCos = True, then view 'dist' variable as cos (predict "same" when
    value is ABOVE the threshold); otherwise `dist` is a distance (predict
    "same" when BELOW the threshold).
    '''
    predict_issame = np.greater(dist, threshold) if useCos else np.less(dist, threshold)
    not_predict = np.logical_not(predict_issame)
    not_actual = np.logical_not(actual_issame)
    tp = np.sum(np.logical_and(predict_issame, actual_issame))
    fp = np.sum(np.logical_and(predict_issame, not_actual))
    tn = np.sum(np.logical_and(not_predict, not_actual))
    fn = np.sum(np.logical_and(not_predict, actual_issame))
    # Guard the rates against empty positive/negative classes.
    tpr = float(tp) / float(tp + fn) if (tp + fn) != 0 else 0
    fpr = float(fp) / float(fp + tn) if (fp + tn) != 0 else 0
    acc = float(tp + tn) / dist.size
    return tpr, fpr, acc
def calculate_roc_attention(thresholds,
                            xCoses,
                            actual_issame, nrof_folds=10, pca=0):
    """K-fold ROC evaluation over cosine scores.

    For each fold: pick the threshold maximizing training accuracy, then
    record the test tpr/fpr over all thresholds and the test accuracy at the
    chosen threshold.

    Args:
        thresholds: 1D array of candidate thresholds.
        xCoses: 1D np.array of cosine scores, one per pair.
        actual_issame: 1D np.array of ground-truth same/different labels.
        nrof_folds: number of cross-validation folds.
        pca: must be 0; any pca > 0 path is not implemented.
    Returns:
        (tpr, fpr): per-threshold rates averaged over folds,
        accuracy: per-fold test accuracy at each fold's best threshold,
        best_thresholds: per-fold chosen threshold.
    """
    nrof_pairs = min(len(actual_issame), xCoses.shape[0])
    nrof_thresholds = len(thresholds)
    # shuffle=False keeps fold membership deterministic across runs.
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)
    tprs = np.zeros((nrof_folds, nrof_thresholds))
    fprs = np.zeros((nrof_folds, nrof_thresholds))
    accuracy = np.zeros((nrof_folds))
    best_thresholds = np.zeros((nrof_folds))
    indices = np.arange(nrof_pairs)
    # print('pca', pca)
    if pca == 0:
        cosines = xCoses
    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
        # print('train_set', train_set)
        # print('test_set', test_set)
        if pca > 0:
            raise NotImplementedError
        # Find the best threshold for the fold
        acc_train = np.zeros((nrof_thresholds))
        for threshold_idx, threshold in enumerate(thresholds):
            _, _, acc_train[threshold_idx] = calculate_accuracy(
                threshold,
                cosines[train_set],
                actual_issame[train_set],
                useCos=True)
        best_threshold_index = np.argmax(acc_train)
        best_thresholds[fold_idx] = thresholds[best_threshold_index]
        # Test tpr/fpr across ALL thresholds (for the averaged ROC curve).
        for threshold_idx, threshold in enumerate(thresholds):
            tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = \
                calculate_accuracy(threshold,
                                   cosines[test_set],
                                   actual_issame[test_set],
                                   useCos=True)
        # Test accuracy only at this fold's best training threshold.
        _, _, accuracy[fold_idx] = calculate_accuracy(
            thresholds[best_threshold_index],
            cosines[test_set],
            actual_issame[test_set],
            useCos=True)
    tpr = np.mean(tprs, 0)
    fpr = np.mean(fprs, 0)
    return tpr, fpr, accuracy, best_thresholds
def evaluate_accuracy(xCoses, actual_issame, nrof_folds=10, pca=0):
    '''
    Run k-fold verification over cosine scores and render the averaged ROC.

    xCoses: np.array (# of pairs,)
    actual_issame: list (# of pairs,)
    Returns (per-fold accuracies, per-fold best thresholds, ROC image tensor).
    '''
    # Sweep thresholds over the full cosine range [-1, 1).
    thresholds = np.arange(-1.0, 1.0, 0.005)
    labels = np.asarray(actual_issame)
    tpr, fpr, accuracy, best_thresholds = calculate_roc_attention(
        thresholds, xCoses, labels, nrof_folds=nrof_folds, pca=pca)
    roc_curve_tensor = get_roc_curve(fpr, tpr)
    return accuracy, best_thresholds, roc_curve_tensor
def get_roc_curve(fpr, tpr):
    """Render the ROC curve figure and return it as a CHW image tensor."""
    jpeg_buffer = gen_plot(fpr, tpr)
    figure_image = Image.open(jpeg_buffer)
    return transforms.ToTensor()(figure_image)
def gen_plot(fpr, tpr):
    """Create a pyplot plot and save to buffer (in-memory JPEG, rewound)."""
    plt.figure()
    plt.xlabel("FPR", fontsize=14)
    plt.ylabel("TPR", fontsize=14)
    plt.title("ROC Curve", fontsize=14)
    # NOTE(review): the actual curve plotting is commented out, so the saved
    # figure currently contains only the axes labels and title — confirm
    # whether this was disabled intentionally.
    # plot = plt.plot(fpr, tpr, linewidth=2)
    buf = io.BytesIO()
    plt.savefig(buf, format='jpeg')
    buf.seek(0)
    plt.close()
    return buf
def getTFNPString(same, isSame_pred):
    """Map (ground truth, prediction) to a confusion-matrix label string.

    Args:
        same: ground-truth label (1 = same identity, 0 = different).
        isSame_pred: predicted label, coerced with int().
    Returns:
        'True_Positive' / 'True_Negative' / 'False_Positive' /
        'False_Negative', or the 'LL' placeholder for non-binary inputs.
    """
    outcome_names = {
        (1, 0): 'False_Negative',
        (0, 1): 'False_Positive',
        (1, 1): 'True_Positive',
        (0, 0): 'True_Negative',
    }
    return outcome_names.get((same, int(isSame_pred)), 'LL')
def checkTFPN(cos, is_same_label, threshold=0.2545):
    """Classify one verification pair into TP/TN/FP/FN given its cosine score.

    0.2545 is the threshold tuned for xCosArcFace.
    """
    predicted_same = 1 if float(cos) > threshold else 0
    return getTFNPString(is_same_label, predicted_same)
| 4,936 | 33.284722 | 95 | py |
xcos | xcos-master/src/utils/__init__.py | 0 | 0 | 0 | py | |
def read_lines_into_list(filename):
    """Read a text file and return its lines as a list of strings.

    Each line is stripped of surrounding whitespace, including the trailing
    newline (this is the behaviour the original commented-out note described).
    """
    with open(filename) as f:
        return [line.strip() for line in f]
| 303 | 32.777778 | 91 | py |
xcos | xcos-master/src/model/base_model.py | import torch.nn as nn
import numpy as np
from utils.logging_config import logger
class BaseModel(nn.Module):
    """
    Base class for all models
    """
    def __init__(self):
        super(BaseModel, self).__init__()

    def forward(self, *input):
        """
        Forward pass logic — must be implemented by subclasses.

        :return: Model output
        """
        raise NotImplementedError

    def summary(self):
        """
        Log the trainable-parameter count and the module structure.
        """
        trainable = (p for p in self.parameters() if p.requires_grad)
        params = sum(np.prod(p.size()) for p in trainable)
        logger.info(f'Trainable parameters: {params}')
        logger.info(self)
| 672 | 20.709677 | 79 | py |
xcos | xcos-master/src/model/loss.py | import torch
import torch.nn as nn
class BaseLoss(nn.Module):
    """Base class for losses driven by dictionary keys.

    Subclasses set `self.loss_fn` and may override the `_preproces` /
    `_postprocess` hooks. `forward` pulls the prediction from
    `output_dict[output_key]` and the target from `data_dict[target_key]`.
    """

    def __init__(self, output_key, target_key, nickname=None, weight=1):
        super().__init__()
        self.output_key = output_key
        self.target_key = target_key
        self.weight = weight
        # Default nickname is the concrete subclass name.
        self.nickname = nickname if nickname is not None else self.__class__.__name__

    def _preproces(self, data_dict, output_dict):
        # Hook for subclasses; identity by default.
        return data_dict, output_dict

    def _postprocess(self, output, target):
        # Hook for subclasses; identity by default.
        return output, target

    def forward(self, data_dict, output_dict):
        data_dict, output_dict = self._preproces(data_dict, output_dict)
        raw_output = output_dict[self.output_key]
        raw_target = data_dict[self.target_key]
        final_output, final_target = self._postprocess(raw_output, raw_target)
        return self.loss_fn(final_output, final_target)
class CrossEntropyLoss(BaseLoss):
    """Standard cross-entropy between predicted logits and integer targets."""

    def __init__(self, *args, **kargs):
        super(CrossEntropyLoss, self).__init__(*args, **kargs)
        self.loss_fn = nn.CrossEntropyLoss()
class SiameseCrossEntropyLoss(BaseLoss):
    """Cross-entropy for siamese pairs: the target arrives as a pair of
    tensors and is concatenated into a single batch before the loss."""

    def __init__(self, *args, **kargs):
        super().__init__(*args, **kargs)
        self.loss_fn = nn.CrossEntropyLoss()

    def _preproces(self, data_dict, output_dict):
        paired_targets = data_dict[self.target_key]
        # Merge the pair of label tensors into one batch dimension.
        data_dict[self.target_key] = torch.cat(paired_targets)
        return data_dict, output_dict
class SiameseMSELoss(BaseLoss):
    """MSE loss whose regression target is taken from the model output
    itself (e.g. a teacher branch) rather than from the data dict."""

    def __init__(self, *args, **kargs):
        super().__init__(*args, **kargs)
        self.loss_fn = nn.MSELoss()

    def _preproces(self, data_dict, output_dict):
        # Redirect the target lookup to the network's own output.
        data_dict[self.target_key] = output_dict[self.target_key]
        return data_dict, output_dict
class GANLoss(BaseLoss):
    """Adversarial loss for either side of a GAN.

    `network` selects whose loss is computed ('generator' or 'discriminator');
    `type_` selects the formulation: 'nsgan' (BCE), 'lsgan' (MSE), 'l1', or
    'hinge' (delegated to HingeLossG/HingeLossD).
    Expects output_dict to provide 'D_G_z' (D on fake samples) and, for the
    discriminator, 'D_x' (D on real samples).
    """
    def __init__(
        self, network,
        type_='lsgan',
        target_real_label=1.0, target_fake_label=0.0,
        *args, **kargs
    ):
        # output_key/target_key are unused: forward reads fixed keys instead.
        super().__init__(output_key=None, target_key=None, *args, **kargs)
        assert network in ['generator', 'discriminator']
        if type_ == 'nsgan':
            self.loss_fn = nn.BCELoss()
        elif type_ == 'lsgan':
            self.loss_fn = nn.MSELoss()
        elif type_ == 'l1':
            self.loss_fn = nn.L1Loss()
        elif type_ == 'hinge':
            # Hinge variant bypasses loss_fn entirely (see forward).
            self.hinge_loss = HingeLossG() if network == 'generator' else HingeLossD()
        else:
            raise NotImplementedError()
        self.type_ = type_
        self.network = network
        # Buffers (not parameters) so the label constants follow .to(device).
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
    def forward(self, data_dict, output_dict):
        if self.type_ == 'hinge':
            return self.hinge_loss(data_dict, output_dict)
        if self.network == 'generator':
            # Generator wants D(G(z)) to look real.
            outputs = output_dict['D_G_z']
            targets = self.real_label.expand_as(outputs).to(outputs.device)
            loss = self.loss_fn(outputs, targets)
        elif self.network == 'discriminator':
            # Discriminator: fake samples toward fake_label, real toward real_label.
            fake_outputs = output_dict['D_G_z']
            fake_targets = self.fake_label.expand_as(fake_outputs).to(fake_outputs.device)
            loss_d_fake = self.loss_fn(fake_outputs, fake_targets)
            real_outputs = output_dict['D_x']
            real_targets = self.real_label.expand_as(real_outputs).to(real_outputs.device)
            loss_d_real = self.loss_fn(real_outputs, real_targets)
            loss = (loss_d_fake + loss_d_real) / 2
        else:
            raise NotImplementedError(f"Wrong network '{self.network}' for GANMSELoss")
        return loss
# Formulation reference: https://arxiv.org/pdf/1802.05957.pdf (eq. 17)
class HingeLossG(nn.Module):
    """Generator hinge loss: L_G = -E[D(G(z))].

    Formulation reference: https://arxiv.org/pdf/1802.05957.pdf (eq. 17)
    """

    def __init__(self):
        super().__init__()

    def forward(self, data_dict, output_dict):
        d_on_fake = output_dict['D_G_z']
        return -d_on_fake.mean()
# Formulation reference: https://arxiv.org/pdf/1802.05957.pdf (eq. 16)
class HingeLossD(nn.Module):
    """Discriminator hinge loss: E[relu(1 - D(x))] + E[relu(1 + D(G(z)))].

    Formulation reference: https://arxiv.org/pdf/1802.05957.pdf (eq. 16)
    """

    def __init__(self):
        super().__init__()
        self.relu = nn.ReLU()

    def forward(self, data_dict, output_dict):
        d_on_fake = output_dict['D_G_z']
        d_on_real = output_dict['D_x']
        real_term = self.relu(1 - d_on_real).mean()
        fake_term = self.relu(1 + d_on_fake).mean()
        return real_term + fake_term
| 4,210 | 32.688 | 90 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.