| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
xcos | xcos-master/src/model/face_recog.py | from torch.nn import (Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU,
ReLU, Sigmoid, Dropout, MaxPool2d,
AdaptiveAvgPool2d, Sequential, Module, Parameter)
# import torch.nn.functional as F
import torch
from collections import namedtuple
import math
from .networks import normal_init
# Original Arcface Model #############################################################
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
def l2_norm(input, axis=1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class SEModule(Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = AdaptiveAvgPool2d(1)
self.fc1 = Conv2d(
channels, channels // reduction, kernel_size=1, padding=0, bias=False)
self.relu = ReLU(inplace=True)
self.fc2 = Conv2d(
channels // reduction, channels, kernel_size=1, padding=0, bias=False)
self.sigmoid = Sigmoid()
def forward(self, x):
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class bottleneck_IR(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth))
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
class bottleneck_IR_SE(Module):
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth),
Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth),
SEModule(depth, 16))
def forward(self, x):
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
'''A named tuple describing a ResNet block.'''
def get_block(in_channel, depth, num_units, stride=2):
return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
def get_blocks(num_layers):
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3)
]
return blocks
class Backbone(Module):
def __init__(self, num_layers, drop_ratio, mode='ir'):
super(Backbone, self).__init__()
assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
blocks = get_blocks(num_layers)
if mode == 'ir':
unit_module = bottleneck_IR
elif mode == 'ir_se':
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
BatchNorm2d(64),
PReLU(64))
self.output_layer = Sequential(BatchNorm2d(512),
Dropout(drop_ratio),
Flatten(),
Linear(512 * 7 * 7, 512),
BatchNorm1d(512))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(
unit_module(bottleneck.in_channel,
bottleneck.depth,
bottleneck.stride))
self.body = Sequential(*modules)
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
x = self.output_layer(x)
return l2_norm(x)
def weight_init(self, mean, std):
for m in self._modules:
normal_init(self._modules[m], mean, std)
class Backbone_FC2Conv(Module):
def __init__(self, num_layers, drop_ratio, mode='ir', returnGrid=True):
super(Backbone_FC2Conv, self).__init__()
assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
blocks = get_blocks(num_layers)
if mode == 'ir':
unit_module = bottleneck_IR
elif mode == 'ir_se':
unit_module = bottleneck_IR_SE
self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
BatchNorm2d(64),
PReLU(64))
        # This 1x1 conv block is the only module added on top of the original backbone
self.conv1x1 = Sequential(Conv2d(512, 32, (1, 1), 1, 0),
BatchNorm2d(32),
PReLU(32))
self.output_layer = Sequential(BatchNorm2d(512),
Dropout(drop_ratio),
Flatten(),
Linear(512 * 7 * 7, 512),
BatchNorm1d(512))
modules = []
for block in blocks:
for bottleneck in block:
modules.append(
unit_module(bottleneck.in_channel,
bottleneck.depth,
bottleneck.stride))
self.body = Sequential(*modules)
# Newly appended
self.returnGrid = returnGrid
def forward(self, x):
x = self.input_layer(x)
x = self.body(x)
# x.size() : [bs, 512, 7, 7]
# x = self.output_layer(x)
x = self.conv1x1(x)
# x.size() : [bs, 32, 7, 7]
grid_feat = x
x = x.flatten(1)
# x.size() : [bs, 1568]
if self.returnGrid:
return l2_norm(x), grid_feat
else:
return l2_norm(x)
def get_original_feature(self, x):
x = self.input_layer(x)
x = self.body(x)
x = self.output_layer(x)
return l2_norm(x)
def weight_init(self, mean, std):
for m in self._modules:
normal_init(self._modules[m], mean, std)
# MobileFaceNet #############################################################
class Conv_block(Module):
def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
super(Conv_block, self).__init__()
self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups,
stride=stride, padding=padding, bias=False)
self.bn = BatchNorm2d(out_c)
self.prelu = PReLU(out_c)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.prelu(x)
return x
class Linear_block(Module):
def __init__(self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1):
super(Linear_block, self).__init__()
self.conv = Conv2d(in_c, out_channels=out_c, kernel_size=kernel, groups=groups,
stride=stride, padding=padding, bias=False)
self.bn = BatchNorm2d(out_c)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class Depth_Wise(Module):
def __init__(self, in_c, out_c, residual=False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1):
super(Depth_Wise, self).__init__()
self.conv = Conv_block(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
self.conv_dw = Conv_block(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride)
self.project = Linear_block(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1))
self.residual = residual
def forward(self, x):
if self.residual:
short_cut = x
x = self.conv(x)
x = self.conv_dw(x)
x = self.project(x)
if self.residual:
output = short_cut + x
else:
output = x
return output
class Residual(Module):
def __init__(self, c, num_block, groups, kernel=(3, 3), stride=(1, 1), padding=(1, 1)):
super(Residual, self).__init__()
modules = []
for _ in range(num_block):
modules.append(Depth_Wise(c, c, residual=True, kernel=kernel,
padding=padding, stride=stride, groups=groups))
self.model = Sequential(*modules)
def forward(self, x):
return self.model(x)
class MobileFaceNet(Module):
def __init__(self, embedding_size):
super(MobileFaceNet, self).__init__()
self.conv1 = Conv_block(3, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1))
self.conv2_dw = Conv_block(64, 64, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64)
self.conv_23 = Depth_Wise(64, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128)
self.conv_3 = Residual(64, num_block=4, groups=128, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
self.conv_34 = Depth_Wise(64, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256)
self.conv_4 = Residual(128, num_block=6, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
self.conv_45 = Depth_Wise(128, 128, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=512)
self.conv_5 = Residual(128, num_block=2, groups=256, kernel=(3, 3), stride=(1, 1), padding=(1, 1))
self.conv_6_sep = Conv_block(128, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0))
self.conv_6_dw = Linear_block(512, 512, groups=512, kernel=(7, 7), stride=(1, 1), padding=(0, 0))
self.conv_6_flatten = Flatten()
self.linear = Linear(512, embedding_size, bias=False)
self.bn = BatchNorm1d(embedding_size)
def forward(self, x):
out = self.conv1(x)
out = self.conv2_dw(out)
out = self.conv_23(out)
out = self.conv_3(out)
out = self.conv_34(out)
out = self.conv_4(out)
out = self.conv_45(out)
out = self.conv_5(out)
out = self.conv_6_sep(out)
out = self.conv_6_dw(out)
out = self.conv_6_flatten(out)
out = self.linear(out)
out = self.bn(out)
return l2_norm(out)
# Arcface head #############################################################
class Arcface(Module):
    # implementation of the ArcFace additive angular margin loss (https://arxiv.org/abs/1801.07698)
def __init__(self, embedding_size=512, classnum=51332, s=64., m=0.5):
super(Arcface, self).__init__()
self.classnum = classnum
self.kernel = Parameter(torch.Tensor(embedding_size, classnum))
# initial kernel
self.kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
self.m = m # the margin value, default is 0.5
self.s = s # scalar value default is 64, see normface https://arxiv.org/abs/1704.06369
self.cos_m = math.cos(m)
self.sin_m = math.sin(m)
self.mm = self.sin_m * m # issue 1
self.threshold = math.cos(math.pi - m)
def forward(self, embbedings, label):
# weights norm
nB = len(embbedings)
kernel_norm = l2_norm(self.kernel, axis=0)
# cos(theta+m)
cos_theta = torch.mm(embbedings, kernel_norm)
# output = torch.mm(embbedings,kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
cos_theta_2 = torch.pow(cos_theta, 2)
sin_theta_2 = 1 - cos_theta_2
sin_theta = torch.sqrt(sin_theta_2)
cos_theta_m = (cos_theta * self.cos_m - sin_theta * self.sin_m)
# this condition controls the theta+m should in range [0, pi]
# 0<=theta+m<=pi
# -m<=theta<=pi-m
cond_v = cos_theta - self.threshold
# XXX
cond_mask = cond_v <= 0
keep_val = (cos_theta - self.mm) # when theta not in [0,pi], use cosface instead
cos_theta_m[cond_mask] = keep_val[cond_mask]
output = cos_theta * 1.0 # a little bit hacky way to prevent in_place operation on cos_theta
idx_ = torch.arange(0, nB, dtype=torch.long)
output[idx_, label] = cos_theta_m[idx_, label]
output *= self.s # scale up in order to make softmax work, first introduced in normface
return output
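# Usage sketch (illustrative only, not part of the original training code; the class
# count and batch size below are made up):
#   head = Arcface(embedding_size=512, classnum=10)
#   embeddings = l2_norm(torch.randn(4, 512))   # ArcFace expects L2-normalized embeddings
#   labels = torch.randint(0, 10, (4,))
#   logits = head(embeddings, labels)           # [4, 10] scaled logits, feed to nn.CrossEntropyLoss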
# Cosface head #############################################################
class Am_softmax(Module):
# implementation of additive margin softmax loss in https://arxiv.org/abs/1801.05599
def __init__(self, embedding_size=512, classnum=51332):
super(Am_softmax, self).__init__()
self.classnum = classnum
self.kernel = Parameter(torch.Tensor(embedding_size, classnum))
# initial kernel
self.kernel.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
self.m = 0.35 # additive margin recommended by the paper
self.s = 30. # see normface https://arxiv.org/abs/1704.06369
def forward(self, embbedings, label):
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
phi = cos_theta - self.m
label = label.view(-1, 1) # size=(B,1)
index = cos_theta.data * 0.0 # size=(B,Classnum)
index.scatter_(1, label.data.view(-1, 1), 1)
        index = index.bool()  # boolean mask for the target classes, kept on the same device as cos_theta
output = cos_theta * 1.0
output[index] = phi[index] # only change the correct predicted output
output *= self.s # scale up in order to make softmax work, first introduced in normface
return output
| 15,351 | 37.094293 | 112 | py |
xcos | xcos-master/src/model/model.py | import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) # noqa
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_model import BaseModel
from .networks import MnistGenerator, MnistDiscriminator
from .face_recog import Backbone_FC2Conv, Backbone, Am_softmax, Arcface
from .xcos_modules import XCosAttention, FrobeniusInnerProduct, GridCos, l2normalize
from utils.util import batch_visualize_xcos
# from utils.global_config import global_config
cosineDim1 = nn.CosineSimilarity(dim=1, eps=1e-6)
class xCosModel(BaseModel):
def __init__(self,
net_depth=50, dropout_ratio=0.6, net_mode='ir_se',
model_to_plugin='CosFace', embedding_size=1568, class_num=9999,
use_softmax=True, softmax_temp=1, draw_qualitative_result=False):
super().__init__()
assert model_to_plugin in ['CosFace', 'ArcFace']
        self.attention = XCosAttention(use_softmax=use_softmax, softmax_t=softmax_temp, chw2hwc=True)
self.backbone = Backbone_FC2Conv(net_depth,
dropout_ratio,
net_mode)
self.model_to_plugin = model_to_plugin
if self.model_to_plugin == 'CosFace':
self.head = Am_softmax(embedding_size=embedding_size,
classnum=class_num)
elif self.model_to_plugin == 'ArcFace':
self.head = Arcface(embedding_size=embedding_size,
classnum=class_num)
else:
raise NotImplementedError
self.backbone_target = Backbone(net_depth,
dropout_ratio,
net_mode)
self.frobenius_inner_product = FrobeniusInnerProduct()
self.grid_cos = GridCos() # chw2hwc=True
self.attention.weight_init(mean=0.0, std=0.02)
self.backbone.weight_init(mean=0.0, std=0.02)
self.backbone_target.weight_init(mean=0.0, std=0.02)
self.draw_qualitative_result = draw_qualitative_result
def forward(self, data_dict, scenario="normal"):
model_output = {}
if scenario == 'normal':
img1s, img2s = data_dict['data_input']
label1s, label2s = data_dict['targeted_id_labels']
###############
# imgs = torch.cat((img1s, img2s), 0)
# labels = torch.cat((label1s, label2s), 0)
flatten_feat1s, grid_feat1s = self.backbone(img1s)
flatten_feat2s, grid_feat2s = self.backbone(img2s)
# Part1: FR
theta1s = self.head(flatten_feat1s, label1s)
theta2s = self.head(flatten_feat2s, label2s)
# labels = torch.cat((label1s, label2s), 0)
thetas = torch.cat((theta1s, theta2s), 0)
# model_output["labels"] = labels
model_output["thetas"] = thetas
# loss1 = self.loss_fr(thetas, labels)
# Part2: xCos
attention_maps = self.attention(grid_feat1s, grid_feat2s)
grid_cos_maps = self.grid_cos(grid_feat1s, grid_feat2s)
x_coses = self.frobenius_inner_product(grid_cos_maps, attention_maps)
targeted_coses = self.getCos(img1s, img2s)
model_output["x_coses"] = x_coses
model_output["targeted_cos"] = targeted_coses
elif scenario == 'get_feature_and_xcos':
img1s, img2s = data_dict['data_input']
flatten_feat1s, grid_feat1s = self.backbone(img1s)
flatten_feat2s, grid_feat2s = self.backbone(img2s)
model_output["flatten_feats"] = (flatten_feat1s, flatten_feat2s)
model_output["grid_feats"] = (grid_feat1s, grid_feat2s)
attention_maps = self.attention(grid_feat1s, grid_feat2s)
grid_cos_maps = self.grid_cos(grid_feat1s, grid_feat2s)
x_coses = self.frobenius_inner_product(grid_cos_maps, attention_maps)
model_output["x_coses"] = x_coses
model_output["attention_maps"] = attention_maps
model_output["grid_cos_maps"] = grid_cos_maps
if self.draw_qualitative_result:
img1s = img1s.cpu().numpy()
img2s = img2s.cpu().numpy()
grid_cos_maps = grid_cos_maps.squeeze().detach().cpu().numpy()
attention_maps = attention_maps.squeeze().detach().cpu().numpy()
visualizations = batch_visualize_xcos(img1s, img2s, grid_cos_maps, attention_maps)
model_output["xcos_visualizations"] = visualizations
return model_output
def getCos(self, img1s, img2s):
        '''
        img1s, img2s: [bs, c, h, w]
        feat1s, feat2s: [bs, 512]
        cosine: (bs,)
        '''
with torch.no_grad():
feat1s = self.backbone_target(img1s)
feat2s = self.backbone_target(img2s)
# half_idx = feats.size(0) // 2
# feat1 = feats[:half_idx]
# feat2 = feats[half_idx:]
feat1s = l2normalize(feat1s)
feat2s = l2normalize(feat2s)
cosine = cosineDim1(feat1s, feat2s)
return cosine
class NormalFaceModel(BaseModel):
def __init__(self,
net_depth=50, dropout_ratio=0.6, net_mode='ir_se',
model_type='CosFace', embedding_size=512, class_num=9999):
super().__init__()
assert model_type in ['CosFace', 'ArcFace']
self.model_type = model_type
if self.model_type == 'CosFace':
self.head = Am_softmax(embedding_size=embedding_size,
classnum=class_num)
elif self.model_type == 'ArcFace':
self.head = Arcface(embedding_size=embedding_size,
classnum=class_num)
else:
raise NotImplementedError
self.backbone = Backbone(net_depth,
dropout_ratio,
net_mode)
self.backbone.weight_init(mean=0.0, std=0.02)
def forward(self, data_dict, scenario="normal"):
model_output = {}
if scenario == 'normal':
img1s, img2s = data_dict['data_input']
label1s, label2s = data_dict['targeted_id_labels']
flatten_feat1s = self.backbone(img1s)
flatten_feat2s = self.backbone(img2s)
# Part1: FR
theta1s = self.head(flatten_feat1s, label1s)
theta2s = self.head(flatten_feat2s, label2s)
thetas = torch.cat((theta1s, theta2s), 0)
model_output["thetas"] = thetas
elif scenario == 'get_feature_and_xcos':
img1s, img2s = data_dict['data_input']
flatten_feat1s = self.backbone(img1s)
flatten_feat2s = self.backbone(img2s)
model_output["flatten_feats"] = (flatten_feat1s, flatten_feat2s)
targeted_coses = self.getCos(img1s, img2s)
model_output["coses"] = targeted_coses
return model_output
def getCos(self, img1s, img2s):
        '''
        img1s, img2s: [bs, c, h, w]
        feat1s, feat2s: [bs, 512]
        cosine: (bs,)
        '''
with torch.no_grad():
feat1s = self.backbone(img1s)
feat2s = self.backbone(img2s)
# half_idx = feats.size(0) // 2
# feat1 = feats[:half_idx]
# feat2 = feats[half_idx:]
feat1s = l2normalize(feat1s)
feat2s = l2normalize(feat2s)
cosine = cosineDim1(feat1s, feat2s)
return cosine
class MnistModel(BaseModel):
"""
Mnist model demo
"""
def __init__(self, num_classes=10):
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, num_classes)
def forward(self, data_dict):
x = data_dict['data_input']
c1 = F.relu(F.max_pool2d(self.conv1(x), 2))
c2 = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(c1)), 2))
c2_flatten = c2.view(-1, 320)
c2_activation = F.relu(self.fc1(c2_flatten))
c2_dropout = F.dropout(c2_activation, training=self.training)
fc_out = self.fc2(c2_dropout)
out = F.log_softmax(fc_out, dim=1)
return {
"model_output": out
}
class MnistGAN(BaseModel):
def __init__(self, spectral_normalization=True, d=128):
super().__init__()
self.generator = MnistGenerator(d=d)
self.discriminator = MnistDiscriminator(spectral_normalization=spectral_normalization, d=d)
self.generator.weight_init(mean=0.0, std=0.02)
self.discriminator.weight_init(mean=0.0, std=0.02)
def forward(self, data_dict, scenario):
x = data_dict['data_input']
batch_size = x.size(0)
# Generate images from random vector z. When inferencing, it's the only thing we need.
z = torch.randn((batch_size, 100)).view(-1, 100, 1, 1).to(x.device)
G_z = self.generator(z)
model_output = {"G_z": G_z}
if scenario == 'generator_only':
return model_output
# Feed fake images to the discriminator. When training generator, it's the last thing we need.
D_G_z = self.discriminator(G_z).squeeze()
model_output["D_G_z"] = D_G_z
if scenario == 'generator':
return model_output
# Feed real images the discriminator. Only when training discriminator will this be needed.
assert scenario == 'discriminator'
D_x = self.discriminator(x).squeeze()
model_output["D_x"] = D_x
return model_output
| 9,784 | 39.26749 | 102 | py |
xcos | xcos-master/src/model/networks.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import spectral_norm
def normal_init(m, mean, std):
if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):
m.weight.data.normal_(mean, std)
m.bias.data.zero_()
class MnistGenerator(nn.Module):
# architecture reference: https://github.com/znxlwm/pytorch-MNIST-CelebA-GAN-DCGAN/blob/master/pytorch_MNIST_DCGAN.py # NOQA
def __init__(self, d=128):
super().__init__()
self.deconv1 = nn.ConvTranspose2d(100, d * 8, 4, 1, 0)
self.deconv1_bn = nn.BatchNorm2d(d * 8)
self.deconv2 = nn.ConvTranspose2d(d * 8, d * 4, 4, 2, 1)
self.deconv2_bn = nn.BatchNorm2d(d * 4)
self.deconv3 = nn.ConvTranspose2d(d * 4, d * 2, 4, 2, 1)
self.deconv3_bn = nn.BatchNorm2d(d * 2)
self.deconv4 = nn.ConvTranspose2d(d * 2, d, 4, 2, 1)
self.deconv4_bn = nn.BatchNorm2d(d)
self.deconv5 = nn.ConvTranspose2d(d, 1, 4, 2, 1)
def weight_init(self, mean, std):
for m in self._modules:
normal_init(self._modules[m], mean, std)
def forward(self, input):
# x = F.relu(self.deconv1(input))
x = F.relu(self.deconv1_bn(self.deconv1(input)))
x = F.relu(self.deconv2_bn(self.deconv2(x)))
x = F.relu(self.deconv3_bn(self.deconv3(x)))
x = F.relu(self.deconv4_bn(self.deconv4(x)))
x = torch.tanh(self.deconv5(x))
return x
class MnistDiscriminator(nn.Module):
# architecture reference: https://github.com/znxlwm/pytorch-MNIST-CelebA-GAN-DCGAN/blob/master/pytorch_MNIST_DCGAN.py # NOQA
def __init__(self, d=32, spectral_normalization=True):
super().__init__()
self.conv1 = nn.Conv2d(1, d, 4, 2, 1)
self.conv2 = nn.Conv2d(d, d * 2, 4, 2, 1)
self.conv2_bn = nn.BatchNorm2d(d * 2)
self.conv3 = nn.Conv2d(d * 2, d * 4, 4, 2, 1)
self.conv3_bn = nn.BatchNorm2d(d * 4)
self.conv4 = nn.Conv2d(d * 4, d * 8, 4, 2, 1)
self.conv4_bn = nn.BatchNorm2d(d * 8)
self.conv5 = nn.Conv2d(d * 8, 1, 4, 1, 0)
if spectral_normalization:
for attr_name in [f'conv{i}' for i in range(1, 6)]:
new_attr = spectral_norm(getattr(self, attr_name))
setattr(self, attr_name, new_attr)
def weight_init(self, mean, std):
for m in self._modules:
normal_init(self._modules[m], mean, std)
def forward(self, input):
x = F.leaky_relu(self.conv1(input), 0.2)
x = F.leaky_relu(self.conv2_bn(self.conv2(x)), 0.2)
x = F.leaky_relu(self.conv3_bn(self.conv3(x)), 0.2)
x = F.leaky_relu(self.conv4_bn(self.conv4(x)), 0.2)
x = torch.sigmoid(self.conv5(x))
return x
| 2,779 | 38.714286 | 129 | py |
xcos | xcos-master/src/model/__init__.py | 0 | 0 | 0 | py | |
xcos | xcos-master/src/model/xcos_modules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .networks import normal_init
cos = nn.CosineSimilarity(dim=1, eps=1e-6)
def l2normalize(x):
return F.normalize(x, p=2, dim=1)
class FrobeniusInnerProduct(nn.Module):
def __init__(self):
super(FrobeniusInnerProduct, self).__init__()
def forward(self, grid_cos_map, attention_map):
""" Compute the Frobenius inner product
with grid cosine map and attention map.
Args:
grid_cos_map (Tensor of size([bs, 7, 7, 1]))
attention_map (Tensor of size([bs, 7, 7, 1])
Returns:
Tensor of size [bs, 1]: aka. xCos values
"""
attentioned_gird_cos = (grid_cos_map * attention_map)
# attentioned_gird_cos: torch.Size([bs, 7, 7, 1]) ->[bs, 49]
attentioned_gird_cos = attentioned_gird_cos.view(attentioned_gird_cos.size(0), -1)
frobenius_inner_product = attentioned_gird_cos.sum(1)
return frobenius_inner_product
class GridCos(nn.Module):
def __init__(self):
super(GridCos, self).__init__()
def forward(self, feat_grid_1, feat_grid_2):
""" Compute the grid cos map with 2 input conv features
Args:
feat_grid_1 ([type]): [description]
feat_grid_2 ([type]): [description]
Returns:
Tensor of size([bs, 7, 7, 1]: [description]
"""
feat_grid_1 = feat_grid_1.permute(0, 2, 3, 1) # CHW to HWC
feat_grid_2 = feat_grid_2.permute(0, 2, 3, 1)
output_size = feat_grid_1.size()[0:3] + torch.Size([1])
feat1 = feat_grid_1.contiguous().view(-1, feat_grid_1.size(3))
feat2 = feat_grid_2.contiguous().view(-1, feat_grid_2.size(3))
feat1 = l2normalize(feat1)
feat2 = l2normalize(feat2)
grid_cos_map = cos(feat1, feat2).view(output_size)
return grid_cos_map
class XCosAttention(nn.Module):
def __init__(self, use_softmax=True, softmax_t=1, chw2hwc=True):
super(XCosAttention, self).__init__()
self.embedding_net = nn.Sequential(
nn.Conv2d(32, 16, 3, padding=1),
nn.BatchNorm2d(16),
nn.PReLU())
self.attention = nn.Sequential(
nn.Conv2d(32, 16, 3, padding=1),
nn.BatchNorm2d(16),
nn.PReLU(),
nn.Conv2d(16, 1, 3, padding=1),
nn.BatchNorm2d(1),
nn.PReLU(),
)
self.name = 'AttenCosNet'
self.USE_SOFTMAX = use_softmax
self.SOFTMAX_T = softmax_t
self.chw2hwc = chw2hwc
def softmax(self, x, T=1):
x /= T
return F.softmax(x.reshape(x.size(0), x.size(1), -1), 2).view_as(x)
def divByNorm(self, x):
'''
attention_weights.size(): [bs, 1, 7, 7]
'''
x -= x.view(x.size(0),
x.size(1), -1).min(dim=2)[0].repeat(1,
1,
x.size(2) * x.size(3)).view(x.size(0),
x.size(1),
x.size(2),
x.size(3))
x /= x.view(x.size(0),
x.size(1), -1).sum(dim=2).repeat(1,
1,
x.size(2) * x.size(3)).view(x.size(0),
x.size(1),
x.size(2),
x.size(3))
return x
def forward(self, feat_grid_1, feat_grid_2):
'''
feat_grid_1.size(): [bs, 32, 7, 7]
attention_weights.size(): [bs, 1, 7, 7]
'''
# XXX Do I need to normalize grid_feat?
conv1 = self.embedding_net(feat_grid_1)
conv2 = self.embedding_net(feat_grid_2)
fused_feat = torch.cat((conv1, conv2), dim=1)
attention_weights = self.attention(fused_feat)
# To Normalize attention
if self.USE_SOFTMAX:
attention_weights = self.softmax(attention_weights, self.SOFTMAX_T)
else:
attention_weights = self.divByNorm(attention_weights)
if self.chw2hwc:
attention_weights = attention_weights.permute(0, 2, 3, 1)
return attention_weights
def weight_init(self, mean, std):
for m in self._modules:
normal_init(self._modules[m], mean, std)
# class AttentionCosNet(nn.Module):
# def __init__(self):
# super(AttentionCosNet, self).__init__()
# self.embedding_net = nn.Sequential(
# nn.Conv2d(512, 256, 3, padding=1),
# nn.BatchNorm2d(256),
# nn.PReLU()
# )
# self.attention = nn.Sequential(
# nn.Conv2d(512, 256, 3, padding=1),
# nn.BatchNorm2d(256),
# nn.PReLU(),
# nn.Conv2d(256, 1, 3, padding=1),
# nn.BatchNorm2d(1),
# nn.PReLU(),
# )
# self.name = 'AttentionCosNet'
# def softmax(self, x):
# return F.softmax(x.reshape(x.size(0), x.size(1), -1), 2).view_as(x)
# def forward(self, x1, x2):
# '''
# x1.size(): [bs, 512, 7, 6]
# attention_weights.size(): [bs, 1, 7, 6]
# '''
# conv1 = self.embedding_net(x1)
# conv2 = self.embedding_net(x2)
# fused_feat = torch.cat((conv1, conv2), dim=1)
# attention_weights = self.attention(fused_feat)
# # XXX: I use softmax instead of normalize
# # attention_weights = F.normalize(attention_weights, p=2, dim=1)
# attention_weights = self.softmax(attention_weights)
# return x1, x2, attention_weights
# class EmbeddingNet(nn.Module):
# def __init__(self):
# super(EmbeddingNet, self).__init__()
# self.convnet = nn.Sequential(nn.Conv2d(1, 32, 5), nn.PReLU(),
# nn.MaxPool2d(2, stride=2),
# nn.Conv2d(32, 64, 5), nn.PReLU(),
# nn.MaxPool2d(2, stride=2))
# self.fc = nn.Sequential(nn.Linear(64 * 4 * 4, 256),
# nn.PReLU(),
# nn.Linear(256, 256),
# nn.PReLU(),
# nn.Linear(256, 2)
# )
# def forward(self, x):
# output = self.convnet(x)
# output = output.view(output.size()[0], -1)
# output = self.fc(output)
# return output
# def get_embedding(self, x):
# return self.forward(x)
# class EmbeddingNetL2(EmbeddingNet):
# def __init__(self):
# super(EmbeddingNetL2, self).__init__()
# def forward(self, x):
# output = super(EmbeddingNetL2, self).forward(x)
# output /= output.pow(2).sum(1, keepdim=True).sqrt()
# return output
# def get_embedding(self, x):
# return self.forward(x)
# class ClassificationNet(nn.Module):
# def __init__(self, embedding_net, n_classes):
# super(ClassificationNet, self).__init__()
# self.embedding_net = embedding_net
# self.n_classes = n_classes
# self.nonlinear = nn.PReLU()
# self.fc1 = nn.Linear(2, n_classes)
# def forward(self, x):
# output = self.embedding_net(x)
# output = self.nonlinear(output)
# scores = F.log_softmax(self.fc1(output), dim=-1)
# return scores
# def get_embedding(self, x):
# return self.nonlinear(self.embedding_net(x))
# class SiameseNet(nn.Module):
# def __init__(self, embedding_net):
# super(SiameseNet, self).__init__()
# self.embedding_net = embedding_net
# def forward(self, x1, x2):
# output1 = self.embedding_net(x1)
# output2 = self.embedding_net(x2)
# return output1, output2
# def get_embedding(self, x):
# return self.embedding_net(x)
# class TripletNet(nn.Module):
# def __init__(self, embedding_net):
# super(TripletNet, self).__init__()
# self.embedding_net = embedding_net
# def forward(self, x1, x2, x3):
# output1 = self.embedding_net(x1)
# output2 = self.embedding_net(x2)
# output3 = self.embedding_net(x3)
# return output1, output2, output3
# def get_embedding(self, x):
# return self.embedding_net(x)
# class ENMSiameseNet(nn.Module):
# def __init__(self, embedding_net):
# super(ENMSiameseNet, self).__init__()
# self.embedding_net = embedding_net
# self.name = 'Siamese'
# def forward(self, x1, x2):
# output1 = self.embedding_net(x1)
# output2 = self.embedding_net(x2)
# return output1, output2
# def get_embedding(self, x):
# return self.embedding_net(x)
# class ENMTripletNet(nn.Module):
# def __init__(self, embedding_net):
# super(ENMTripletNet, self).__init__()
# self.embedding_net = embedding_net
# self.name = 'Triplet'
# def forward(self, x1, x2, x3):
# output1 = self.embedding_net(x1)
# output2 = self.embedding_net(x2)
# output3 = self.embedding_net(x3)
# return output1, output2, output3
# def get_embedding(self, x):
# return self.embedding_net(x)
# class ENMEmbeddingNet(nn.Module):
# def __init__(self):
# super(ENMEmbeddingNet, self).__init__()
# self.fc = nn.Sequential(nn.Linear(1024, 1024),
# nn.PReLU(),
# nn.Dropout(p=0.5),
# nn.Linear(1024, 1024),
# nn.PReLU(),
# nn.Dropout(p=0.5),
# nn.Linear(1024, 1024)
# )
# self.name = 'ENMEmb'
# def forward(self, x):
# output = self.fc(x)
# return output
# def get_embedding(self, x):
# return self.forward(x)
| 10,474 | 33.916667 | 94 | py |
xcos | xcos-master/src/model/metric.py | import os
import torch
from abc import abstractmethod
import tempfile
import numpy as np
from torchvision import transforms
from utils.util import DeNormalize, lib_path, import_given_path
from utils.verification import evaluate_accuracy
from utils.logging_config import logger
class BaseMetric(torch.nn.Module):
def __init__(self, output_key, target_key, nickname, scenario='training'):
super().__init__()
self.nickname = nickname
self.output_key = output_key
self.target_key = target_key
self.scenario = scenario
@abstractmethod
def clear(self):
""" Initialize variables needed for metrics calculations.
This function would be called in TrainingWorker._init_output()
See the TopKAcc below for example.
"""
pass
@abstractmethod
def update(self, data, output):
""" Update metric values in each batch.
This function would be called inside torch.no_grad() in WorkerTemplate._update_all_metrics()
"""
pass
@abstractmethod
def finalize(self):
""" Calculate the final metric values given the variables updated in each batch. """
pass
class TestMetric(BaseMetric):
def __init__(self, k, output_key, target_key, nickname=None, scenario='training'):
        nickname = f'top{k}_acc_{target_key}' if nickname is None else nickname
super().__init__(output_key, target_key, nickname, scenario)
self.k = k
def clear(self):
self.total_correct = 0
self.total_number = 0
def update(self, data, output):
self.total_correct += 2
self.total_number += 1
return self.total_correct / self.total_number
def finalize(self):
return self.total_correct / self.total_number
class VerificationMetric(BaseMetric):
def __init__(self, output_key, target_key,
nickname=None, num_of_folds=5, scenario='validation'):
nickname = f"verificatoin_acc_{target_key}" if nickname is None else nickname
super().__init__(output_key, target_key, nickname, scenario)
self.num_of_folds = num_of_folds
self.cos_values = []
self.is_same_ground_truth = []
def clear(self):
self.cos_values = []
self.is_same_ground_truth = []
def update(self, data, output):
self.cos_values.append(output[self.output_key].cpu().numpy())
self.is_same_ground_truth.append(data[self.target_key].cpu().numpy())
return None
def finalize(self):
self.cos_values = np.concatenate(self.cos_values, axis=None)
self.is_same_ground_truth = np.concatenate(self.is_same_ground_truth, axis=None)
accuracy, threshold, roc_tensor = self.evaluate_and_plot_roc(
self.cos_values, self.is_same_ground_truth, self.num_of_folds
)
logger.info(f">>>> In verification metric, accuracy:{accuracy}, threshold: {threshold}")
return accuracy
def evaluate_and_plot_roc(self, coses, issame, nrof_folds=5):
accuracy, best_thresholds, roc_curve_tensor = evaluate_accuracy(
coses, issame, nrof_folds
)
return accuracy.mean(), best_thresholds.mean(), roc_curve_tensor
class TopKAcc(BaseMetric):
def __init__(self, k, output_key, target_key, nickname=None):
        nickname = f'top{k}_acc_{target_key}' if nickname is None else nickname
super().__init__(output_key, target_key, nickname)
self.k = k
def clear(self):
self.total_correct = 0
self.total_number = 0
def update(self, data, output):
logits = output[self.output_key]
target = data[self.target_key]
pred = torch.topk(logits, self.k, dim=1)[1]
assert pred.shape[0] == len(target)
correct = 0
for i in range(self.k):
correct += torch.sum(pred[:, i] == target).item()
self.total_correct += correct
self.total_number += len(target)
return correct / len(target)
def finalize(self):
return self.total_correct / self.total_number
class FIDScoreOffline(BaseMetric):
"""
Module calculating FID score by saving all images into temporary directories
"""
fid_score = import_given_path("fid_score", os.path.join(lib_path, 'pytorch_fid/fid_score.py'))
def __init__(self, output_key, target_key, unnorm_mean=(0.5,), unnorm_std=(0.5,), nickname="FID_InceptionV3"):
super().__init__(output_key, target_key, nickname)
self.from_tensor_to_pil = transforms.Compose([
            DeNormalize(unnorm_mean, unnorm_std),
transforms.ToPILImage()
])
self.tmp_gt_dir = tempfile.TemporaryDirectory(prefix='gt_')
self.tmp_out_dir = tempfile.TemporaryDirectory(prefix='out_')
def clear(self):
self.tmp_gt_dir.cleanup()
self.tmp_out_dir.cleanup()
self.tmp_gt_dir = tempfile.TemporaryDirectory(prefix='gt_')
self.tmp_out_dir = tempfile.TemporaryDirectory(prefix='out_')
def _save_img_tensor(self, tensor, buffer_dir):
""" Save image tensor to a named temporary file and return the name."""
temp_f = tempfile.NamedTemporaryFile(suffix='.png', dir=buffer_dir.name, delete=False)
pil_image = self.from_tensor_to_pil(tensor.cpu())
pil_image.save(temp_f)
temp_f.close()
def update(self, data, output):
for gt_tensor, out_tensor in zip(data[self.target_key], output[self.output_key]):
self._save_img_tensor(gt_tensor, self.tmp_gt_dir)
self._save_img_tensor(out_tensor.clamp(-1, 1), self.tmp_out_dir)
return None
def finalize(self):
return self.fid_score.calculate_fid_given_paths(
paths=[self.tmp_gt_dir.name, self.tmp_out_dir.name],
batch_size=10, cuda=True, dims=2048)
class FIDScore(BaseMetric):
"""
Abstract class of FID score calculator (store inception activation in memory)
"""
fid_score = import_given_path("fid_score", os.path.join(lib_path, 'pytorch_fid/fid_score.py'))
def __init__(self, output_key, target_key, unnorm_mean=(0.5,), unnorm_std=(0.5,), nickname="FID_InceptionV3"):
super().__init__(output_key, target_key, nickname)
        self._deNormalizer = DeNormalize(unnorm_mean, unnorm_std)
self._gt_activations = []
self._out_activations = []
def clear(self):
self._gt_activations = []
self._out_activations = []
def _preprocess_tensor(self, tensor):
tensor = self._deNormalizer(tensor) # domain: [-1, 1] -> [0, 1]
tensor = tensor.repeat(1, 3, 1, 1) # convert 1-channel images to 3-channels
return tensor
@abstractmethod
def _get_activation(self, tensors):
pass
def update(self, data, output):
gt_tensors = self._preprocess_tensor(data[self.target_key])
out_tensors = self._preprocess_tensor(output[self.output_key])
self._gt_activations.append(self._get_activation(gt_tensors))
self._out_activations.append(self._get_activation(out_tensors))
return None
def finalize(self):
gt_activations = np.concatenate(self._gt_activations)
out_activations = np.concatenate(self._out_activations)
score = self._get_fid_score(gt_activations, out_activations)
return score
def _get_fid_score(self, gt_activations, out_activations):
"""
Given two distribution of features, compute the FID score between them
"""
m1 = np.mean(gt_activations, axis=0)
m2 = np.mean(out_activations, axis=0)
s1 = np.cov(gt_activations, rowvar=False)
s2 = np.cov(out_activations, rowvar=False)
return self.fid_score.calculate_frechet_distance(m1, s1, m2, s2)
class FIDScoreInceptionV3(FIDScore):
inception = import_given_path("inception", os.path.join(lib_path, 'pytorch_fid/inception.py'))
def __init__(self, *args, **kargs):
super().__init__(*args, **kargs)
block_idx = self.inception.InceptionV3.BLOCK_INDEX_BY_DIM[2048]
self._backbone = self.inception.InceptionV3([block_idx])
self._backbone.eval()
def _get_activation(self, tensors):
return self._backbone(tensors)[0].squeeze().cpu().numpy()
| 8,283 | 35.982143 | 114 | py |
tinker | tinker-master/tinker-build/tinker-patch-cli/tool_output/merge_mapping.py | #!/usr/bin/python
# coding: utf-8
"""
After a project starts using applymapping, the following problems can occur:
1. A class or method was kept in the previous version but is no longer kept in this version.
2. A class or method was not kept in the previous version but is kept in this version.
Both cases make ProGuard emit warnings; the official advice is to resolve the conflicts by hand
(http://proguard.sourceforge.net/manual/troubleshooting.html#mappingconflict1).
If they are not resolved, the mapping file is treated with the highest priority by default, which causes obfuscation problems.
The approach used here:
In short, call the previous version's mapping "mapping1". Build the current project directly (without applymapping) and call the resulting mapping "mapping2".
mapping2 lists the classes the current project obfuscates. Since the point of reusing a mapping is to keep the obfuscated names of the same classes consistent across versions,
every class and method in mapping2 is looked up in mapping1 to obtain its obfuscated name, producing a new "mapping3"; entries that cannot be found are not written to mapping3.
mapping3 is then the mapping file that can actually be used. Finally, rebuild the current project with mapping3 applied to finish the packaging process.
The generated mapping keeps only the classes and methods that need obfuscation in both the current and the previous version, with obfuscated names taken from the previous mapping.
Classes and methods whose keep state conflicts are not kept in the new mapping; during the build they are handled by the current ProGuard configuration.
So the newly generated mapping is a subset of the previous version's mapping.
Usage: pass in the previous version's mapping and the mapping obtained by building the current project without applymapping; the script outputs the processed mapping file.
"""
import os
import sys
def print_usage():
print >>sys.stderr, \
"""usage: python merge_mapping.py old_mapping.txt current_mapping.txt
the output mapping file is 'new_mapping.txt' in the cwd directory
"""
sys.exit(1)
class MappingData:
def __init__(self):
self.raw_line = ""
self.key = ""
self.field_methods = []
class DealWithProguardWarning:
def __init__(self):
self.classes = {}
self.class_list = []
self.current_classes = {}
self.current_class_list = []
@staticmethod
def read_mapping_file(classes, class_list, mapping):
current_mapping_data = None
with open(mapping, 'r') as fd:
            # read the file line by line
for line in fd.xreadlines():
                # a line that does not start with a space describes a class
if not line.startswith(' '):
                    # save the previous class entry before starting a new one
if current_mapping_data is not None:
classes[current_mapping_data.key] = current_mapping_data
class_list.append(current_mapping_data.key)
                    # create a fresh entry for the new class and fill it in
current_mapping_data = MappingData()
current_mapping_data.raw_line = line
current_mapping_data.key = line.split('->')[0].strip()
else:
                    # indented lines are fields/methods; append them to the current class
current_mapping_data.field_methods.append(line)
classes[current_mapping_data.key] = current_mapping_data
class_list.append(current_mapping_data.key)
print "size: ", len(classes)
def remove_warning_mapping(self, old_mapping, current_mapping):
self.read_mapping_file(self.classes, self.class_list, old_mapping)
self.read_mapping_file(self.current_classes, self.current_class_list, current_mapping)
self.do_merge()
self.print_new_mapping()
def exe(self, args):
if len(args) < 2:
print_usage()
old_mapping_path = args[0]
if not os.path.exists(old_mapping_path):
raise Exception("mapping file is not exist, path=%s", old_mapping_path)
current_mapping_path = args[1]
if not os.path.exists(current_mapping_path):
raise Exception("proguard warning file is not exist, path=%s", current_mapping_path)
self.remove_warning_mapping(old_mapping_path, current_mapping_path)
def do_merge(self):
        # iterate over the class keys of the current mapping
for key in self.current_class_list:
if key in self.classes:
data = self.classes[key]
current_data = self.current_classes[key]
                # if the class is not obfuscated in the current mapping, keep it as-is; otherwise overwrite it with the entry from the old mapping
# ___.___ -> ___.___:
if current_data.raw_line.split("->")[0] != current_data.raw_line.split("->")[1][:-1]:
current_data.raw_line = data.raw_line
new_method_list = []
                # handle the fields/methods
for line in current_data.field_methods:
result, new_line = self.find_same_methods(line, data)
                    # only write entries that were also found in the old mapping
if result:
new_method_list.append(new_line)
current_data.field_methods = new_method_list
            # the class does not appear in the old mapping, so drop it
else:
del self.current_classes[key]
def find_same_methods(self, line, data):
search_name, search_complete_name, search_new_name = self.get_name_and_complete_name_and_new_name(line)
        # special case: if this entry is not obfuscated in the current mapping at all, do not keep it in the output
if search_name == search_new_name:
return False, ""
for method_line in data.field_methods:
target_name, target_complete_name, target_new_name = self.get_name_and_complete_name_and_new_name(method_line)
            # compare using the full signature to avoid mismatches caused by overloaded methods
if search_complete_name == target_complete_name:
print "1"
return True, method_line
print "0"
return False, ""
    # returns the plain name, the full signature (with return type and arguments), and the obfuscated name
@staticmethod
def get_name_and_complete_name_and_new_name(line):
""" ___ ___ -> ___
___:___:___ ___(___) -> ___
___:___:___ ___(___):___ -> ___
___:___:___ ___(___):___:___ -> ___
"""
no_space_line = line.strip()
colonIndex1 = no_space_line.find(":")
colonIndex2 = no_space_line.find(":", colonIndex1+1) if colonIndex1 != -1 else -1
spaceIndex = no_space_line.find(" ", colonIndex2+2)
argumentIndex1 = no_space_line.find("(", spaceIndex+1)
argumentIndex2 = no_space_line.find(")", argumentIndex1+1) if argumentIndex1 != -1 else -1
colonIndex3 = no_space_line.find(":", argumentIndex2+1) if argumentIndex2 != -1 else -1
colonIndex4 = no_space_line.find(":", colonIndex3+1) if colonIndex3 != -1 else -1
arrowIndex = no_space_line.find("->")
if spaceIndex < 0 or arrowIndex < 0:
raise Exception("can not parse line %s", no_space_line)
name = no_space_line[spaceIndex + 1: argumentIndex1 if argumentIndex1 >= 0 else arrowIndex].strip()
new_name = no_space_line[arrowIndex + 2:].strip()
complete_name = no_space_line[colonIndex2 + 1:arrowIndex].strip()
return name, complete_name, new_name
def print_new_mapping(self):
output_path = os.path.join(os.getcwd(), "new_mapping.txt")
with open(output_path, "w") as fw:
for key in self.current_class_list:
if key in self.current_classes:
data = self.current_classes[key]
fw.write(data.raw_line)
for line in data.field_methods:
fw.write(line)
if __name__ == '__main__':
DealWithProguardWarning().exe(sys.argv[1:])
| 6,630 | 38.945783 | 122 | py |
tinker | tinker-master/tinker-build/tinker-patch-cli/tool_output/proguard_warning.py | #!/usr/bin/python
# coding: utf8
import os
import sys
def print_usage():
print >>sys.stderr, \
"""usage: python proguard_warning.py mapping.txt warning.txt
the output mapping file is 'mapping_edit.txt' in the cwd directory
"""
sys.exit(1)
class MappingData:
raw_line = ""
key = ""
filed_methods = []
def __init__(self):
self.raw_line = ""
self.key = ""
self.filed_methods = []
class RemoveProguardWarning:
def __init__(self):
self.classes = {}
self.class_list = []
def read_mapping_file(self, mapping):
current_mapping_data = None
with open(mapping) as fd:
for line in fd.readlines():
if not line.startswith(' '):
if current_mapping_data is not None:
self.classes[current_mapping_data.key] = current_mapping_data
self.class_list.append(current_mapping_data.key)
current_mapping_data = MappingData()
current_mapping_data.raw_line = line
current_mapping_data.key = line.split('->')[0].strip()
else:
current_mapping_data.filed_methods.append(line)
self.classes[current_mapping_data.key] = current_mapping_data
self.class_list.append(current_mapping_data.key)
# print "size: ", len(self.classes)
def remove_warning(self, warning):
with open(warning) as fd:
for line in fd.readlines():
if not line.startswith("Warning:"):
raise Exception("proguard warning must begin with 'Warning:', line=", line)
splits = line.split(':')
class_key = splits[1].strip()
# print "class_key", class_key
if class_key not in self.classes:
print "Warning:can't find warning class in the mapping file, class=", class_key
continue
warning_value = splits[2].split("'")[1] + " -> " + splits[2].split("'")[5]
mapping_data = self.classes[class_key]
# print "warning_value", warning_value
find = False
for mappings in mapping_data.filed_methods:
if mappings.find(warning_value) != -1:
mapping_data.filed_methods.remove(mappings)
find = True
break
if not find:
print "Warning: can't find warning field or method in the mapping file:', value=", warning_value
if len(mapping_data.filed_methods) == 0:
del self.classes[class_key]
output_path = os.path.join(os.getcwd(), "mapping_edit.txt")
with open(output_path, "w") as fw:
for key in self.class_list:
if key in self.classes:
data = self.classes[key]
fw.write(data.raw_line)
for line in data.filed_methods:
fw.write(line)
def remove_warning_mapping(self, mapping, warning):
self.read_mapping_file(mapping)
self.remove_warning(warning)
def do_command(self, args):
        if len(args) < 2:
print_usage()
mapping_path = args[0]
if not os.path.exists(mapping_path):
raise Exception("mapping file is not exist, path=%s", mapping_path)
warning_patch = args[1]
if not os.path.exists(warning_patch):
raise Exception("proguard warning file is not exist, path=%s", warning_patch)
self.remove_warning_mapping(mapping_path, warning_patch)
if __name__ == '__main__':
RemoveProguardWarning().do_command(sys.argv[1:]) | 3,821 | 34.06422 | 116 | py |
Cellibrium | Cellibrium-master/Percolibrium/Percolators/python/anomalysensor.py |
import math
import cPickle
FORGETRATE = 0.5
def update_real_Q(qname, newq):
oldav = 0
oldvar = 0.1
[state, oldav, oldvar] = load_special_Q(qname, oldav, oldvar)
if state == True:
if oldvar == 0:
oldvar = 0.5
nextav = w_average(newq, oldav)
newvar = (newq-oldav)*(newq-oldav)
nextvar = w_average(newvar,oldvar);
devq = math.sqrt(oldvar)
if devq<0.1:
devq = 0.1
if newq > (oldav + 3*devq):
print '!! [pr] Process anomaly '+str(qname)+'_high_anomaly '+'('+str(newq)+' > '+str(oldav)+' + '+str(3*devq)+')'
elif newq < (oldav - 3*devq):
print '!! [pr] Process anomaly '+str(qname)+'_low_anomaly '+'('+str(newq)+' < '+str(oldav)+' - '+str(3*devq)+')'
save_special_Q(qname,nextav,nextvar)
else:
nextav = w_average(newq,0);
nextvar = w_average(newq/2,0);
save_special_Q(qname,nextav,nextvar)
def save_special_Q(name, av, var):
vec = {'name':name, 'av':av, 'var': var}
try:
with open(str(name)+'.pkl', 'wb') as fid:
cPickle.dump(vec, fid)
        print 'Saved updated values ('+str(av)+','+str(var)+') in '+str(name)+'.pkl'
return True
except:
print 'unable to save data'
return False
def load_special_Q(name, oldq, oldvar):
try:
with open(str(name)+'.pkl', 'rb') as fid:
data = cPickle.load(fid)
oldq = data['av']
oldvar = data['var']
print 'Got previous average '+str(oldq)+', std_dev '+ str(math.sqrt(oldvar))
return True, oldq, oldvar
except:
print " - no previous value for "+str(name);
return False, oldq, oldvar
def w_average(anew, aold):
    av = 9999999.0
    cf_sane_monitor_limit = 9999999.0
    wnew = (1.0 - FORGETRATE)
    wold = FORGETRATE
    av = (wnew * anew + wold * aold)
    if av < 0:
        return 0.0
return av
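# Worked example (hypothetical numbers): with FORGETRATE = 0.5, a stored average of
# 150.0 and a new sample of 154.0 give the next average 0.5*154.0 + 0.5*150.0 = 152.0.
# update_real_Q() flags a sample as anomalous when it falls outside av +/- 3*sqrt(var).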
if __name__ == "__main__":
data = [154.4, 155, 144, 234, 0]
for val in data:
if val !=0:
update_real_Q('sensor',val)
| 2,249 | 23.456522 | 125 | py |
Cellibrium | Cellibrium-master/Percolibrium/Percolators/python/load_env_graph.py | #!/usr/bin/env python
from lib.neo4j import Neo4j
import urllib, urllib2, json, sys, os, time, pprint, time, pyinotify, glob
config = {}
execfile("conf/config.conf", config)
neo4j = Neo4j(config['neo4j_url'], config['neo4j_user'], config['neo4j_pass'])
pp = pprint.PrettyPrinter(indent=4)
def insert_into_db():
while True:
mylist = []
nofiles = 0
for fn in glob.glob('data/env_graph.[0-9]*'):
nofiles += 1
with open(fn) as f:
l = [ i.rstrip('\n').lstrip('(').rstrip(')').split(',') for i in f ]
mylist.extend(l)
f.close()
os.remove(fn)
#res.append(mylist)
statements = []
for l in mylist:
            l[1] = int(l[1])  # association type is numeric; a negative value means the edge direction is reversed
            if l[1] < 0:
                l[1] *= -1
merge1 = 'MERGE (n)<-[:`%s` {type:{two}, b: {three}}]-(m)' % (l[4])
merge2 = 'MERGE (n)-[:`%s` {type:{two}, b: {five}}]->(m)' % (l[2])
else:
merge1 = 'MERGE (n)-[:`%s` {type:{two}, b: {five}}]->(m)' % (l[2])
merge2 = 'MERGE (n)<-[:`%s` {type:{two}, b: {three}}]-(m)' % (l[4])
statements.append({
'statement': (
# 'MERGE (n:Label {Name: {one}})'
# 'MERGE (m:Label {Name: {three}})'
'MERGE (n:CGN {Name: {one}})'
'MERGE (m:CGN {Name: {four}})'
'%s %s' % (merge1, merge2)
#'MERGE (n)-[:`%s` {type: %s, b:`%s`}]-(m)' % (l[2],l[1],l[3])
# 'MERGE (n)-[:`%s` {type:{three}]-(m)' % (l[2])
# 'MERGE (m)-[:`%s` {type:{three}, b: {two}}]->(m)' % (l[4])
),
'parameters': {
'one': l[0],
'two': l[1],
'three' : l[2],
'four' : l[3],
'five' : l[4],
}
})
#pp.pprint(statements)
print time.strftime("%d/%m/%Y %H:%M:%S") + " - STARTING LOAD (%d files)" % (nofiles)
r = neo4j.neo4j_rest_transaction_commit({'statements': statements})
print time.strftime("%d/%m/%Y %H:%M:%S") + " - COMPLETED LOAD"
time.sleep(5)
# Create index on CGN
q = {
'statements':
[
{
'statement': 'CREATE INDEX ON :CGN(Name)'
}
]
}
r = neo4j.neo4j_rest_transaction_commit(q)
time.sleep(5)
insert_into_db()
| 1,936 | 23.2125 | 86 | py |
Cellibrium | Cellibrium-master/Percolibrium/Percolators/python/mon_env_graph.py | #!/usr/bin/env python
from lib.neo4j import Neo4j
import urllib, urllib2, json, sys, os, time, pprint, time, pyinotify, glob
config = {}
execfile("conf/config.conf", config)
neo4j = Neo4j(config['neo4j_url'], config['neo4j_user'], config['neo4j_pass'])
pp = pprint.PrettyPrinter(indent=4)
res = []
mylist = []
class ProcessTransientFile(pyinotify.ProcessEvent):
def process_IN_MOVED_TO(self, event):
# We have explicitely registered for this kind of event.
print '\t', event.pathname, ' -> written'
f = 'data/env_graph.%d' % (time.time())
os.rename(event.pathname, f)
# print time.strftime("%d/%m/%Y %H:%M:%S") + " - MOVING env_graph TO %s" % (f)
# def process_default(self, event):
# print 'default: ', event.maskname
def watch_files():
wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm)
# In this case you must give the class object (ProcessTransientFile)
# as last parameter not a class instance.
wm.watch_transient_file('/home/ubuntu/.CGNgine/state/env_graph', pyinotify.ALL_EVENTS, ProcessTransientFile)
notifier.loop()
watch_files()
| 1,078 | 28.972222 | 109 | py |
Cellibrium | Cellibrium-master/Percolibrium/Percolators/python/logging.py |
########################################################################################################
#
# Examples, how to encode logs as semantic graphs
#
########################################################################################################
import sys
import time
import socket
from cellibrium import Cellibrium
########################################################################################################
c = Cellibrium()
########################################################################################################
#
# HADOOP semantics
#
# NameNode is the centerpiece of HDFS.
# NameNode is also known as the Master
# NameNode only stores the metadata of HDFS, the directory tree of all files in the file system, and tracks the files across the cluster.
# NameNode does not store the actual data or the dataset. The data itself is actually stored in the DataNodes.
# NameNode knows the list of the blocks and its location for any given file in HDFS. NameNode knows how to construct the file from blocks.
# NameNode is so critical to HDFS and when the NameNode is down, HDFS/Hadoop cluster is inaccessible and considered down.
# NameNode is a single point of failure in Hadoop cluster.
#
# DataNode is responsible for storing the actual data in HDFS.
# DataNode is also known as the Slave
# NameNode and DataNode are in constant communication.
# When a DataNode starts up it announce itself to the NameNode along with the list of blocks it is responsible for.
# When a DataNode is down, redundant backup. NameNode arranges for replication for the blocks managed by the DataNode that is not available.
# DOCS https://www.cloudera.com/documentation/enterprise/latest/topics/cdh_ig_ports_cdh5.html
#######################################################################################################
# This sensor log location for all data parsed
thishost = socket.gethostname()
print "This host is " + thishost
here = c.HereGr(sys.stdout,"NYC cloud")
icontext = "hadoop HDFS service"
#
# In these examples IP(thishost) seems to be 192.168.7.210 (
# nodemanager seems to be 192.168.5.65
# src address = client address src 192.168.5.176:34987 (write)
# dest address = datanode dst 192.168.5.55:50010 (but why not the same as 210?) replica ??
# From namenode logs, data replicas are pipelined in order from 1st to last
# 2017-01-20 15:41:30,158 INFO org.apache.hadoop.hdfs.StateChange: BLOCK* allocateBlock: /tmp/hadoop-yarn/staging/crluser/.staging/job_1484893655240_0007/job.split. BP-1060243018-192.168.7.210-1475739466529 blk_1073742597_1775{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1,
#replicas=[
# ReplicaUnderConstruction[[DISK]DS-0503a590-18a8-4201-bc23-7da0bbf9dfa5:NORMAL:192.168.5.65:50010|RBW],
# ReplicaUnderConstruction[[DISK]DS-201f600e-1246-436b-85ed-567351cd75ef:NORMAL:192.168.5.56:50010|RBW],
# ReplicaUnderConstruction[[DISK]DS-f0f2ca98-8c60-4ed9-917c-a58ebba5e325:NORMAL:192.168.5.175:50010|RBW],
# ReplicaUnderConstruction[[DISK]DS-fe82d9ee-25bf-4e6a-8dca-9b9426ada118:NORMAL:192.168.5.176:50010|RBW],
# ReplicaUnderConstruction[[DISK]DS-f515e02d-543e-442f-81a1-f45a826d6aec:NORMAL:192.168.5.55:50010|RBW]]}
#2017-01-20 15:41:30,196
#INFO BlockStateChange: BLOCK* addStoredBlock: blockMap updated: 192.168.5.55:50010 is added to blk_1073742597_1775{blockUCState=UNDER_CONSTRUCTION, primaryNodeIndex=-1, replicas=
#[ReplicaUnderConstruction[[DISK]DS-0503a590-18a8-4201-bc23-7da0bbf9dfa5:NORMAL:192.168.5.65:50010|RBW],
# ReplicaUnderConstruction[[DISK]DS-201f600e-1246-436b-85ed-567351cd75ef:NORMAL:192.168.5.56:50010|RBW],
# ReplicaUnderConstruction[[DISK]DS-f0f2ca98-8c60-4ed9-917c-a58ebba5e325:NORMAL:192.168.5.175:50010|RBW],
# ReplicaUnderConstruction[[DISK]DS-fe82d9ee-25bf-4e6a-8dca-9b9426ada118:NORMAL:192.168.5.176:50010|RBW],
# ReplicaUnderConstruction[[DISK]DS-f515e02d-543e-442f-81a1-f45a826d6aec:NORMAL:192.168.5.55:50010|RBW]]} size 0
# The Hadoop replication pipeline
namenodehub = "hadoop namenode %s %s" % (c.HostID("192.168.5.65"),c.IPv4("192.168.5.65"))
attr = "%s,%s" % (c.HostID("192.168.5.65"),c.IPv4("192.168.5.65"))
c.RoleGr(sys.stdout,namenodehub,"hadoop namenode",attr,icontext)
c.ServerListenPromise(sys.stdout,"192.168.5.65","Hadoop Datanode",50010)
c.ServerListenPromise(sys.stdout,"192.168.5.56","Hadoop Datanode",50010)
c.ServerListenPromise(sys.stdout,"192.168.5.175","Hadoop Datanode",50010)
c.ServerListenPromise(sys.stdout,"192.168.5.176","Hadoop Datanode",50010)
c.ServerListenPromise(sys.stdout,"192.168.5.55","Hadoop Datanode",50010)
c.ServerAcceptPostData(sys.stdout,"192.168.5.65","192.168.7.210","Hadoop DataNode","scheduling file for deletion")
c.ServerAcceptPostData(sys.stdout,"192.168.5.56","192.168.7.65","Hadoop DataNode","scheduling file for deletion")
c.ServerAcceptPostData(sys.stdout,"192.168.5.175","192.168.7.56","Hadoop DataNode","scheduling file for deletion")
c.ServerAcceptPostData(sys.stdout,"192.168.5.176","192.168.7.175","Hadoop DataNode","scheduling file for deletion")
c.ServerAcceptPostData(sys.stdout,"192.168.5.55","192.168.7.176","Hadoop DataNode","scheduling file for deletion")
c.ClientPush(sys.stdout,"192.168.7.210","192.168.7.65","replica block","Hadoop DataNode",50010)
c.ClientPush(sys.stdout,"192.168.7.65","192.168.7.56","replica block","Hadoop DataNode",50010)
c.ClientPush(sys.stdout,"192.168.7.56","192.168.7.175","replica block","Hadoop DataNode",50010)
c.ClientPush(sys.stdout,"192.168.7.175","192.168.7.176","replica block","Hadoop DataNode",50010)
#######################################################################################################
# Example 1: 2017-01-20 15:43:33,866 INFO org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService: Scheduling blk_1073742582_1758 file /home/extra/dfs/data/current/BP-1060243018-192.168.7.210-1475739466529/current/finalized/subdir0/subdir2/blk_1073742582 for deletion
# Hadoop datanode is a slave that stores actual data on HDFS
c.ServerInstanceGr(sys.stdout,"Hadoop DataNode",50010,"hdfs.server.datanode",here)
c.ServerInstanceGr(sys.stdout,"secure Hadoop Datanode",1004,"hdfs.server.datanode",here)
c.ServerListenPromise(sys.stdout,thishost,"secure Hadoop Datanode",1004)
c.ServerListenPromise(sys.stdout,thishost,"Hadoop Datanode",50010)
# the specific event - who is the client?? 192.168.7.210??
now = time.time() # or parse "2017-01-20 15:43:33,866" at some *appropriate* time resolution (meaningless to store every event)
who = "hadoop data node 192.168.7.210";
what = "schedule block deletion";
why = "hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService";
how = "/home/extra/dfs/data/current/BP-ref" # ? some invariant or coarse grain
c.EventClue(sys.stdout,who,what,0,here,how,why,icontext);
c.RoleGr(sys.stdout,who,"hadoop data node","host identity 192.168.7.210",icontext)
#######################################################################################################
# Example 2: 2017-01-20 15:43:45,791 INFO org.apache.hadoop.hdfs.server.datanode.DataNode: Receiving BP-1060243018-192.168.7.210-1475739466529:blk_1073742593_1771 src: /192.168.5.176:34987 dest: /192.168.5.55:50010
# three IP addresses appear: block-pool origin 192.168.7.210, src 192.168.5.176:34987, dest 192.168.5.55:50010
now = time.time() # or parse .. or submit 0 for repeated event
src = "192.168.5.176"
dst = "192.168.5.55"
who = "from %s to %s" % (c.HostID(src),c.HostID(dst))
what = "forward data block";
why = "%s writes data" % c.HostID("192.168.7.210")
where = c.HereGr(sys.stdout,"NYC cloud")
how = "Receiving BP-1060243018-192.168.7.210"
c.EventClue(sys.stdout,who,what,0,where,how,why,icontext);
c.RoleGr(sys.stdout,where,"hadoop data client",c.HostID("192.168.7.210"),icontext)
# implicit
c.ServerAcceptPromise(sys.stdout,dst,src,"Hadoop DataNode",50010)
c.ClientPush(sys.stdout,src,dst,"replica block","Hadoop DataNode",50010)
#######################################################################################################
#Example 4: 127.0.0.1 - - [20/Jan/2017:05:13:34 +0000] "GET /PHP/RUBBoS_logo.jpg HTTP/1.1" 200 10010 "-" "Java/1.7.0_121"
# From RUBBoS
client = "127.0.0.1";
server = "127.0.0.1";
servicename = "Rubbos"
portnumber = 2712 #??
# Strip out specifics of request, into invariant categories that are RELEVANT to debugging
c.ServerAcceptPromise(sys.stdout,server,client,servicename,portnumber)
c.ClientWritePostData(sys.stdout,client,server,"GET PHP image",servicename,portnumber)
c.ServerReplyToGetData(sys.stdout,server,client,servicename,"PHP image")
now = 0 # for repeated event
src = "127.0.0.1"
dst = "127.0.0.1"
who = "from %s to %s" % (c.HostID(src),c.HostID(dst))
what = "web service GET request";
why = "Rubbos web service request"
where = c.HereGr(sys.stdout,"NYC cloud")
how = "connect to port 2712?"
icontext = "Rubbos service"
c.EventClue(sys.stdout,who,what,0,where,how,why,icontext);
#######################################################################################################
#Example 5: SELECT * FROM stories ORDER BY date DESC LIMIT 10
# from RUBBoS
client = "127.0.0.1";
server = "127.0.0.1";
servicename = "mysql"
request = "SELECT * FROM stories ORDER BY date DESC LIMIT 10"
portnumber = 3306
c.ServerAcceptPromise(sys.stdout,server,client,servicename,portnumber)
c.ClientWritePostData(sys.stdout,client,server,request,servicename,portnumber)
c.ServerReplyToGetData(sys.stdout,server,client,servicename,request)
now = 0 # for repeated event
src = "127.0.0.1"
dst = "127.0.0.1"
who = "from %s to %s" % (c.HostID(src),c.HostID(dst))
what = "SQL lookup";
why = "Rubbos web service request"
where = c.HereGr(sys.stdout,"NYC cloud")
how = "connect to port 3306"
icontext = "Rubbos service"
c.EventClue(sys.stdout,who,what,0,where,how,why,icontext);
###############################################################################
print "extracted log time granule key = " + c.LogTimeKeyGen1("2017-01-20 15:43:33")
###############################################################################
# Register each node, foreach IP
namenodehub = "hadoop namenode %s %s" % (c.HostID("192.168.5.65"),c.IPv4("192.168.5.65"))
attr = "%s,%s" % (c.HostID("192.168.5.65"),c.IPv4("192.168.5.65"))
c.RoleGr(sys.stdout,namenodehub,"hadoop namenode",attr,icontext)
# Events currently recognized
# 1. 'addToInvalidates', '' - see sourcecode http://grepcode.com/file/repo1.maven.org/maven2/org.apache.hadoop/hadoop-hdfs/0.22.0/org/apache/hadoop/hdfs/server/namenode/BlockManager.java#BlockManager.addToInvalidates%28org.apache.hadoop.hdfs.protocol.Block%29
# descr "Adds block to list of blocks which will be invalidated on all its datanodes"
# 2. 'allocateBlock', 'replica'
# 3. 'addStoredBlock', 'replica'
# 4. 'replicate', 'replica'
# All of these are pipeline pushes (2 x IP addresses and a timestamp)
c.ServerAcceptPostData(sys.stdout,"192.168.5.55","192.168.7.176","Hadoop DataNode","scheduling file for deletion")
c.ClientPush(sys.stdout,"192.168.7.210","192.168.7.65","replica block","Hadoop DataNode",50010)
c.ServerListenPromise(sys.stdout,"192.168.5.65","Hadoop Datanode",50010)
c.LogTimeFormat1(sys.stdout,"2017-01-20 15:43:33")
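# Illustrative sketch (added, not part of the original examples): dispatch a raw
# datanode log line into the pipeline-push pattern above. The regex only covers the
# "src: /IP:port dest: /IP:port" form shown in Example 2.
import re
def push_from_log_line(line):
    m = re.search(r"^(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}).* src: /([\d.]+):\d+ dest: /([\d.]+):\d+", line)
    if m is None:
        return
    c.LogTimeFormat1(sys.stdout, m.group(1))
    c.ClientPush(sys.stdout, m.group(2), m.group(3), "replica block", "Hadoop DataNode", 50010)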
# THERE MAY BE 2 KINDS OF ANOMALY
# a) There might be some semantic anomalies (message type unknown)
# see https://issues.apache.org/jira/browse/HDFS-9650 anomaly "Redundant addStoredBlock request received"
who = "from %s to %s" % (c.HostID(src),c.HostID(dst))
what = "ANOMALOUS LOG MESSAGE";
why = "Redundant addStoredBlock request received " + who
when = datetime.strptime(str,'%Y-%m-%d %H:%M:%S')
src = "192.168.5.176"
dst = "192.168.5.55"
where = c.HereGr(sys.stdout,"NYC cloud") # on loghost, or adapt to give argument
how = ""
c.EventClue(sys.stdout,who,what,when,where,how,why,icontext);
#
# b) We can try to get more by looking for frequency anomalies (frequency is non-invariant, so we need to detect an invariant set
# of anomaly conditions from the frequencies by preprocessing
#
# Store (timekey, from_IP_to_IP, granule_average)
print "current time granule key = " + c.TimeKeyGen(now)
print "extracted log time granule key = " + c.LogTimeKeyGen1("2017-01-20 15:43:33")
| 12,224 | 46.753906 | 291 | py |
Cellibrium | Cellibrium-master/Percolibrium/Percolators/python/env_graph.py | #!/usr/bin/env python
from lib.neo4j import Neo4j
from multiprocessing import Process
import urllib, urllib2, json, sys, os, time, pprint, time, pyinotify, glob
config = {}
execfile("conf/config.conf", config)
neo4j = Neo4j(config['neo4j_url'], config['neo4j_user'], config['neo4j_pass'])
pp = pprint.PrettyPrinter(indent=4)
res = []
mylist = []
class ProcessTransientFile(pyinotify.ProcessEvent):
def process_IN_MOVED_TO(self, event):
# We have explicitly registered for this kind of event.
#print '\t', event.pathname, ' -> written'
f = 'data/env_graph.%d' % (time.time())
os.rename(event.pathname, f)
# print time.strftime("%d/%m/%Y %H:%M:%S") + " - MOVING env_graph TO %s" % (f)
# def process_default(self, event):
# print 'default: ', event.maskname
def watch_files():
wm = pyinotify.WatchManager()
notifier = pyinotify.Notifier(wm)
# In this case you must give the class object (ProcessTransientFile)
# as last parameter not a class instance.
wm.watch_transient_file('/home/ubuntu/.CGNgine/state/env_graph', pyinotify.ALL_EVENTS, ProcessTransientFile)
notifier.loop()
def insert_into_db():
while True:
mylist = []
nofiles = 0
for fn in glob.glob('data/env_graph.[0-9]*'):
nofiles += 1
with open(fn) as f:
l = [ i.rstrip('\n').lstrip('(').rstrip(')').split(',') for i in f ]
mylist.extend(l)
os.remove(fn)
#res.append(mylist)
statements = []
for l in mylist:
timestamp = time.time()
r1_left = r2_right = '-'
r1_right = '->'
r2_left = '<-'
# the tuple fields are read back as strings, so compare/negate the type numerically
if int(l[1]) < 0:
l[1] = str(-int(l[1]))
# merge1 = 'MERGE (n)<-[r1:`%s` {type:{two}, b: {three}}]-(m) SET r1 += {r1_props}' % (l[4])
# merge2 = 'MERGE (n)-[r2:`%s` {type:{two}, b: {five}}]->(m) SET r2 += {r2_props}' % (l[2])
r1_left = '<-'
r1_right = r2_left = '-'
r2_right = '->'
merge1 = (
'MERGE (n)%s[r1:`%s` {type:{two}, b: {five}}]%s(m) ON CREATE SET r1.w = 0.3, r1.ts = {ts} '
'ON MATCH SET r1.ts = {ts}, r1.w = (r1.w + 0.7)'
) % (r1_left,l[2],r1_right)
merge2 = (
'MERGE (n)%s[r2:`%s` {type:{two}, b: {three}}]%s(m) ON CREATE SET r2.w = 0.3, r2.ts = {ts} '
'ON MATCH SET r2.ts = {ts}, r2.w = (r2.w + 0.7)'
) % (r2_left,l[4],r2_right)
#MERGE (n:TESTING {Name: 'testing1'})-[r:TESTING {Name: 'testing2'}]->(m:TESTING {Name: 'testing3'}) ON CREATE SET r.weight = 0.3, r.ts = 'blargh' ON MATCH SET r += {ts: '1234512qwerqwer3', weight: (r.weight + 0.7)};
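# Illustrative expansion (added note): for a positive-type tuple such as
# l = ['hostA','4','promises','serviceB','is promised by','*'] the two clauses become
# merge1 = MERGE (n)-[r1:`promises` {type:{two}, b: {five}}]->(m) ON CREATE SET r1.w = 0.3, r1.ts = {ts} ON MATCH SET r1.ts = {ts}, r1.w = (r1.w + 0.7)
# merge2 = MERGE (n)<-[r2:`is promised by` {type:{two}, b: {three}}]-(m) ON CREATE SET r2.w = 0.3, r2.ts = {ts} ON MATCH SET r2.ts = {ts}, r2.w = (r2.w + 0.7)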
statements.append({
'statement': (
# 'MERGE (n:Label {Name: {one}})'
# 'MERGE (m:Label {Name: {three}})'
'MERGE (n:CGN {Name: {one}}) '
'MERGE (m:CGN {Name: {four}}) '
'%s %s' % (merge1, merge2)
#'MERGE (n)-[:`%s` {type: %s, b:`%s`}]-(m)' % (l[2],l[1],l[3])
# 'MERGE (n)-[:`%s` {type:{three}]-(m)' % (l[2])
# 'MERGE (m)-[:`%s` {type:{three}, b: {two}}]->(m)' % (l[4])
),
'parameters': {
'one': l[0],
'two': l[1],
'three' : l[2],
'four' : l[3],
'five' : l[4],
'ts': str(timestamp),
}
})
#pp.pprint(statements)
print time.strftime("%d/%m/%Y %H:%M:%S") + " - STARTING LOAD (%d files)" % (nofiles)
r = neo4j.neo4j_rest_transaction_commit({'statements': statements})
print time.strftime("%d/%m/%Y %H:%M:%S") + " - COMPLETED LOAD"
time.sleep(5)
# Create index on CGN
q = {
'statements':
[
{
'statement': 'CREATE INDEX ON :CGN(Name)'
}
]
}
r = neo4j.neo4j_rest_transaction_commit(q)
time.sleep(5)
p = Process(target=insert_into_db)
d = Process(target=watch_files)
p.start()
d.start()
print "calling thread"
p.join()
d.join()
| 3,540 | 27.328 | 220 | py |
Cellibrium | Cellibrium-master/Percolibrium/Percolators/python/test_cellibrium.py |
########################################################################################################
#
# TEST
#
########################################################################################################
import sys
import time
import os
import socket
import re
from cellibrium import Cellibrium
c = Cellibrium()
print "------------------------------------------------------"
print "Test event function 1"
print "------------------------------------------------------"
#
# Calling the role interface
#
now = time.time()
who = "Miss Scarlet and Professor Plum";
what = "murder by breaknife";
why = "Miss Scarlet refuses to marry Professory plum";
where = "in the library";
when = now;
how = "by breadknife"
icontext = "cluedo";
c.EventClue(sys.stdout,who,what,when,where,how,why,icontext);
print "------------------------------------------------------"
print "Test event function 2"
print "------------------------------------------------------"
#
# How to call EventCluedo interface for a system issue
#
now = time.time()
who = "cgn_montord";
what = "anomalous state change";
why = "unknown";
where = "mark's laptop";
when = now;
how = "how it happened (i.e. symptoms)" #MakeAnomalyGrName("anomaly",syndrome);
icontext = "system monitoring";
wherex = c.WhereGr(sys.stdout,"Oslo","marklaptop","unknown","192.168.1.183","");
c.EventClue(sys.stdout,who,what,when,wherex,how,why,icontext);
print "------------------------------------------------------"
print " Rules of causation ?? (this is speculative)"
print "------------------------------------------------------"
c.Gr(sys.stdout,"performance anomaly at downstream host","a_origin","performance anomaly at upstream host","distributed system causation")
c.RoleGr(sys.stdout,"performance anomaly at upstream host","performance anomaly","at upstream host","distributed system")
c.RoleGr(sys.stdout,"performance anomaly at downstream host","performance anomaly","at downstream host","distributed system")
where = c.HereGr(sys.stdout,"mark's laptop")
print "------------------------------------------------------"
print "test service functions"
print "------------------------------------------------------"
# Service relationships
# Servername = sshd
# servicename = ssh (port nr 22)
# server -> role, client/server, attr -> host identity ...
where = c.WhereGr(sys.stdout,
"London",
"myserver",
"example.com",
"123.456.789.10/24",
"2001:::7/64",
)
c.ServerInstanceGr(sys.stdout,
"ssh",
22,
"/usr/local/sshd",
where
);
where = c.WhereGr(sys.stdout,
"San Jose",
"desktop",
"example.com",
"321.654.987.99/24",
"2001:0db8:0:f101::1/64"
);
c.ClientInstanceGr(sys.stdout,
"ssh",
"/usr/bin/ssh",
where
)
hostidentity = "123.456.789.55/24"
where = c.WhereGr(sys.stdout,
"NYC datacentre",
"node45-abc",
"cloudprovider.com",
hostidentity,
"",
)
c.ServerInstanceGr(sys.stdout,
"nodemanager",
50345,
"/usr/local/cldstack/cloudmgrd",
c.HereGr(sys.stdout,"Florida datacentre")
)
print "------------------------------------------------------"
print "Test time functions"
print "------------------------------------------------------"
c.LogTimeFormat1(sys.stdout,"2017-01-20 15:43:33")
print "------------------------------------------------------"
print "Generate invariant time keys"
print "------------------------------------------------------"
print "current time granule key = " + c.TimeKeyGen(now)
print "extracted log time granule key = " + c.LogTimeKeyGen1("2017-01-20 15:43:33")
| 3,839 | 26.042254 | 138 | py |
Cellibrium | Cellibrium-master/Percolibrium/Percolators/python/cellibrium.py | import sys
import time
import re
import socket
from datetime import datetime
class Cellibrium:
GR_CONTAINS = 3
GR_FOLLOWS = 2 # i.e. influenced by
GR_EXPRESSES = 4 #represents, etc
GR_NEAR = 1 # approx like
GR_CONTEXT = 5 # approx like
ALL_CONTEXTS = "any"
A = {
"a_contains" : [GR_CONTAINS,"contains","belongs to or is part of"],
"a_generalizes" : [GR_CONTAINS,"generalizes","is a special case of"],
"a_origin" : [GR_FOLLOWS,"may originate from","may be the source or origin of"],
"a_providedby" : [GR_FOLLOWS,"may be provided by","may provide"],
"a_maintainedby" : [GR_FOLLOWS,"is maintained by","maintains"],
"a_depends" : [GR_FOLLOWS,"depends on","partly determines"],
"a_caused_by" : [GR_FOLLOWS,"may be caused by","can cause"],
"a_uses" : [GR_FOLLOWS,"may use","may be used by"],
"a_name" : [GR_EXPRESSES,"is called","is a name for"],
"a_hasattr" : [GR_EXPRESSES,"expresses an attribute","is an attribute of"],
"a_promises" : [GR_EXPRESSES,"promises","is promised by"],
"a_hasinstance" : [GR_EXPRESSES,"has an instance or particular case","is a particular case of"],
"a_hasvalue" : [GR_EXPRESSES,"has value or state","is the state or value of"],
"a_hasarg" : [GR_EXPRESSES,"has argument or parameter","is a parameter or argument of"],
"a_hasrole" : [GR_EXPRESSES,"has the role of","is a role fulfilled by"],
"a_hasoutcome" : [GR_EXPRESSES,"has the outcome","is the outcome of"],
"a_hasfunction" : [GR_EXPRESSES,"has function","is the function of"],
"a_hasconstraint" :[GR_EXPRESSES,"has constraint","constrains"],
"a_interpreted" : [GR_EXPRESSES,"has interpretation","is interpreted from"],
"a_concurrent" : [GR_NEAR,"seen concurrently with","seen concurrently with"],
"a_alias" : [GR_NEAR,"also known as","also known as"],
"a_approx" : [GR_NEAR,"is approximately","is approximately"],
"a_related_to" : [GR_NEAR,"may be related to","may be related to"],
"a_ass_dim" : [0, "NULL", "NULL"],
}
GR_DAY_TEXT = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday",
"Sunday"
]
GR_MONTH_TEXT = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
]
GR_SHIFT_TEXT = [
"Night",
"Morning",
"Afternoon",
"Evening"
]
########################################################################################################
def Sanitize(self,s):
ss = re.sub(r"[\\/,]","_",s)
return ss
########################################################################################################
def InitialGr(self,ofile):
# Basic axioms about causation (upstream/downstream principle)
self.ContextGr(ofile,"service relationship");
self.ContextGr(ofile,"system diagnostics");
self.ContextGr(ofile,"lifecycle state change");
self.ContextGr(ofile,"software exception");
self.ContextGr(ofile,"promise keeping");
self.ContextGr(ofile,"host location identification");
self.Gr(ofile,"client measurement anomaly","a_caused_by","client software exception","system diagnostics");
self.Gr(ofile,"client measurement anomaly","a_caused_by","server software exception","system diagnostics");
self.Gr(ofile,"server measurement anomaly","a_caused_by","server software exception","system diagnostics");
self.Gr(ofile,"measurement anomaly","a_caused_by","software exception","system diagnostics");
self.Gr(ofile,"resource contention","a_caused_by","resource limit","system diagnostics");
self.Gr(ofile,"increasing queue length","a_caused_by","resource contention","system diagnostics");
self.Gr(ofile,"system performance slow","a_caused_by","increasing queue length","system diagnostics");
self.Gr(ofile,"system performance slow","a_related_to","system performance latency","system diagnostics");
self.Gr(ofile,"system performance latency","a_caused_by","resource contention","system diagnostics");
self.Gr(ofile,"system performance latency","a_caused_by","increasing queue length","system diagnostics");
self.Gr(ofile,"system performance latency","a_caused_by","server unavailability","system diagnostics");
self.Gr(ofile,"server unavailability","a_caused_by","software crash","system diagnostics");
self.Gr(ofile,"server unavailability","a_caused_by","system performance slow","system diagnostics");
########################################################################################################
def Gr(self,ofile,from_t, name, to_t, context):
if from_t == to_t:
return
atype,fwd,bwd = list(self.A[name])
sfrom = self.Sanitize(from_t)
if len(context) > 0:
fs = "(" + sfrom + "," + "%d" % atype + "," + fwd + "," + to_t + "," + bwd + "," + context + ")\n"
else:
fs = "(" + sfrom + "," + "%d" % atype + "," + fwd + "," + to_t + "," + bwd + "," + "*" + ")\n"
ofile.write(fs)
########################################################################################################
def IGr(self,ofile,from_t, name, to_t, context):
if from_t == to_t:
return
type,fwd,bwd = self.A[name]
sfrom = self.Sanitize(from_t)
if len(context) > 0:
fs = "(" + sfrom + "," + "-%d" % type + "," + bwd + "," + to_t + "," + fwd + "," + context + ")\n"
else:
fs = "(" + sfrom + "," + "-%d" % type + "," + bwd + "," + to_t + "," + fwd + "," + "*" + ")\n"
ofile.write(fs)
########################################################################################################
def Number(self,ofile,from_t, q, context):
type,fwd,bwd = self.A["a_hasrole"]
if len(context) > 0:
fs = "(" + "%.2lf" % q + "," + "-%d" % type + "," + bwd + "," + "number" + "," + fwd + "," + context + ")\n"
else:
fs = "(" + "%.2lf" % q + "," + "-%d" % type + "," + bwd + "," + "number" + "," + fwd + "," + "*" + ")\n"
ofile.write(fs)
########################################################################################################
def GrQ(self,ofile,from_t, name, q, context):
type,fwd,bwd = self.A[name]
sfrom = self.Sanitize(from_t)
if len(context) > 0:
fs = "(" + sfrom + "," + "%d" % type + "," + bwd + "," + "%.2lf" % q + "," + fwd + "," + context + ")\n"
else:
fs = "(" + sfrom + "," + "%d" % type + "," + bwd + "," + "%.2lf" % q + "," + fwd + "," + "*" + ")\n"
ofile.write(fs)
########################################################################################################
def RoleGr(self,ofile,compound_name,role,attributes,ex_context):
self.Gr(ofile,compound_name,"a_hasrole",role,ex_context)
if len(attributes) > 0:
words = attributes.split(",")
for word in words:
self.Gr(ofile,compound_name,"a_hasattr",word,self.ALL_CONTEXTS);
return compound_name
########################################################################################################
def ContextGr(self,ofile,compound_name):
if len(compound_name) > 0:
words = compound_name.split(" ")
for word in words:
self.Gr(ofile,compound_name,"a_contains",word,self.ALL_CONTEXTS);
return compound_name
########################################################################################################
def EventClue(self,ofile,who,what,whentime,where,how,why,icontext):
if (whentime > 0):
when = self.TimeGr(ofile,whentime);
else:
when = "repeated event";
event = who + " saw " + what + " at " + when + " location " + where + " " + how + " cause " + why
attr = who + "," + what + "," + when + "," + where + "," + how + "," + why
self.RoleGr(ofile,event,"event",attr,icontext)
self.RoleGr(ofile,who,"who","",icontext)
self.RoleGr(ofile,what,"what","",icontext)
self.RoleGr(ofile,how,"how","",icontext)
self.RoleGr(ofile,why,"why","",icontext)
self.Gr(ofile,what,"a_related_to",why,icontext)
########################################################################################################
def TimeGr(self,ofile,now):
# To do: Extend to add GMT too...
lt = time.localtime(now)
# Time semantics
lifecycle = "Lcycle_%d" % (lt[0] % 3)
year = "Yr%d" % lt[0]
month = self.GR_MONTH_TEXT[lt[1]-1]
day = "Day%02d" % lt[2]
dow = "%s" % self.GR_DAY_TEXT[lt[6]]
hour = "Hr%02d" % lt[3]
shift = "%s" % self.GR_SHIFT_TEXT[int(lt[3] / 6)];
quarter = "Q%d" % ((lt[4] / 15) + 1)
min = "Min%02d" % lt[4]
interval_start = (lt[4] / 5) * 5
interval_end = (interval_start + 5) % 60
mins = "Min%02d_%02d" % (interval_start,interval_end)
hub = "on %s %s %s %s %s at %s %s %s" % (shift,dow,day,month,year,hour,mins,quarter)
attributes = "%s,%s,%s,%s,%s,%s,%s,%s" % (shift,dow,day,month,year,hour,mins,quarter)
self.RoleGr(ofile,hub,"when",attributes,self.ContextGr(ofile,"local clock time"));
self.RoleGr(ofile,shift,"time of day","work shift","time");
self.RoleGr(ofile,dow,"weekday","","clock time");
self.RoleGr(ofile,day,"day of month","","clock time");
self.RoleGr(ofile,month,"month","","clock time");
self.RoleGr(ofile,year,"year","","clock time");
self.RoleGr(ofile,hour,"hour","","clock time");
self.RoleGr(ofile,month,"minutes past the hour","minutes","clock time");
return hub;
# Could also use WeekSlot (Mon-Sun,MinXX_YY),
# MonthSlot (1stday, lastday, else DayN) etc
########################################################################################################
def LogTimeFormat1(self,ofile,str):
now = datetime.strptime(str,'%Y-%m-%d %H:%M:%S')
return self.TimeGr(ofile,time.mktime(now.timetuple()))
########################################################################################################
def LogTimeKeyGen1(self,str):
now = datetime.strptime(str,'%Y-%m-%d %H:%M:%S')
return self.TimeKeyGen(time.mktime(now.timetuple()))
########################################################################################################
def TimeKeyGen(self,maketime):
#datetimeFormat = '%Y-%m-%d %H:%M:%S'
#now = datetime.strptime(str, datetimeFormat)
#print "time verify " + now.ctime()
#maketime = time.mktime(now.timetuple()))
lt = time.localtime(maketime)
# Time semantics
lifecycle = "Lcycle_%d" % (lt[0] % 3)
year = "Yr%d" % lt[0]
month = self.GR_MONTH_TEXT[lt[1]-1]
day = "Day%02d" % lt[2]
dow = "%3.3s" % self.GR_DAY_TEXT[lt[6]]
hour = "Hr%02d" % lt[3]
shift = "%s" % self.GR_SHIFT_TEXT[int(lt[3] / 6)];
quarter = "Q%d" % ((lt[4] / 15) + 1)
min = "Min%02d" % lt[4]
interval_start = (lt[4] / 5) * 5
interval_end = (interval_start + 5) % 60
mins = "Min%02d_%02d" % (interval_start,interval_end)
key = "%s:%s:%s" % (dow,hour,mins)
return key
########################################################################################################
def WhereGr(self,ofile,address,uqhn,domain,ipv4,ipv6):
# VUQNAME, VDOMAIN, VIPADDRESS,NULL);
# figure out my IP address, FQHN, domainname, etc...
if len(domain) == 0:
domain = "unknown domain";
if len(ipv6) == 0:
ipv6 = "no ipv6"
where = "host %s.%s IPv4 %s ipv6 %s" % (uqhn,domain,ipv4,ipv6)
if len(address) > 0:
attr = "hostname %s,domain %s,IPv4 %s,IPv6 %s,address %s" % (uqhn,domain,ipv4,ipv6,address)
else:
attr = "hostname %s,domain %s,IPv4 %s,IPv6 %s" % (uqhn,domain,ipv4,ipv6)
self.RoleGr(ofile,where,"where",attr,"host location identification");
self.RoleGr(ofile,self.Domain(domain),"dns domain name",domain,"host location identification")
hostname = self.Hostname(uqhn)
self.RoleGr(ofile,hostname,"hostname",uqhn,"host location identification")
self.Gr(ofile,where,"a_alias",hostname,"host location identification"); # Alias for quick association
self.Gr(ofile,self.Domain(domain),"a_contains",hostname,"host location identification");
identity = self.HostID(uqhn)
self.Gr(ofile,hostname,"a_alias",identity,"host location identification");
self.RoleGr(ofile,self.IPv4(ipv4),"ipv4 address",ipv4,"host location identification");
self.Gr(ofile,where,"a_alias",self.IPv4(ipv4),"host location identification"); # Alias for quick association
self.Gr(ofile,self.Domain(domain),"a_contains",self.IPv4(ipv4),"host location identification");
identity = self.HostID(ipv4)
self.Gr(ofile,self.IPv4(ipv4),"a_alias",identity,"host location identification");
if len(ipv6) > 0:
self.RoleGr(ofile,self.IPv6(ipv6),"ipv6 address", ipv6,"host location identification");
self.Gr(ofile,where,"a_alias",self.IPv6(ipv6),"host location identification"); # Alias for quick association
self.Gr(ofile,self.Domain(domain),"a_contains",self.IPv6(ipv6),"host location identification");
identity = self.HostID(ipv6)
self.Gr(ofile,self.IPv6(ipv6),"a_alias",identity,"host location identification")
self.Gr(ofile,hostname,"a_alias",self.IPv6(ipv6),"host location identification");
if len(address) > 0:
addressx = "description address %s" % address
self.RoleGr(ofile,addressx,"description address",address,"host location identification");
self.Gr(ofile,self.Domain(domain),"a_origin",addressx,"host location identification");
self.Gr(ofile,"description address","a_related_to","street address","host location identification");
self.Gr(ofile,hostname,"a_alias",self.IPv4(ipv4),"host location identification");
return where;
########################################################################################################
def HereGr(self,ofile,address):
# VUQNAME, VDOMAIN, VIPADDRESS,NULL);
# figure out my IP address, FQHN, domainname, etc...
id = "host localhost domain undefined ipv4 127.0.0.1 ipv6 ::1" # how can we make this the outer ip?
import netifaces
macs = []
ipv4s = []
ipv6s = []
for i in netifaces.interfaces():
addrs = netifaces.ifaddresses(i)
iface_details = netifaces.ifaddresses(i)
# the netifaces.AF_* keys are integer constants, not strings; loopback addresses are filtered out here
if netifaces.AF_INET in iface_details:
ipv4 = iface_details[netifaces.AF_INET]
ipv4s.extend(map(lambda x: x['addr'], filter(lambda x: 'addr' in x and x['addr'] != "127.0.0.1", ipv4)))
if netifaces.AF_INET6 in iface_details:
ipv6 = iface_details[netifaces.AF_INET6]
ipv6s.extend(map(lambda x: x['addr'], filter(lambda x: 'addr' in x and x['addr'] != "::1", ipv6)))
if netifaces.AF_LINK in iface_details:
mac = iface_details[netifaces.AF_LINK]
macs.extend(map(lambda x: x['addr'], filter(lambda x: 'addr' in x, mac)))
fqhn = socket.getfqdn()
try:
domain = fqhn.split('.',1)[1]
except:
domain = "unknown"
try:
mainv6 = ipv6s[0]
except:
mainv6 = "::1"
try:
mainv4 = ipv4s[0]
except:
return "127.0.0.1"
uqhn = socket.gethostname()
self.WhereGr(ofile,address,uqhn,domain,mainv4,mainv6)
#print ipv4s
#print ipv6s
#print macs
for ip in ipv4s:
try:
identity = self.HostID(socket.gethostbyaddr(ip)[0])
self.Gr(ofile,identity,"a_alias",mainv4,"host location identification")
except:
identity = self.HostID(ip)
identity = self.HostID(ip)
self.Gr(ofile,identity,"a_alias",mainv4,"host location identification")
for ip in ipv6s:
try:
identity = self.HostID(socket.gethostbyaddr(ip)[0])
self.Gr(ofile,identity,"a_alias",mainv4,"host location identification")
except:
identity = self.HostID(ip)
identity = self.HostID(ip)
if not mainv6 == "::1":
self.Gr(ofile,identity,"a_alias",mainv6,"host location identification")
if not mainv4 == "127.0.0.1":
self.Gr(ofile,identity,"a_alias",mainv4,"host location identification")
for mac in macs:
identity = self.HostID(mac)
if not mainv6 == "::1":
self.Gr(ofile,identity,"a_alias",mainv6,"host location identification")
if not mainv4 == "127.0.0.1":
self.Gr(ofile,identity,"a_alias",mainv4,"host location identification")
return id
########################################################################################################
def ServiceGr(self,ofile,servicename,portnumber):
name = "%s on port %d" % (self.SService(servicename), portnumber)
self.RoleGr(ofile,name,self.SService(servicename),self.IPPort(portnumber),"service relationship")
self.Gr(ofile,self.SService(servicename),"a_hasrole","service","service relationship")
self.Gr(ofile,self.SService(servicename),"a_hasfunction",servicename,"service relationship")
port = "%d" % portnumber
self.RoleGr(ofile,self.IPPort(portnumber),"ip portnumber",port,"service relationship")
# ancillary notes
self.Gr(ofile,self.SServer(servicename),"a_hasrole","server","service relationship")
self.Gr(ofile,self.SClient(servicename),"a_hasrole","client","service relationship")
self.Gr(ofile,self.SClient(servicename),"a_depends",self.SServer(servicename),"service relationship")
self.Gr(ofile,self.SClient(servicename),"a_uses",name,"service relationship");
return name
########################################################################################################
def ServerInstanceGr(self,ofile,servicename,portnumber,servername,where):
self.ServiceGr(ofile,servicename,portnumber)
hub = "%s %s" % (self.SServerInstance(servicename,servername),where)
self.RoleGr(ofile,hub,self.SServerInstance(servicename,servername),where,"service relationship instance")
self.Gr(ofile,self.SService(servicename),"a_providedby",hub,"service relationship");
return hub
########################################################################################################
def ClientInstanceGr(self,ofile,servicename,clientname,where):
hub = "%s %s" % (self.SClientInstance(servicename,clientname),where)
self.RoleGr(ofile,hub,self.SClientInstance(servicename,clientname),where,"service relationship instance")
self.Gr(ofile,hub,"a_uses",self.SService(servicename),"service relationship")
return hub
########################################################################################################
def GivePromiseGr(self,ofile,S,R,body):
sender = "promiser %s" % S
receiver = "promisee %s" % R
promisehub = "%s promises to give %s to %s" % (sender,body,receiver)
attr = "%s,promise body +%s,%s" % (sender,body,receiver)
self.RoleGr(ofile,promisehub,"give-provide promise",attr,"promise keeping")
self.Gr(ofile,sender,"a_depends",promisehub,"promise keeping")
self.Gr(ofile,promisehub,"a_depends",sender,"promise keeping")
return promisehub
########################################################################################################
def AcceptPromiseGr(self,ofile,R,S,body):
receiver = "promiser %s" % R
sender = "promisee %s" % S
promisehub = "%s promises to accept %s to %s" % (receiver,body,sender)
attr = "%s,promise body -%s,%s" % (sender,body,receiver)
self.RoleGr(ofile,promisehub,"use-accept promise",attr,"promise keeping")
self.Gr(ofile,receiver,"a_depends",promisehub,"promise keeping")
self.Gr(ofile,"use-accept promise","a_related_to","client pull methods","promise keeping")
return promisehub
########################################################################################################
def ImpositionGr(self,ofile,S,R,body):
sender = "imposer %s" % S
receiver = "imposee %s" % R
promisehub = "%s imposes body %s onto %s" % (sender,body,receiver)
attr = "%s,imposition body %s,%s" % (sender,body,receiver)
self.RoleGr(ofile,promisehub,"imposition",attr,"promise keeping")
self.Gr(ofile,"imposition","a_related_to","client push methods","promise keeping")
# Imposition only affects if there is an accept promise
acceptance = self.AcceptPromiseGr(ofile,R,S,body)
if acceptance:
self.Gr(ofile,promisehub,"a_depends",acceptance,"promise keeping");
self.Gr(ofile,promisehub,"a_depends",sender,"promise keeping")
return promisehub;
########################################################################################################
def ClientQuery(self,ofile,client,server,request,servicename,portnumber):
attr = "port %d" % portnumber
p = "%d" % portnumber
self.RoleGr(ofile,attr,"port",p,"client service query")
query = "%s requests %s from %s on port %d" % (self.SClientInstance(servicename,client),request,self.SServerInstance(servicename,server),portnumber)
attr = "%s,%s,port %d" % (self.SClientInstance(servicename,client),self.SServerInstance(servicename,server),portnumber)
id = "query request for %s" % request
self.RoleGr(ofile,query,id,attr,"service relationship")
# Causal model
attr = "request %s from service %s port %d" % (request,servicename,portnumber)
self.ImpositionGr(ofile,self.SClientInstance(servicename,client),self.SServerInstance(servicename,server),attr)
return query
########################################################################################################
def ClientPush(self,ofile,client,server,request,servicename,portnumber):
attr = "port %d" % portnumber
p = "%d" % portnumber
self.RoleGr(ofile,attr,"port",p,"client service query")
query = "%s pushes %s to %s on port %d" % (self.SClientInstance(servicename,client),request,self.SServerInstance(servicename,server),portnumber)
attr = "%s,%s,port %d" % (self.SClientInstance(servicename,client),self.SServerInstance(servicename,server),portnumber)
id = "query pushes %s" % request
self.RoleGr(ofile,query,id,attr,"service relationship")
# Causal model
attr = "push %s to service %s port %d" % (request,servicename,portnumber)
self.ImpositionGr(ofile,self.SClientInstance(servicename,client),self.SServerInstance(servicename,server),attr)
return query
########################################################################################################
def ServerListenPromise(self,ofile,servername,servicename,port):
listen = "%s listens for requests on port %d" % (self.SServerInstance(servicename,servername),port)
attr = "%s,port %d" % (self.SServerInstance(servicename,servername),port)
self.RoleGr(ofile,listen,"listen on service port",attr,"service relationship")
# Causation
ports = "listening on port %d" % port
self.GivePromiseGr(ofile,self.SServerInstance(servicename,servername),"ip INADDR_ANY",ports)
return listen
########################################################################################################
def ServerAcceptPromise(self,ofile,servername,fromclient,servicename,port):
accept = "%s accept data from %s on port %d" % (self.SServerInstance(servicename,servername),self.SClientInstance(servicename,fromclient),port)
attr = "%s,%s,%s" % (self.SServerInstance(servicename,servername),self.SClientInstance(servicename,fromclient),self.IPPort(port))
id = "accept data on port %d" % port
self.RoleGr(ofile,accept,id,attr,"service relationship")
self.AcceptPromiseGr(ofile,self.SServerInstance(servicename,servername),self.SClientInstance(servicename,fromclient),id)
return accept
########################################################################################################
def ServerReplyPromise(self,ofile,servername,toclient,servicename,port):
reply = "%s reply to %s from port %d" % (self.SServerInstance(servicename,servername),self.SClientInstance(servicename,toclient),port)
attr = "%s,%s,%s" % (self.SServerInstance(servicename,servername),self.SClientInstance(servicename,toclient),self.IPPort(port))
id = "reply to queries from port %d" % port
self.RoleGr(ofile,reply,id,attr,"service relationship")
self.GivePromiseGr(ofile,self.SServerInstance(servicename,servername),self.SClientInstance(servicename,toclient),id)
return reply
########################################################################################################
def ClientWritePostData(self,ofile,client,server,data,servicename,portnumber):
return self.ClientPush(ofile,client,server,data,servicename,portnumber)
########################################################################################################
def ClientReadGetData(self,ofile,client,server,servicename,get,portnumber):
return self.ClientQuery(ofile,client,server,get,servicename,portnumber)
########################################################################################################
def ServerAcceptPostData(self,ofile,server,client,servicename,data):
request = "accept %.64s to %s request" % (data,self.SService(servicename))
return self.AcceptPromiseGr(ofile,server,client,request)
########################################################################################################
def ServerReplyToGetData(self,ofile,server,client,servicename,data):
request = "conditional reply %.64s to %s request" % (data,self.SService(servicename))
return self.GivePromiseGr(ofile,server,client,request)
########################################################################################################
def SClientInstance(self,service,client):
ret = "%s client %s" % (service,client)
return ret
def SServerInstance(self,service,server):
ret = "%s server %s" % (service,server)
return ret
def SClient(self,service):
ret = "%s client" % service
return ret
def SServer(self,service):
ret = "%s server" % service
return ret
def SService(self,servicename):
ret = "service %s" % servicename
return ret
def HostID(self,id):
ret = "host identity %s" % id
return ret
def Domain(self,id):
ret = "domain %s" % id
return ret
def IPv4(self,id):
ret = "ipv4 address %s" % id
return ret
def IPv6(self,id):
ret = "ipv6 address %s" % id
return ret
def Hostname(self,id):
ret = "hostname %s" % id
return ret
def IPPort(self,p):
ret = "ip portnumber %d" % p
return ret
########################################################################################################
def ExceptionGr(self,ofile,origin,logmessage):
# 2016-08-13T15:00:01.906160+02:00 linux-e2vo /usr/sbin/cron[23039]: pam_unix(crond:session): session opened for user root by (uid=0)
# When where who what (new who)
# Why = (lifecycle state change, exception, ...)
# ???
self.Gr(ofile,origin,"a_related_to",logmessage,"???? TBD")
return "something"
########################################################################################################
# Key-value data store (could use an embedded DB, here just with tmp files)
########################################################################################################
#def UpdateRealQ(self,qname,newq):
# if (self.LoadSpecialQ(qname,&oldav,&oldvar)):
# nextav = self.Average(newq,oldav,WAGE);
# newvar = (newq-oldav)*(newq-oldav);
# nextvar = self.Average(newvar,oldvar,WAGE);
# devq = sqrt(oldvar);
# else:
# nextav = 0.5;
# newvar = (newq-oldav)*(newq-oldav);
# nextvar = self.Average(newvar,oldvar,WAGE);
# devq = sqrt(oldvar);
# if (newq > oldav + 3*devq):
# anomaly = "%s_high_anomaly" % qname
# if (newq < oldav - 3*devq):
# anomaly = "%s_low_anomaly" % qname
# self.SaveSpecialQ(qname,nextav,nextvar);
# return anomaly
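# Illustrative sketch (added, not part of the class above): a plain-Python version of
# the UpdateRealQ idea, keeping the running average/variance in an in-memory dict rather
# than tmp files; WAGE (the forgetting factor) and the 3-sigma band are assumptions.
WAGE = 0.1
specialq = {}
def UpdateRealQSketch(qname, newq):
    anomaly = ""
    oldav, oldvar = specialq.get(qname, (newq, 0.0))
    devq = oldvar ** 0.5
    if newq > oldav + 3 * devq:
        anomaly = "%s_high_anomaly" % qname
    if newq < oldav - 3 * devq:
        anomaly = "%s_low_anomaly" % qname
    nextav = (1 - WAGE) * oldav + WAGE * newq
    nextvar = (1 - WAGE) * oldvar + WAGE * (newq - oldav) * (newq - oldav)
    specialq[qname] = (nextav, nextvar)
    return anomaly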
| 30,510 | 41.494429 | 156 | py |
Cellibrium | Cellibrium-master/Percolibrium/Percolators/python/hello.py | from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello world"
| 102 | 11.875 | 24 | py |
Cellibrium | Cellibrium-master/Percolibrium/Percolators/python/lib/neo4j.py | # /usr/bin/env python
import urllib, urllib2, json, sys, shlex, re, os, base64
class Neo4j:
def __init__(self, neo4j_url, neo4j_user, neo4j_pass):
self.neo4j_user = neo4j_user
self.neo4j_pass = neo4j_pass
self.neo4j_url = neo4j_url
def neo4j_rest_cypher(self, query_data):
b64 = base64.b64encode('%s:%s' % (self.neo4j_user, self.neo4j_pass))
request = urllib2.Request(self.neo4j_url + '/cypher',
data = json.dumps(query_data),
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json; charset=UTF-8',
'X-Stream': 'true',
'Authorization': 'Basic %s' % b64
})
return json.loads(urllib2.urlopen(request).read())
def neo4j_create_or_update_node(self, query_data):
if query_data['ids']:
ids = [];
for id in query_data['ids'].keys():
ids.append('has(n.' + id + ')')
ids.append('n.' + id + '={' + id + '}')
clause = ' and '.join(ids)
q = {
'query' : 'START n=node(*) WHERE ' + clause + ' RETURN n',
'params' : query_data['ids']
}
response = self.neo4j_rest_cypher(q)
if len(response['data']) == 0:
self.neo4j_create_node(query_data)
elif len(response['data']) == 1:
self.neo4j_update_node(query_data)
else:
print 'ERROR: Found several matching nodes (' + str(len(response['data'])) + ')'
#MERGE (sp:Switchport {MAC: {sp_id}.MAC, Name: {sp_id}.Name}) SET sp += {sp_props}
def neo4j_create_node(self,query_data):
props = []
params = {}
if 'ids' in query_data:
for id in query_data['ids']:
props.append(id + ': {' + id + '}')
params[id] = query_data['ids'][id]
if 'properties' in query_data:
for prop in query_data['properties']:
props.append(prop + ': {' + prop + '}')
params[prop] = query_data['properties'][prop]
query = 'CREATE (n {' + ', '.join(props) + '})'
q = {
'query' : query,
'params' : params
}
response = self.neo4j_rest_cypher(q)
def neo4j_update_node(self,query_data):
ids = []
props = []
params = {}
p = {}
if 'ids' in query_data:
for id in query_data['ids']:
ids.append('has(n.' + id + ')')
ids.append('n.' + id + '={' + id + '}')
params[id] = query_data['ids'][id]
if 'properties' in query_data:
for prop in query_data['properties']:
props.append('SET n.' + prop + '={' + prop + '}')
params[prop] = query_data['properties'][prop]
query = 'START n=node(*) WHERE ' + ' AND '.join(ids) + ' ' + ' '.join(props)
q = {
'query' : query,
'params' : params
}
response = self.neo4j_rest_cypher(q)
def neo4j_rest_transaction_commit(self,query_data):
b64 = base64.b64encode('%s:%s' % (self.neo4j_user, self.neo4j_pass))
request = urllib2.Request(self.neo4j_url + '/transaction/commit',
data = json.dumps(query_data),
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json; charset=UTF-8',
'X-Stream': 'true',
'Authorization': 'Basic %s' % b64
} )
return json.loads(urllib2.urlopen(request).read())
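# Illustrative usage (added; the URL and credentials are placeholders, not defaults):
# db = Neo4j('http://localhost:7474/db/data', 'neo4j', 'secret')
# db.neo4j_rest_transaction_commit({'statements': [{'statement': 'MATCH (n) RETURN count(n)'}]})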
# def neo4j_get_node_id(self,label,param,value):
# query = "start n = node(*) where (n:" + label + ") and n." + param + " = {value} return id(n)";
# query_data = { 'query': query, 'params': { 'value' : value } }
# return self.neo4j_rest_cypher(query_data)
#
# def neo4j_property_set(self,node_url,name,value):
# request = urllib2.Request(node_url + '/properties/' + name,
# data = value,
# headers = { 'Content-Type': 'application/json' } )
# request.get_method = lambda: 'PUT'
# response = urllib2.urlopen(request)
# return response.getcode() == 204
| 3,640 | 31.508929 | 102 | py |
Cellibrium | Cellibrium-master/Percolibrium/Percolators/python/lib/__init__.py | 0 | 0 | 0 | py | |
PrincipledPruningBNN | PrincipledPruningBNN-main/bayesian-tensorflow/setup.py | # Imports
from setuptools import setup, find_packages
import pathlib
# Get the long description from the README file
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / "README.md").read_text(encoding="utf-8")
# Setup
setup(
# Basic info
name='bayesian-tensorflow',
version='1.0.0',
# Descriptions
description='Bayesian Neural Networks for TensorFlow',
long_description=long_description,
long_description_content_type='text/markdown',
url='',
# Author info
author='Jim Beckers',
author_email='jbeckers@gnhearing.com',
# Classifiers
classifiers = [
        'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
# Packages
package_dir={"": "src"},
packages=find_packages(where="src"),
python_requires='>=3.7',
# install_requires=[
# 'tensorflow>=2.9.0',
# ],
) | 1,000 | 26.805556 | 67 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/bayesian-tensorflow/src/bayesian_tensorflow/inference.py | # Imports
import tensorflow as tf
# Local functions
from bayesian_tensorflow import losses
# Custom training step function, for Bayes-by-Backprop
@tf.function
def BBB(model, optim, x_batch, y_batch, n_data):
"""
This function performs gradient descent on a mini-batch of data, when using Bayes-by-Backprop
(BBB) as the inference method. It uses the Variational Free Energy (VFE) as its loss function.
    It takes the BNN model, optimizer, batch data (x and y) and the total data-size as its inputs.
    It returns the separate VFE terms (i.e. KL-theta, KL-tau, Acc.) of the mini-batch.
    When performing the gradient update, the VFE loss is scaled by the batch-size as this results
    in a more stable training procedure. The returned VFE values are not scaled!
"""
# Get batch-size
b_size = tf.cast(tf.shape(x_batch)[0], dtype=tf.float32)
# Open GradientTape
with tf.GradientTape() as tape:
# Perform forward pass
y_pred = model(x_batch, training=True)
# Get KL-losses, scaled to percentage of data
kl_theta = sum(model.losses) / n_data * b_size
kl_tau = model.layers[-1].KL() / n_data * b_size
# Compute accuracy loss
acc_loss = losses.AccLossBBB(tf.cast(y_batch, dtype=tf.float32), y_pred)
# Full Varational Free Energy loss, scaled down by batch-size
batch_loss = (kl_theta + kl_tau + acc_loss) / b_size
# Compute gradients after batch
grads = tape.gradient(batch_loss, model.trainable_weights)
# Reset gradients with NaN values
for i in range(len(grads)):
grads[i] = tf.where(tf.math.is_finite(grads[i]), grads[i], tf.zeros_like(grads[i]))
# Optimize model parameters
optim.apply_gradients(zip(grads, model.trainable_weights))
# Return separate losses
return kl_theta, kl_tau, acc_loss
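# Illustrative helper (added, not part of the original package): run BBB() over a
# tf.data.Dataset for a number of epochs; `model`, `optim` and `dataset` are assumed
# to be supplied by the caller (e.g. a Keras model whose output layer is GammaBBB).
def fit_BBB(model, optim, dataset, n_data, epochs=1):
    for epoch in range(epochs):
        # Accumulate the (unscaled) VFE over the epoch
        vfe = 0.
        for x_batch, y_batch in dataset:
            kl_theta, kl_tau, acc_loss = BBB(model, optim, x_batch, y_batch, n_data)
            vfe += kl_theta + kl_tau + acc_loss
        print(f"Epoch {epoch + 1}: VFE = {float(vfe):.3f}")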
# Custom training step function, for Variance Back-Propagation
@tf.function
def VBP(model, optim, x_batch, y_batch, n_data):
"""
This function performs gradient descent on a mini-batch of data, when using Variance Back-Propagation
(VBP) as the inference method. It uses the Variational Free Energy (VFE) as its loss function.
    It takes the BNN model, optimizer, batch data (x and y) and the total data-size as its inputs.
    It returns the separate VFE terms (i.e. KL-theta, KL-tau, Acc.) of the mini-batch.
    When performing the gradient update, the VFE loss is scaled by the batch-size as this results
    in a more stable training procedure. The returned VFE values are not scaled!
"""
# Get batch-size
b_size = tf.cast(tf.shape(x_batch)[0], dtype=tf.float32)
# Open GradientTape
with tf.GradientTape() as tape:
# Perform forward pass
y_pred = model(x_batch, training=True)
# Get KL-losses, scaled to percentage of data
kl_theta = sum(model.losses) / n_data * b_size
kl_tau = model.layers[-1].KL() / n_data * b_size
# Compute accuracy loss
acc_loss = losses.AccLossVBP(tf.cast(y_batch, dtype=tf.float32), y_pred)
# Full Varational Free Energy loss, scaled down by batch-size
batch_loss = (kl_theta + kl_tau + acc_loss) / b_size
# Compute gradients after batch
grads = tape.gradient(batch_loss, model.trainable_weights)
# Reset gradients with NaN values
for i in range(len(grads)):
grads[i] = tf.where(tf.math.is_finite(grads[i]), grads[i], tf.zeros_like(grads[i]))
# Optimize model parameters
optim.apply_gradients(zip(grads, model.trainable_weights))
# Return separate losses
return kl_theta, kl_tau, acc_loss | 3,759 | 36.227723 | 105 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/bayesian-tensorflow/src/bayesian_tensorflow/losses.py | # Imports
import math
from keras import backend as K
import tensorflow as tf
# Accuracy loss function for regression models, for Bayes-by-Backprop
@tf.function
def AccLossBBB(y_true, y_pred):
"""
This function computes the accuracy loss term of the Variational Free Energy (VFE) for the
Bayes-by-Backprop (BBB) inference method.
It takes the true target value and the model prediction as its inputs.
"""
# Split prediction
y_samp, alpha, beta = tf.unstack(tf.squeeze(y_pred), 3, axis=-1)
# Compute expected tau values
tau = alpha / beta
log_tau = K.sum(K.mean(tf.math.digamma(alpha) - tf.math.log(beta), axis=0))
# Get output dimension
M = tf.cast(tf.rank(alpha), dtype=tf.float32)
# Return accuracy loss
return 0.5 * K.sum(tau * K.square(y_true - y_samp) + M * K.log(2 * math.pi) - log_tau)
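# The term above is the expected negative log-likelihood under the Gamma(alpha, beta)
# posterior over the precision tau (added note):
#   E_q[-log N(y | y_samp, tau^-1)] = 0.5 * (E[tau] * (y - y_samp)^2 + M * log(2*pi) - E[log tau])
# with E[tau] = alpha / beta and E[log tau] = digamma(alpha) - log(beta).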
# Accuracy loss function for regression models, for Variance Back-Propagation
@tf.function
def AccLossVBP(y_true, y_pred):
"""
This function computes the accuracy loss term of the Variational Free Energy (VFE) for the
Variance Back-Propagation inference method.
It takes the true target value and the model prediction as its inputs.
"""
# Split prediction
y_mean, y_var, alpha, beta = tf.unstack(tf.squeeze(y_pred), 4, axis=-1)
# Compute expected values tau
tau = alpha / beta
log_tau = K.sum(K.mean(tf.math.digamma(alpha) - tf.math.log(beta), axis=0))
# Get output dimension
M = tf.cast(tf.rank(alpha), dtype=tf.float32)
# Return accuracy loss
return 0.5 * K.sum(tau * (K.square(y_true - y_mean) + y_var) + M * K.log(2 * math.pi) - log_tau) | 1,705 | 30.592593 | 100 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/bayesian-tensorflow/src/bayesian_tensorflow/activations.py | # Imports
import math
from keras import backend as K
import tensorflow as tf
# ReLU function
@tf.function
def relu_moments(h_mean, h_var):
"""
    This function computes the first and second (central) moments of a Normal distribution
passing through a ReLU function.
It takes the mean and variance of the Normal as its inputs, and returns the mean and
variance of the resulting output Normal distribution.
    The moments are computed using the well-defined moments of a rectified Normal distribution.
"""
# Get std.dev.
h_std = K.sqrt(h_var)
# Compute intermediate values
a_pre = -(h_mean / h_std)
a = tf.where(tf.math.is_nan(a_pre), tf.zeros_like(a_pre), a_pre)
Z = 0.5 - 0.5 * tf.math.erf(a / tf.math.sqrt(2.))
phi = 1./tf.math.sqrt(2*math.pi) * K.exp(-0.5 * K.square(a))
# Compute mean ...
y_mean = h_mean * Z + h_std * phi
# ... and variance
y_var = (h_var + K.square(h_mean)) * Z + h_mean * h_std * phi - K.square(y_mean)
# Return moments
return y_mean, y_var
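# Illustrative sanity check (added, not part of the original module): compare the
# closed-form ReLU moments above against Monte-Carlo estimates for one Normal input.
def _mc_check_relu(mean=0.5, var=2.0, n=100000):
    x = tf.random.normal([n], mean=mean, stddev=var ** 0.5)
    y = tf.nn.relu(x)
    cf_mean, cf_var = relu_moments(tf.constant(mean), tf.constant(var))
    return (float(tf.reduce_mean(y)), float(cf_mean)), (float(tf.math.reduce_variance(y)), float(cf_var))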
# Sigmoid function
@tf.function
def sigmoid_moments(h_mean, h_var):
"""
    This function computes the first and second (central) moments of a Normal distribution
passing through a Sigmoid function.
It takes the mean and variance of the Normal as its inputs, and returns the mean and
variance of the resulting output Normal distribution.
    The moments are computed using an approximation of the sigmoid function by means of the
cumulative distribution function of a Normal distribution.
"""
    # Intermediate value; the probit approximation uses the input variance directly
    t = K.sqrt(1. + math.pi / 8. * h_var)
# Compute mean ...
y_mean = tf.math.sigmoid(h_mean / t)
# .. and variance
y_var = y_mean * (1. - y_mean) * (1. - 1./t)
# Return moments
return y_mean, y_var
# Hyperbolic tangent function
@tf.function
def tanh_moments(h_mean, h_var):
"""
    This function computes the first and second (central) moments of a Normal distribution
passing through a hyperbolic tangent function.
It takes the mean and variance of the Normal as its inputs, and returns the mean and
variance of the resulting output Normal distribution.
The moment are computed using a linear transform of the sigmoid function.
"""
# Use sigmoid moments ...
s_mean, s_var = sigmoid_moments(2*h_mean, 4*h_var)
# ... and linear transforms
y_mean, y_var = 2*s_mean - 1, 4*s_var
# Return moments
return y_mean, y_var | 2,544 | 28.252874 | 94 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/bayesian-tensorflow/src/bayesian_tensorflow/__init__.py | # Import activations
from .activations import *
# Import evaluation functions
from .evaluation import *
# Import layers
from .layers import *
# Import inference functions
from .inference import *
# Import losses
from .losses import * | 237 | 16 | 29 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/bayesian-tensorflow/src/bayesian_tensorflow/evaluation.py | # Imports
import tensorflow as tf
# Local functions
from bayesian_tensorflow import losses
# Custom training step function for Bayes-by-Backprop
@tf.function
def BBB(model, x_batch, y_batch, n_data):
"""
    This function evaluates the Variational Free Energy (VFE) when using the Bayes-by-Backprop (BBB)
    inference method.
    It takes the BNN model, batch data (x and y) and the total data-size as its inputs. It returns the
VFE value of the mini-batch.
"""
# Get batch-size
b_size = tf.cast(tf.shape(x_batch)[0], dtype=tf.float32)
# Perform forward pass
y_pred = model(x_batch, training=False)
# Get KL-losses, scaled to percentage of data
kl_theta = sum(model.losses) / n_data * b_size
kl_tau = model.layers[-1].KL() / n_data * b_size
# Compute accuracy loss
acc_loss = losses.AccLossBBB(tf.cast(y_batch, dtype=tf.float32), y_pred)
# Return total VFE loss
return kl_theta + kl_tau + acc_loss
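# Illustrative helper (added, not part of the original module): accumulate the BBB
# validation VFE over a full tf.data.Dataset supplied by the caller.
def BBB_dataset(model, dataset, n_data):
    vfe = 0.
    for x_batch, y_batch in dataset:
        vfe += BBB(model, x_batch, y_batch, n_data)
    return vfe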
# Custom training step function, for Variance Back-Propagation
@tf.function
def VBP(model, x_batch, y_batch, n_data):
"""
    This function evaluates the Variational Free Energy (VFE) when using the Variance Back-Propagation
    (VBP) inference method.
    It takes the BNN model, batch data (x and y) and the total data-size as its inputs. It returns the
VFE value of the mini-batch.
"""
# Get batch-size
b_size = tf.cast(tf.shape(x_batch)[0], dtype=tf.float32)
# Perform forward pass
y_pred = model(x_batch, training=False)
# Get KL-losses, scaled to percentage of data
kl_theta = sum(model.losses) / n_data * b_size
kl_tau = model.layers[-1].KL() / n_data * b_size
# Compute accuracy loss
acc_loss = losses.AccLossVBP(tf.cast(y_batch, dtype=tf.float32), y_pred)
# Return total VFE loss
return kl_theta + kl_tau + acc_loss | 1,898 | 29.142857 | 103 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/bayesian-tensorflow/src/bayesian_tensorflow/layers/bayes_by_backprop.py | # Imports
from keras import backend as K
from keras import initializers, activations
import tensorflow as tf
# Dense layer
class DenseBBB(tf.keras.layers.Layer):
"""
Variational fully connected layer (dense), following Bayes-by-Backprop (BBB).
It takes the number of units as its input, all other inputs are optional.
"""
def __init__(self,
units, # number of output features
activation = None, # activation function
reparam = 'local', # which reparameterization
prior_var = 1., # prior variance of parameters
std_dev = 0., # standard deviation of initializer
init = 'prior', # manner in which params are initialized
seed = None, # seed for (param) initialization
**kwargs):
# Copy inputs ...
self.units = units
self.activation = activations.get(activation)
self.reparam = reparam
self.prior_var = prior_var
self.std_dev = std_dev
self.init = init
# ... and set seed
if seed is not None:
tf.random.set_seed(seed)
# Other args
super().__init__(**kwargs)
# Standard function to return output shape
def compute_output_shape(self, input_shape):
return input_shape[0], self.units
# Standard function to create layer parameters
def build(self, input_shape):
# Initializer
if self.init == 'prior': # 'prior' is (sampled around) the prior
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho = initializers.normal(mean=K.log(K.exp(tf.math.sqrt(self.prior_var)) - 1.),
stddev=self.std_dev)
elif self.init == 'he': # 'he' uses mean and variance from HeNormal
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho = initializers.normal(mean=K.log(K.exp(tf.math.sqrt(2. / input_shape[1])) - 1.),
stddev=self.std_dev)
elif self.init == 'glorot': # 'glorot' uses mean and variance from GlorotNormal
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho = initializers.normal(mean=K.log(K.exp(tf.math.sqrt(2. / (input_shape[1] + self.units))) - 1.),
stddev=self.std_dev)
elif self.init == 'paper': # 'paper' follows Haussmann et al. (2019)
self.init_mu = initializers.HeNormal()
self.init_rho = initializers.normal(mean=-4.5, stddev=1e-3)
elif self.init == 'tf': # 'tf' follows the TensorFlow implementation
self.init_mu = initializers.normal(mean=0., stddev=0.1)
            self.init_rho = initializers.normal(mean=-6., stddev=0.1)
# Weight matrix 'W', also called kernel
self.kernel_mu = self.add_weight(name='kernel_mu', shape=(input_shape[1], self.units),
initializer=self.init_mu, trainable=True)
self.kernel_rho = self.add_weight(name='kernel_rho', shape=(input_shape[1], self.units),
initializer=self.init_rho, trainable=True)
# Bias vector 'b'
self.bias_mu = self.add_weight(name='bias_mu', shape=(self.units,),
initializer=self.init_mu, trainable=True)
self.bias_rho = self.add_weight(name='bias_rho', shape=(self.units,),
initializer=self.init_rho, trainable=True)
# Add KL-divergence loss
self.add_loss(lambda: self.KL())
# Create masks for pruning
self.kernel_mask = tf.ones_like(self.kernel_mu)
self.bias_mask = tf.ones_like(self.bias_mu)
# Super build function
super().build(input_shape)
# Standard function to compute output on forward pass
def call(self, inputs, **kwargs):
# For local reparameterization
if self.reparam == 'local':
# Get weight and bias variances
kernel_sigma = tf.math.softplus(self.kernel_rho)
bias_sigma = tf.math.softplus(self.bias_rho)
# Get output mean and variance
out_mu = K.dot(inputs, self.kernel_mu) + self.bias_mu
out_sigma = K.dot(K.square(inputs), K.square(kernel_sigma)) + K.square(bias_sigma)
# Sample from output
y = out_mu + K.sqrt(out_sigma) * tf.random.normal(tf.shape(out_mu))
# For global reparameterization
else:
# Sample weight matrix 'W'
kernel_sigma = tf.math.softplus(self.kernel_rho)
kernel = self.kernel_mu + kernel_sigma * tf.random.normal(self.kernel_mu.shape)
# Sample bias vector 'b'
bias_sigma = tf.math.softplus(self.bias_rho)
bias = self.bias_mu + bias_sigma * tf.random.normal(self.bias_mu.shape)
# Compute output sample
y = K.dot(inputs, kernel) + bias
# Return layer output
return self.activation(y)
# Custom function to compute KL-divergence loss of layer
def KL(self):
# Kernel
w_mean = self.kernel_mu
w_var = K.square(K.softplus(self.kernel_rho))
w_vals = (w_var + K.square(w_mean)) / self.prior_var - 1. + K.log(self.prior_var) - K.log(w_var)
KL_w = 0.5 * K.sum(tf.boolean_mask(w_vals, tf.math.is_finite(w_vals)))
# Bias
b_mean = self.bias_mu
b_var = K.square(K.softplus(self.bias_rho))
b_vals = (b_var + K.square(b_mean)) / self.prior_var - 1. + K.log(self.prior_var) - K.log(b_var)
KL_b = 0.5 * K.sum(tf.boolean_mask(b_vals, tf.math.is_finite(b_vals)))
# Return sum of kernel and bias
return KL_w + KL_b
# Custom function for compression based on BMR
def compress(self, red_var=1e-16):
# Kernel matrix
w_mean = self.kernel_mu
w_rho = self.kernel_rho
w_var = K.square(K.softplus(w_rho))
# Compute BMR values
BMR_w = self.BMR(w_mean, w_var, red_var)
# Compress parameters with dVFE <= 0
self.kernel_mu.assign(tf.where(BMR_w<=0, tf.zeros_like(w_mean), w_mean))
self.kernel_rho.assign(tf.where(BMR_w<=0, -1e5*tf.ones_like(w_rho), w_rho))
# Update kernel mask
self.kernel_mask = tf.where(BMR_w<=0, tf.zeros_like(w_mean), self.kernel_mask)
# Bias vector
b_mean = self.bias_mu
b_rho = self.bias_rho
b_var = K.square(K.softplus(b_rho))
# Compute BMR values
BMR_b = self.BMR(b_mean, b_var, red_var)
# Compress parameters with dVFE <= 0
self.bias_mu.assign(tf.where(BMR_b<=0, tf.zeros_like(b_mean), b_mean))
self.bias_rho.assign(tf.where(BMR_b<=0, -1e5*tf.ones_like(b_rho), b_rho))
# Update bias mask
self.bias_mask = tf.where(BMR_b<=0, tf.zeros_like(b_mean), self.bias_mask)
# Custom function to compute BMR values
def BMR(self, mean, var, red_var):
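        # Bayesian model reduction: for a Gaussian posterior N(mean, var) trained under the
        # prior N(0, prior_var), this returns the closed-form change in variational free
        # energy (dVFE) obtained by switching to a reduced prior N(0, red_var):
        #   P_i  = P_f + 1/red_var - 1/prior_var    (reduced posterior precision)
        #   mu_i = P_f * mean / P_i                 (reduced posterior mean)
        #   dVFE = 0.5 * (mean^2 * P_f - mu_i^2 * P_i - log(P_f / P_i * prior_var / red_var))
        # compress() prunes every parameter whose dVFE is <= 0.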
# Compute intermediate values
Pi_i = 1. / red_var
P_f = 1. / var
P_i = P_f + Pi_i - 1. / self.prior_var
mu_i = P_f * mean / P_i
# Return BMR values
return 0.5 * ((mean**2 * P_f - mu_i**2 * P_i) - K.log(Pi_i * P_f / P_i * self.prior_var))
# Custom function to reset model parameters
def param_reset(self):
# Kernel matrix
w_mean = self.kernel_mu
w_rho = self.kernel_rho
# Reset kernel
self.kernel_mu.assign(tf.where(self.kernel_mask==0, tf.zeros_like(w_mean), w_mean))
self.kernel_rho.assign(tf.where(self.kernel_mask==0, -1e5*tf.ones_like(w_rho), w_rho))
# Bias vector
b_mean = self.bias_mu
b_rho = self.bias_rho
# Reset bias
self.bias_mu.assign(tf.where(self.bias_mask==0, tf.zeros_like(b_mean), b_mean))
self.bias_rho.assign(tf.where(self.bias_mask==0, -1e5*tf.ones_like(b_rho), b_rho))
# Custom Gamma layer
class GammaBBB(tf.keras.layers.Layer):
"""
Dummy layer for adding an alpha and beta parameter of a Gamma distribution to a BNN.
Allows for joint optimization of posterior precision parameter(s).
"""
def __init__(self,
units = 1, # number of output features
alpha = 1., # initial value for alpha
beta = 1., # initial value for beta
**kwargs):
# Set units
self.units = units
# Set initial alpha and beta value
self.alpha_init = initializers.constant(K.log(alpha))
self.beta_init = initializers.constant(K.log(beta))
# Other args
super().__init__(**kwargs)
# Standard function to return output shape
def compute_output_shape(self, input_shape):
return input_shape[0], self.units
# Standard function to create layer parameters
def build(self, input_shape):
# Add (log) alpha and beta parameters
self.log_alpha = self.add_weight(name='log_alpha', shape=(self.units,),
initializer=self.alpha_init, trainable=True)
self.log_beta = self.add_weight(name='log_beta', shape=(self.units,),
initializer=self.beta_init, trainable=True)
# Super build function
super().build(input_shape)
# Standard function to compute output
def call(self, inputs, **kwargs):
# Extend alpha and beta to match inputs size
alpha = K.exp(self.log_alpha) * tf.ones_like(inputs)
beta = K.exp(self.log_beta) * tf.ones_like(inputs)
# Return inputs incl. alpha and beta
return tf.stack([inputs, alpha, beta], axis=-1)
# Custom function for KL-divergence
def KL(self):
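        # Closed-form KL(Gamma(alpha, beta) || Gamma(1, 1)) in the shape-rate
        # parameterization, summed over units; alpha and beta are stored as logs.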
# Get alpha and beta
alpha = K.exp(self.log_alpha)
beta = K.exp(self.log_beta)
# Return KL-divergence
return K.sum((alpha - 1) * tf.math.digamma(alpha) - tf.math.lgamma(alpha) + tf.math.log(beta) + alpha * ((1 - beta) / beta))
# GRU cell (i.e. layer)
class GRUCellBBB(tf.keras.layers.Layer):
"""
Variational Gated Recurrent Unit (GRU), following Bayes-by-Backprop (BBB).
    It takes the number of units as its input; all other inputs are optional.
"""
def __init__(self,
units, # number of output features
reparam = 'local', # which reparameterization
prior_var = 1., # prior variance of parameters
std_dev = 0., # standard deviation of initializer
init = 'prior', # manner in which params are initialized
seed = None, # seed for (param) initialization
**kwargs):
# Copy inputs ...
self.units = 3*units
self.state_size = units
self.reparam = reparam
self.prior_var = prior_var
self.std_dev = std_dev
self.init = init
# ... and set seed
if seed is not None:
tf.random.set_seed(seed)
# Other args
super().__init__(**kwargs)
# Standard function to return output shape
def compute_output_shape(self, input_shape):
return input_shape[0], self.units
# Standard function to create layer parameters
def build(self, input_shape):
# Initializer
if self.init == 'prior': # 'prior' is (sampled around) the prior
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho = initializers.normal(mean=K.log(K.exp(tf.math.sqrt(self.prior_var)) - 1.),
stddev=self.std_dev)
elif self.init == 'he': # 'he' uses mean and variance from HeNormal
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho = initializers.normal(mean=K.log(K.exp(tf.math.sqrt(2. / input_shape[1])) - 1.),
stddev=self.std_dev)
elif self.init == 'glorot': # 'glorot' uses mean and variance from GlorotNormal
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho = initializers.normal(mean=K.log(K.exp(tf.math.sqrt(2. / (input_shape[1] + self.units))) - 1.),
stddev=self.std_dev)
elif self.init == 'paper': # 'paper' follows Haussmann et al. (2019)
self.init_mu = initializers.HeNormal()
self.init_rho = initializers.normal(mean=-4.5, stddev=1e-3)
elif self.init == 'tf': # 'tf' follows the TensorFlow implementation
self.init_mu = initializers.normal(mean=0., stddev=0.1)
            self.init_rho = initializers.normal(mean=-6., stddev=0.1)
# Kernel matrix
self.W_mu = self.add_weight(name='W_mu', shape=(input_shape[1], self.units),
initializer=self.init_mu, trainable=True)
self.W_rho = self.add_weight(name='W_rho', shape=(input_shape[1], self.units),
initializer=self.init_rho, trainable=True)
# Hidden matrix
self.U_mu = self.add_weight(name='U_mu', shape=(self.state_size, self.units),
initializer=self.init_mu, trainable=True)
self.U_rho = self.add_weight(name='U_rho', shape=(self.state_size, self.units),
initializer=self.init_rho, trainable=True)
# Bias vector
self.b_mu = self.add_weight(name='b_mu', shape=(self.units,),
initializer=self.init_mu, trainable=True)
self.b_rho = self.add_weight(name='b_rho', shape=(self.units,),
initializer=self.init_rho, trainable=True)
# Sampling noise
if self.reparam == 'local':
self.r_eps = tf.Variable(tf.zeros(int(self.units/3)), trainable=False)
self.u_eps = tf.Variable(tf.zeros(int(self.units/3)), trainable=False)
self.h_pre_eps = tf.Variable(tf.zeros(int(self.units/3)), trainable=False)
else:
self.W_eps = tf.Variable(tf.zeros_like(self.W_rho), trainable=False)
self.U_eps = tf.Variable(tf.zeros_like(self.U_rho), trainable=False)
self.b_eps = tf.Variable(tf.zeros_like(self.b_rho), trainable=False)
# Add KL-divergence loss
self.add_loss(lambda: self.KL())
# Create masks for pruning
self.W_mask = tf.ones_like(self.W_mu)
self.U_mask = tf.ones_like(self.U_mu)
self.b_mask = tf.ones_like(self.b_mu)
# Super build function
super().build(input_shape)
# Standard function to compute output
def call(self, inputs, states, **kwargs):
# Get state value
h_min1 = states[0]
# Sample noise for first time step
if K.sum(h_min1) == 0:
self.sample_noise()
# For local reparameterization
if self.reparam == 'local':
# Split means ...
W_r_mu, W_u_mu, W_h_mu = tf.split(self.W_mu, 3, axis=1)
U_r_mu, U_u_mu, U_h_mu = tf.split(self.U_mu, 3, axis=1)
b_r_mu, b_u_mu, b_h_mu = tf.split(self.b_mu, 3, axis=0)
# ... and variances
W_r_sig, W_u_sig, W_h_sig = tf.split(K.softplus(self.W_rho), 3, axis=1)
U_r_sig, U_u_sig, U_h_sig = tf.split(K.softplus(self.U_rho), 3, axis=1)
b_r_sig, b_u_sig, b_h_sig = tf.split(K.softplus(self.b_rho), 3, axis=0)
# Reset gate
r_mu = K.dot(inputs, W_r_mu) + K.dot(h_min1, U_r_mu) + b_r_mu
r_sig = K.dot(K.square(inputs), K.square(W_r_sig)) + K.dot(K.square(h_min1), K.square(U_r_sig)) + b_r_sig
r = tf.math.sigmoid(r_mu + r_sig * self.r_eps)
# Update gate
u_mu = K.dot(inputs, W_u_mu) + K.dot(h_min1, U_u_mu) + b_u_mu
u_sig = K.dot(K.square(inputs), K.square(W_u_sig)) + K.dot(K.square(h_min1), K.square(U_u_sig)) + b_u_sig
u = tf.math.sigmoid(u_mu + u_sig * self.u_eps)
# Hidden unit pre
h_pre_mu = K.dot(inputs, W_h_mu) + K.dot(h_min1, U_h_mu) + b_h_mu
h_pre_sig = K.dot(K.square(inputs), K.square(W_h_sig)) + K.dot(K.square(h_min1), K.square(U_h_sig)) + b_h_sig
h_pre = tf.math.tanh(h_pre_mu + h_pre_sig * self.h_pre_eps)
# Hidden unit final
h = u * h_min1 + (1. - u) * h_pre
# For global reparameterization:
else:
# Sample and split parameters
W_r, W_u, W_h = tf.split(self.W_mu + K.softplus(self.W_rho) * self.W_eps, 3, axis=1)
U_r, U_u, U_h = tf.split(self.U_mu + K.softplus(self.U_rho) * self.U_eps, 3, axis=1)
b_r, b_u, b_h = tf.split(self.b_mu + K.softplus(self.b_rho) * self.b_eps, 3, axis=0)
# Reset gate
r = tf.math.sigmoid(K.dot(inputs, W_r) + K.dot(h_min1, U_r) + b_r)
# Update gate
u = tf.math.sigmoid(K.dot(inputs, W_u) + K.dot(h_min1, U_u) + b_u)
# Hidden unit pre
h_pre = tf.math.tanh(K.dot(inputs, W_h) + K.dot(r * h_min1, U_h) + b_h)
# Hidden unit final
h = u * h_min1 + (1. - u) * h_pre
# Return cell output and state
return h, [h]
# Custom function for sampling noise matrices and vectors
def sample_noise(self):
# Local reparameterization
if self.reparam == 'local':
self.r_eps.assign(tf.random.normal(tf.shape(self.r_eps)))
self.u_eps.assign(tf.random.normal(tf.shape(self.u_eps)))
self.h_pre_eps.assign(tf.random.normal(tf.shape(self.h_pre_eps)))
# Global reparameterization
else:
self.W_eps.assign(tf.random.normal(tf.shape(self.W_eps)))
self.U_eps.assign(tf.random.normal(tf.shape(self.U_eps)))
self.b_eps.assign(tf.random.normal(tf.shape(self.b_eps)))
# Custom function to compute KL-divergence values given mean and std.dev.
def kl_value(self, mean, std):
# Get variance
var = K.square(std)
# KL-divergence values
KL = (var + K.square(mean)) / self.prior_var - 1. + K.log(self.prior_var) - K.log(var)
# Return filtered values
return 0.5 * K.sum(tf.boolean_mask(KL, tf.math.is_finite(KL)))
# Custom function to compute total KL-divergence loss of layer
def KL(self):
# Get all variances
W_sig, U_sig, b_sig = K.softplus(self.W_rho), K.softplus(self.U_rho), K.softplus(self.b_rho)
# Return values
return self.kl_value(self.W_mu, W_sig) + self.kl_value(self.U_mu, U_sig) + self.kl_value(self.b_mu, b_sig)
# Custom function for compression based on BMR
def compress(self, red_var=1e-16):
# Kernel matrix
w_mean = self.W_mu
w_rho = self.W_rho
w_var = K.square(K.softplus(w_rho))
# Compute BMR values
BMR_w = self.BMR(w_mean, w_var, red_var)
# Compress parameters with dVFE <= 0
self.W_mu.assign(tf.where(BMR_w<=0, tf.zeros_like(w_mean), w_mean))
self.W_rho.assign(tf.where(BMR_w<=0, -1e5*tf.ones_like(w_rho), w_rho))
# Update kernel mask
self.W_mask = tf.where(BMR_w<=0, tf.zeros_like(w_mean), self.W_mask)
# Hidden matrix
u_mean = self.U_mu
u_rho = self.U_rho
u_var = K.square(K.softplus(u_rho))
# Compute BMR values
BMR_u = self.BMR(u_mean, u_var, red_var)
# Compress parameters with dVFE <= 0
self.U_mu.assign(tf.where(BMR_u<=0, tf.zeros_like(u_mean), u_mean))
self.U_rho.assign(tf.where(BMR_u<=0, -1e5*tf.ones_like(u_rho), u_rho))
# Update hidden mask
self.U_mask = tf.where(BMR_u<=0, tf.zeros_like(u_mean), self.U_mask)
# Bias
b_mean = self.b_mu
b_rho = self.b_rho
b_var = K.square(K.softplus(b_rho))
# Compute BMR values
BMR_b = self.BMR(b_mean, b_var, red_var)
# Compress parameters with dVFE <= 0
self.b_mu.assign(tf.where(BMR_b<=0, tf.zeros_like(b_mean), b_mean))
self.b_rho.assign(tf.where(BMR_b<=0, -1e5*tf.ones_like(b_rho), b_rho))
# Update bias mask
self.b_mask = tf.where(BMR_b<=0, tf.zeros_like(b_mean), self.b_mask)
# Custom function to compute BMR values
def BMR(self, mean, var, red_var):
# Compute intermediate values
Pi_i = 1. / red_var
P_f = 1. / var
P_i = P_f + Pi_i - 1. / self.prior_var
mu_i = P_f * mean / P_i
# Return BMR values
return 0.5 * ((mean**2 * P_f - mu_i**2 * P_i) - K.log(Pi_i * P_f / P_i * self.prior_var))
# Custom function to reset model parameters
def param_reset(self):
# Kernel matrix
w_mean = self.W_mu
w_rho = self.W_rho
# Reset kernel
self.W_mu.assign(tf.where(self.W_mask==0, tf.zeros_like(w_mean), w_mean))
self.W_rho.assign(tf.where(self.W_mask==0, -1e5*tf.ones_like(w_rho), w_rho))
# Hidden matrix
u_mean = self.U_mu
u_rho = self.U_rho
# Reset hidden
self.U_mu.assign(tf.where(self.U_mask==0, tf.zeros_like(u_mean), u_mean))
self.U_rho.assign(tf.where(self.U_mask==0, -1e5*tf.ones_like(u_rho), u_rho))
# Bias vector
b_mean = self.b_mu
b_rho = self.b_rho
# Reset bias
self.b_mu.assign(tf.where(self.b_mask==0, tf.zeros_like(b_mean), b_mean))
self.b_rho.assign(tf.where(self.b_mask==0, -1e5*tf.ones_like(b_rho), b_rho)) | 22,719 | 41.706767 | 132 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/bayesian-tensorflow/src/bayesian_tensorflow/layers/variance_backpropagation.py | # Imports
import math
from keras import backend as K
from keras import initializers
import tensorflow as tf
# Local functions
from bayesian_tensorflow import activations
# Dense layer
class DenseVBP(tf.keras.layers.Layer):
"""
Variational fully connected layer (dense), following Variance Back-Propagation (VBP).
    It takes the number of units as its input; all other inputs are optional.
"""
def __init__(self,
units, # number of output features
is_input = False, # if layer is input layer
is_output = False, # if layer is output layer
data_var = 1e-3, # initial value for data variance
prior_var = 1., # prior variance of parameters
std_dev = 0.01, # standard deviation of initializer
init = 'prior', # manner in which params are initialized
seed = None, # seed for (param) initialization
**kwargs):
# Copy inputs ...
self.units = units
self.is_input = is_input
self.is_output = is_output
self.data_var = data_var
self.prior_var = prior_var
self.std_dev = std_dev
self.init = init
# ... and set seed
if seed is not None:
tf.random.set_seed(seed)
# Other args
super().__init__(**kwargs)
# Standard function to return output shape
def compute_output_shape(self, input_shape):
return input_shape[0], self.units
# Standard function to create layer parameters
def build(self, input_shape):
# Initializer
if self.init == 'prior': # 'prior' is (sampled around) the prior
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho2 = initializers.normal(mean=K.log(K.exp(self.prior_var) - 1.),
stddev=self.std_dev)
elif self.init == 'he': # 'he' uses mean and variance from HeNormal
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho2 = initializers.normal(mean=K.log(K.exp(2. / input_shape[1]) - 1.),
stddev=self.std_dev)
elif self.init == 'glorot': # 'glorot' uses mean and variance from GlorotNormal
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho2 = initializers.normal(mean=K.log(K.exp(2. / (input_shape[1] + self.units)) - 1.),
stddev=self.std_dev)
elif self.init == 'paper': # 'paper' follows Haussmann et al. (2019)
self.init_mu = initializers.HeNormal()
self.init_rho2 = initializers.normal(mean=-9., stddev=1e-3)
elif self.init == 'tf': # 'tf' follows the TensorFlow implementation
self.init_mu = initializers.normal(mean=0., stddev=0.1)
self.init_rho2 = initializers.normal(mean=-6., stddev=0.1)
# Weight matrix 'W', also called kernel
self.kernel_mu = self.add_weight(name='kernel_mu', shape=(input_shape[1], self.units),
initializer=self.init_mu, trainable=True)
self.kernel_rho2 = self.add_weight(name='kernel_rho2', shape=(input_shape[1], self.units),
initializer=self.init_rho2, trainable=True)
# Bias vector 'b'
self.bias_mu = self.add_weight(name='bias_mu', shape=(self.units,),
initializer=self.init_mu, trainable=True)
self.bias_rho2 = self.add_weight(name='bias_rho2', shape=(self.units,),
initializer=self.init_rho2, trainable=True)
# Add KL-divergence loss
self.add_loss(lambda: self.KL())
# Create masks for pruning
self.kernel_mask = tf.ones_like(self.kernel_mu)
self.bias_mask = tf.ones_like(self.bias_mu)
# Super build function
super().build(input_shape)
# Standard function to compute output
def call(self, inputs, **kwargs):
# If input layer, create variance
if self.is_input:
x_mean, x_var = inputs, self.data_var * tf.ones_like(inputs)
# Else, split inputs
else:
x_mean, x_var = tf.unstack(inputs, axis=-1)
# Gather posterior parameters
w_mean, b_mean = self.kernel_mu, self.bias_mu
w_var, b_var = K.softplus(self.kernel_rho2), K.softplus(self.bias_rho2)
# Compute E[h] = E[W]*E[x] + E[b]
h_mean = K.dot(x_mean, w_mean) + b_mean
# Compute Var[h] = Var[x]*(E[W]^2 + Var[W]) + E[x]^2*Var[W] + Var[b]
h_var = K.dot(x_var, (K.square(w_mean) + w_var)) + K.dot(K.square(x_mean), w_var) + b_var
# Return just output ...
if self.is_output:
# i.e. E[h] and Var[h]
return tf.stack([h_mean, h_var], axis=-1)
# ... or return with ReLU activation function
else:
# i.e. E[ReLU(h)] and Var[ReLU(h)]
y_mean, y_var = activations.relu_moments(h_mean, h_var)
return tf.stack([y_mean, y_var], axis=-1)
# Custom function to compute KL-divergence loss of layer
def KL(self):
# Kernel
w_mean = self.kernel_mu
w_var = K.softplus(self.kernel_rho2)
w_vals = (w_var + K.square(w_mean)) / self.prior_var - 1. + K.log(self.prior_var) - K.log(w_var)
KL_w = 0.5 * K.sum(tf.boolean_mask(w_vals, tf.math.is_finite(w_vals)))
# Bias
b_mean = self.bias_mu
b_var = K.softplus(self.bias_rho2)
b_vals = (b_var + K.square(b_mean)) / self.prior_var - 1. + K.log(self.prior_var) - K.log(b_var)
KL_b = 0.5 * K.sum(tf.boolean_mask(b_vals, tf.math.is_finite(b_vals)))
# Return sum of KLs
return KL_w + KL_b
# Custom function for compression based on BMR
def compress(self, red_var=1e-16):
# Kernel matrix
w_mean = self.kernel_mu
w_rho2 = self.kernel_rho2
w_var = K.softplus(w_rho2)
# Compute BMR values
BMR_w = self.BMR(w_mean, w_var, red_var)
# Compress parameters with dVFE <= 0
self.kernel_mu.assign(tf.where(BMR_w<=0, tf.zeros_like(w_mean), w_mean))
self.kernel_rho2.assign(tf.where(BMR_w<=0, -1e5*tf.ones_like(w_rho2), w_rho2))
# Update kernel mask
self.kernel_mask = tf.where(BMR_w<=0, tf.zeros_like(w_mean), self.kernel_mask)
# Bias vector
b_mean = self.bias_mu
b_rho2 = self.bias_rho2
b_var = K.softplus(b_rho2)
# Compute BMR values
BMR_b = self.BMR(b_mean, b_var, red_var)
# Compress parameters with dVFE <= 0
self.bias_mu.assign(tf.where(BMR_b<=0, tf.zeros_like(b_mean), b_mean))
self.bias_rho2.assign(tf.where(BMR_b<=0, -1e5*tf.ones_like(b_rho2), b_rho2))
# Update bias mask
self.bias_mask = tf.where(BMR_b<=0, tf.zeros_like(b_mean), self.bias_mask)
# Custom function to compute BMR values
def BMR(self, mean, var, red_var):
# Compute intermediate values
Pi_i = 1. / red_var
P_f = 1. / var
P_i = P_f + Pi_i - 1. / self.prior_var
mu_i = P_f * mean / P_i
# Return BMR values
return 0.5 * ((mean**2 * P_f - mu_i**2 * P_i) - K.log(Pi_i * P_f / P_i * self.prior_var))
# Custom function to reset model parameters
def param_reset(self):
# Kernel matrix
w_mean = self.kernel_mu
w_rho2 = self.kernel_rho2
# Reset kernel
self.kernel_mu.assign(tf.where(self.kernel_mask==0, tf.zeros_like(w_mean), w_mean))
self.kernel_rho2.assign(tf.where(self.kernel_mask==0, -1e5*tf.ones_like(w_rho2), w_rho2))
# Bias vector
b_mean = self.bias_mu
b_rho2 = self.bias_rho2
# Reset bias
self.bias_mu.assign(tf.where(self.bias_mask==0, tf.zeros_like(b_mean), b_mean))
self.bias_rho2.assign(tf.where(self.bias_mask==0, -1e5*tf.ones_like(b_rho2), b_rho2))
# Custom layer to add Gamma random variable for precision
class GammaVBP(tf.keras.layers.Layer):
"""
Dummy layer for adding an alpha and beta parameter of a Gamma distribution to a BNN.
Allows for joint optimization of posterior precision parameter(s).
"""
def __init__(self,
units, # number of output features
alpha = 1., # initial value for alpha
beta = 1., # initial value for beta
**kwargs):
# Set units
self.units = units
# Set initial alpha and beta value
self.alpha_init = initializers.constant(K.log(alpha))
self.beta_init = initializers.constant(K.log(beta))
# Other args
super().__init__(**kwargs)
# Standard function to return output shape
def compute_output_shape(self, input_shape):
return input_shape[0], self.units
# Standard function to create layer parameters
def build(self, input_shape):
# Add (log) alpha and beta parameters
self.log_alpha = self.add_weight(name='log_alpha', shape=(self.units,),
initializer=self.alpha_init, trainable=True)
self.log_beta = self.add_weight(name='log_beta', shape=(self.units,),
initializer=self.beta_init, trainable=True)
# Super build function
super().build(input_shape)
# Standard function to compute output
def call(self, inputs, **kwargs):
# Split inputs
y_mean, y_var = tf.unstack(inputs, 2, axis=-1)
# Extend alpha and beta to match inputs size
alpha = K.exp(self.log_alpha) * tf.ones_like(y_mean)
beta = K.exp(self.log_beta) * tf.ones_like(y_mean)
# Return inputs incl. alpha and beta
return tf.stack([y_mean, y_var, alpha, beta], axis=-1)
# Custom function for KL-divergence
def KL(self):
# Get alpha and beta
alpha = K.exp(self.log_alpha)
beta = K.exp(self.log_beta)
# Return KL-divergence
return K.sum((alpha - 1) * tf.math.digamma(alpha) - tf.math.lgamma(alpha) + tf.math.log(beta) + alpha * ((1 - beta) / beta))
# GRU cell (i.e. layer)
class GRUCellVBP(tf.keras.layers.Layer):
"""
Variational Gated Recurrent Unit (GRU), following Variance Back-Propagation (VBP).
    It takes the number of units as its input; all other inputs are optional.
"""
def __init__(self,
units, # number of output features
is_input = False, # if layer is input layer
data_var = 1e-3, # initial value for data variance
prior_var = 1., # prior variance of parameters
std_dev = 0.01, # standard deviation of initializer
init = 'prior', # manner in which params are initialized
seed = None, # seed for (weight) initialization
**kwargs):
# Copy inputs and set seed
self.units = 3*units
self.state_size = 2*units
self.is_input = is_input
self.data_var = data_var
self.prior_var = prior_var
self.std_dev = std_dev
self.init = init
# ... and set seed
        if seed is not None:
tf.random.set_seed(seed)
# Other args
super().__init__(**kwargs)
# Standard function to return output shape
def compute_output_shape(self, input_shape):
return input_shape[0], self.units
# Standard function to create layer parameters
def build(self, input_shape):
# Initializer
if self.init == 'prior': # 'prior' is (sampled around) the prior
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho2 = initializers.normal(mean=K.log(K.exp(self.prior_var) - 1.),
stddev=self.std_dev)
elif self.init == 'he': # 'he' uses mean and variance from HeNormal
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho2 = initializers.normal(mean=K.log(K.exp(2. / input_shape[1]) - 1.),
stddev=self.std_dev)
elif self.init == 'glorot': # 'glorot' uses mean and variance from GlorotNormal
self.init_mu = initializers.normal(mean=0., stddev=self.std_dev)
self.init_rho2 = initializers.normal(mean=K.log(K.exp(2. / (input_shape[1] + self.units)) - 1.),
stddev=self.std_dev)
elif self.init == 'paper': # 'paper' follows Haussmann et al. (2019)
self.init_mu = initializers.HeNormal()
self.init_rho2 = initializers.normal(mean=-9., stddev=1e-3)
elif self.init == 'tf': # 'tf' follows the TensorFlow implementation
self.init_mu = initializers.normal(mean=0., stddev=0.1)
self.init_rho2 = initializers.normal(mean=-6., stddev=0.1)
# Kernel matrix
self.W_mu = self.add_weight(name='W_mu', shape=(input_shape[1], self.units),
initializer=self.init_mu, trainable=True)
self.W_rho2 = self.add_weight(name='W_rho2', shape=(input_shape[1], self.units),
initializer=self.init_rho2, trainable=True)
# Hidden matrix
self.U_mu = self.add_weight(name='U_mu', shape=(int(self.state_size/2), self.units),
initializer=self.init_mu, trainable=True)
self.U_rho2 = self.add_weight(name='U_rho2', shape=(int(self.state_size/2), self.units),
initializer=self.init_rho2, trainable=True)
# Bias vector
self.b_mu = self.add_weight(name='b_mu', shape=(self.units,),
initializer=self.init_mu, trainable=True)
self.b_rho2 = self.add_weight(name='b_rho2', shape=(self.units,),
initializer=self.init_rho2, trainable=True)
# Add KL-divergence loss
self.add_loss(lambda: self.KL())
# Create masks for pruning
self.W_mask = tf.ones_like(self.W_mu)
self.U_mask = tf.ones_like(self.U_mu)
self.b_mask = tf.ones_like(self.b_mu)
# Super build function
super().build(input_shape)
# Standard function to compute output
def call(self, inputs, states, **kwargs):
# If input layer, create variance
if self.is_input:
x_mean, x_var = inputs, self.data_var * tf.ones_like(inputs)
# Else, split inputs
else:
x_mean, x_var = tf.unstack(inputs, axis=-1)
# Split states
h_min1_mean, h_min1_var = tf.split(states[0], 2, axis=1)
# Split means ...
W_r_mu, W_u_mu, W_h_mu = tf.split(self.W_mu, 3, axis=1)
U_r_mu, U_u_mu, U_h_mu = tf.split(self.U_mu, 3, axis=1)
b_r_mu, b_u_mu, b_h_mu = tf.split(self.b_mu, 3, axis=0)
# ... and variances
W_r_var, W_u_var, W_h_var = tf.split(K.softplus(self.W_rho2), 3, axis=1)
U_r_var, U_u_var, U_h_var = tf.split(K.softplus(self.U_rho2), 3, axis=1)
b_r_var, b_u_var, b_h_var = tf.split(K.softplus(self.b_rho2), 3, axis=0)
# Reset gate
r_mean = b_r_mu + K.dot(x_mean, W_r_mu) + K.dot(h_min1_mean, U_r_mu)
r_var = b_r_var + K.dot(x_var, K.square(W_r_mu) + W_r_var) + K.dot(K.square(x_mean), W_r_var) + \
K.dot(h_min1_var, K.square(U_r_mu) + U_r_var) + K.dot(K.square(h_min1_mean), U_r_var)
# Update gate
u_mean = b_u_mu + K.dot(x_mean, W_u_mu) + K.dot(h_min1_mean, U_u_mu)
u_var = b_u_var + K.dot(x_var, K.square(W_u_mu) + W_u_var) + K.dot(K.square(x_mean), W_u_var) + \
                K.dot(h_min1_var, K.square(U_u_mu) + U_u_var) + K.dot(K.square(h_min1_mean), U_u_var)
# Sigmoid activations
r_mean, r_var = activations.sigmoid_moments(r_mean, r_var)
u_mean, u_var = activations.sigmoid_moments(u_mean, u_var)
# Intermediate variance, i.e. Var[r * h]
        int_var = r_var * (K.square(h_min1_mean) + h_min1_var) + h_min1_var * K.square(r_mean)
# Hidden unit pre
h_pre_mean = b_h_mu + K.dot(x_mean, W_h_mu) + K.dot(r_mean * h_min1_mean, U_h_mu)
h_pre_var = b_h_var + K.dot(x_var, K.square(W_h_mu) + W_h_var) + K.dot(K.square(x_mean), W_h_var) + \
K.dot(int_var, K.square(U_h_mu) + U_h_var) + K.dot(K.square(r_mean * h_min1_mean), U_h_var)
# Tanh activation
h_pre_mean, h_pre_var = activations.tanh_moments(h_pre_mean, h_pre_var)
# Hidden unit final
h_mean = u_mean * h_min1_mean + (1. - u_mean) * h_pre_mean
        h_var = u_var * (K.square(h_min1_mean) + h_min1_var) + K.square(u_mean) * h_min1_var + \
                u_var * (K.square(h_pre_mean) + h_pre_var) + K.square(1. - u_mean) * h_pre_var
# Stack outputs and concat states ...
outputs = tf.stack([h_mean, h_var], axis=-1)
states = tf.concat([h_mean, h_var], axis=1)
# ... and return
return outputs, [states]
# Custom function to compute KL-divergence values given mean and std.dev.
def kl_value(self, mean, var):
# KL-divergence values
KL = (var + K.square(mean)) / self.prior_var - 1. + K.log(self.prior_var) - K.log(var)
# Return filtered values
return 0.5 * K.sum(tf.boolean_mask(KL, tf.math.is_finite(KL)))
# Custom function to compute total KL-divergence loss of layer
def KL(self):
# Get all variances
W_var, U_var, b_var = K.softplus(self.W_rho2), K.softplus(self.U_rho2), K.softplus(self.b_rho2)
# Return values
return self.kl_value(self.W_mu, W_var) + self.kl_value(self.U_mu, U_var) + self.kl_value(self.b_mu, b_var)
# Custom function for compression based on BMR
def compress(self, red_var=1e-16):
# Kernel matrix
w_mean = self.W_mu
w_rho2 = self.W_rho2
w_var = K.softplus(w_rho2)
# Compute BMR values
BMR_w = self.BMR(w_mean, w_var, red_var)
# Compress parameters with dVFE <= 0
self.W_mu.assign(tf.where(BMR_w<=0, tf.zeros_like(w_mean), w_mean))
self.W_rho2.assign(tf.where(BMR_w<=0, -1e5*tf.ones_like(w_rho2), w_rho2))
# Update kernel mask
self.W_mask = tf.where(BMR_w<=0, tf.zeros_like(w_mean), self.W_mask)
# Hidden matrix
u_mean = self.U_mu
u_rho2 = self.U_rho2
u_var = K.softplus(u_rho2)
# Compute BMR values
BMR_u = self.BMR(u_mean, u_var, red_var)
# Compress parameters with dVFE <= 0
self.U_mu.assign(tf.where(BMR_u<=0, tf.zeros_like(u_mean), u_mean))
self.U_rho2.assign(tf.where(BMR_u<=0, -1e5*tf.ones_like(u_rho2), u_rho2))
# Update hidden mask
self.U_mask = tf.where(BMR_u<=0, tf.zeros_like(u_mean), self.U_mask)
# Bias
b_mean = self.b_mu
b_rho2 = self.b_rho2
b_var = K.softplus(b_rho2)
# Compute BMR values
BMR_b = self.BMR(b_mean, b_var, red_var)
# Compress parameters with dVFE <= 0
self.b_mu.assign(tf.where(BMR_b<=0, tf.zeros_like(b_mean), b_mean))
self.b_rho2.assign(tf.where(BMR_b<=0, -1e5*tf.ones_like(b_rho2), b_rho2))
# Update bias mask
self.b_mask = tf.where(BMR_b<=0, tf.zeros_like(b_mean), self.b_mask)
# Custom function to compute BMR values
def BMR(self, mean, var, red_var):
# Compute intermediate values
Pi_i = 1. / red_var
P_f = 1. / var
P_i = P_f + Pi_i - 1. / self.prior_var
mu_i = P_f * mean / P_i
# Return BMR values
return 0.5 * ((mean**2 * P_f - mu_i**2 * P_i) - K.log(Pi_i * P_f / P_i * self.prior_var))
# Custom function to reset model parameters
def param_reset(self):
# Kernel matrix
w_mean = self.W_mu
w_rho2 = self.W_rho2
# Reset kernel
self.W_mu.assign(tf.where(self.W_mask==0, tf.zeros_like(w_mean), w_mean))
self.W_rho2.assign(tf.where(self.W_mask==0, -1e5*tf.ones_like(w_rho2), w_rho2))
# Hidden matrix
u_mean = self.U_mu
u_rho2 = self.U_rho2
# Reset hidden
self.U_mu.assign(tf.where(self.U_mask==0, tf.zeros_like(u_mean), u_mean))
self.U_rho2.assign(tf.where(self.U_mask==0, -1e5*tf.ones_like(u_rho2), u_rho2))
# Bias vector
b_mean = self.b_mu
b_rho2 = self.b_rho2
# Reset bias
self.b_mu.assign(tf.where(self.b_mask==0, tf.zeros_like(b_mean), b_mean))
self.b_rho2.assign(tf.where(self.b_mask==0, -1e5*tf.ones_like(b_rho2), b_rho2)) | 21,930 | 41.09405 | 132 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/bayesian-tensorflow/src/bayesian_tensorflow/layers/__init__.py | # Bayes-by-Backprop layers
from .bayes_by_backprop import DenseBBB
from .bayes_by_backprop import GammaBBB
from .bayes_by_backprop import GRUCellBBB
# Variance-Back-Propagation layers
from .variance_backpropagation import DenseVBP
from .variance_backpropagation import GammaVBP
from .variance_backpropagation import GRUCellVBP | 330 | 35.777778 | 48 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/experiments/figures/__init__.py | # Imports
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
# Custom function for plotting losses after training
def PlotTrainingLosses(kl_theta, kl_tau, acc_loss, figsize=[12,8]):
"""
    This function plots the VFE loss and its separate terms.
"""
# Generate gridspec
fig = plt.figure(figsize=figsize)
gs = GridSpec(2, 2, figure=fig, wspace=0.25, hspace=0.4)
ax1 = fig.add_subplot(gs[0,0])
ax2 = fig.add_subplot(gs[0,1])
ax3 = fig.add_subplot(gs[1,0])
ax4 = fig.add_subplot(gs[1,1])
# Create epochs
n = np.arange(1, len(kl_theta)+1)
# Change font-sizes
fs_t = 18
fs_x = 16
fs_l = 14
# Plot full loss
ax1.plot(n, kl_theta+kl_tau+acc_loss)
ax1.set_title("Variational Free Energy", fontsize=fs_t)
ax1.set_xlabel('epoch', fontsize=fs_x)
ax1.set_ylabel('loss', fontsize=fs_x)
ax1.grid(linewidth=0.5, alpha=0.5)
# Plot data-term
ax2.plot(n, acc_loss)
ax2.set_title("Accuracy Loss", fontsize=fs_t)
ax2.set_xlabel('epoch', fontsize=fs_x)
ax2.set_ylabel('loss', fontsize=fs_x)
ax2.grid(linewidth=0.5, alpha=0.5)
# Plot KL-term
ax3.plot(n, kl_theta+kl_tau)
ax3.set_title("Complexity Loss", fontsize=fs_t)
ax3.set_xlabel('epoch', fontsize=fs_x)
ax3.set_ylabel('loss', fontsize=fs_x)
ax3.grid(linewidth=0.5, alpha=0.5)
# Plot Theta KL-div.
ax4.plot(n, kl_theta)
ax4.set_title("Seperate KL-div.", fontsize=fs_t)
ax4.set_xlabel('epoch', fontsize=fs_x)
ax4.set_ylabel('theta', fontsize=fs_x, color='C0')
ax4.tick_params(axis='y', labelcolor='C0')
# Plot Tau KL-div.
ax5 = ax4.twinx()
ax5.plot(n, kl_tau, color='C1')
ax5.set_ylabel('tau', fontsize=fs_x, color='C1')
ax5.tick_params(axis='y', labelcolor='C1')
ax4.grid(linewidth=0.5, alpha=0.5); | 1,865 | 29.590164 | 67 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/experiments/datasets/uci.py | # Imports
import pandas as pd
# Dataset loader function
def load(name, seed=None):
"""
This function loads the UCI datasets from their respective CSV-files, specified by the `name` input.
- Datasets: boston / concrete / energy / kin8nm / naval / powerplant / wine / yacht
"""
if name == 'boston':
# Meta-data
column_names = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS',
'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV']
y_label = 'MEDV'
# Load dataset
loc = './datasets/' + name + '.csv'
raw_dataset = pd.read_csv(loc, names=column_names, sep=' ', skipinitialspace=True)
elif name == 'concrete':
# Meta-data
column_names = ['Cement', 'Slag', 'Fly Ash', 'Water', 'Superplasticizer',
'Coarse Aggregate', 'Fine Aggregate', 'Age', 'Compressive Strength']
y_label = 'Compressive Strength'
# Load dataset
loc = './datasets/' + name + '.csv'
raw_dataset = pd.read_csv(loc, names=column_names, sep=',', skipinitialspace=True)
elif name == 'energy':
# Meta-data
column_names = ['Relative Compactness', 'Surface Area', 'Wall Area', 'Roof Area', 'Overall Height',
'Orientation', 'Glazing Area', 'Glazing Distribution', 'Heating Load', 'Cooling Load']
y_labels = ['Heating Load', 'Cooling Load']
# Load dataset
loc = './datasets/' + name + '.csv'
raw_dataset = pd.read_csv(loc, names=column_names, sep=',', skipinitialspace=True)
elif name == 'kin8nm':
# Meta-data
column_names = ['theta1', 'theta2', 'theta3', 'theta4', 'theta5', 'theta6',
'theta7', 'theta8', 'y']
y_label = 'y'
# Load dataset
loc = './datasets/' + name + '.csv'
raw_dataset = pd.read_csv(loc, names=column_names, sep=',', skipinitialspace=True)
elif name == 'naval':
# Meta-data
column_names = ['lp', 'v', 'GTT', 'GTn', 'GGn', 'Ts', 'Tp', 'T48', 'T1', 'T2',
'P48', 'P1', 'P2', 'Pexh', 'TIC', 'mf', 'Compressor', 'Turbine']
y_labels = ['Compressor', 'Turbine']
# Load dataset
loc = './datasets/' + name + '.csv'
raw_dataset = pd.read_csv(loc, names=column_names, sep=',', skipinitialspace=True)
elif name == 'powerplant':
# Meta-data
column_names = ['AT', 'V', 'AP', 'RH', 'PE']
y_label = 'PE'
# Load dataset
loc = './datasets/' + name + '.csv'
raw_dataset = pd.read_csv(loc, names=column_names, sep=',', skipinitialspace=True)
elif name == 'wine':
# Meta-data
column_names = ['Fixed Acidity', 'Volatile Acidity', 'Citric Acid', 'Residual Sugar', 'Chlorides',
'Free SO2', 'Total SO2', 'Density', 'pH', 'Sulphates', 'Alcohol', 'Quality']
y_label = 'Quality'
# Load dataset
loc = './datasets/' + name + '.csv'
raw_dataset = pd.read_csv(loc, names=column_names, sep=',', skipinitialspace=True)
raw_dataset[y_label] = raw_dataset[y_label].astype(float)
elif name == 'yacht':
# Meta-data
column_names = ['Position', 'Prismatic', 'Displacement', 'Beam-draught', 'Length-beam',
'Froude', 'Resistance']
y_label = 'Resistance'
# Load dataset
loc = './datasets/' + name + '.csv'
raw_dataset = pd.read_csv(loc, names=column_names, sep=' ', skipinitialspace=True)
# Copy dataset and drop NaNs
dataset = raw_dataset.copy()
dataset = dataset.dropna()
# Split into test and train
train_dataset = dataset.sample(frac=0.9, random_state=seed)
test_dataset = dataset.drop(train_dataset.index)
# Create features ...
x_train = train_dataset.copy()
x_test = test_dataset.copy()
# ... and labels
if (name == 'energy') or (name == 'naval'):
y_train = x_train[y_labels].copy()
x_train = x_train.drop(y_labels, axis=1)
y_test = x_test[y_labels].copy()
x_test = x_test.drop(y_labels, axis=1)
else:
y_train = x_train.pop(y_label)
y_test = x_test.pop(y_label)
# Return dataset, only single 'x' and 'y'
return (pd.concat([x_train, x_test]), pd.concat([y_train, y_test])) | 4,357 | 39.728972 | 110 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/experiments/datasets/toy.py | # Imports
import numpy as np
import tensorflow as tf
import math
# Custom function to load toy dataset
def load(name):
"""
This function creates the toy dataset specified by the `name` input.
- Datasets: sine / sawtooth / square
"""
# Create training signal
x = np.arange(0, 8, 0.01) * math.pi
if name == 'sine':
y = np.sin(x)
elif name == 'sawtooth':
y = np.sin(x) - np.sin(2*x)/2 + np.sin(3*x)/3 - np.sin(4*x)/4
elif name == 'square':
y = np.sin(x) + np.sin(3*x)/3 + np.sin(5*x)/5 + np.sin(7*x)/7
# Return data
return (x, y) | 609 | 23.4 | 72 | py |
PrincipledPruningBNN | PrincipledPruningBNN-main/experiments/datasets/__init__.py | # Import all sub-modules
from . import toy
from . import uci | 60 | 19.333333 | 24 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/tools/extra/summarize.py | #!/usr/bin/env python
"""Net summarization tool.
This tool summarizes the structure of a net in a concise but comprehensive
tabular listing, taking a prototxt file as input.
Use this tool to check at a glance that the computation you've specified is the
computation you expect.
"""
from caffe.proto import caffe_pb2
from google import protobuf
import re
import argparse
# ANSI codes for coloring blobs (used cyclically)
COLORS = ['92', '93', '94', '95', '97', '96', '42', '43;30', '100',
'444', '103;30', '107;30']
DISCONNECTED_COLOR = '41'
def read_net(filename):
net = caffe_pb2.NetParameter()
with open(filename) as f:
protobuf.text_format.Parse(f.read(), net)
return net
def format_param(param):
out = []
if len(param.name) > 0:
out.append(param.name)
if param.lr_mult != 1:
out.append('x{}'.format(param.lr_mult))
if param.decay_mult != 1:
out.append('Dx{}'.format(param.decay_mult))
return ' '.join(out)
def printed_len(s):
return len(re.sub(r'\033\[[\d;]+m', '', s))
def print_table(table, max_width):
"""Print a simple nicely-aligned table.
table must be a list of (equal-length) lists. Columns are space-separated,
and as narrow as possible, but no wider than max_width. Text may overflow
columns; note that unlike string.format, this will not affect subsequent
columns, if possible."""
max_widths = [max_width] * len(table[0])
column_widths = [max(printed_len(row[j]) + 1 for row in table)
for j in range(len(table[0]))]
column_widths = [min(w, max_w) for w, max_w in zip(column_widths, max_widths)]
for row in table:
row_str = ''
right_col = 0
for cell, width in zip(row, column_widths):
right_col += width
row_str += cell + ' '
row_str += ' ' * max(right_col - printed_len(row_str), 0)
print row_str
def summarize_net(net):
disconnected_tops = set()
for lr in net.layer:
disconnected_tops |= set(lr.top)
disconnected_tops -= set(lr.bottom)
table = []
colors = {}
for lr in net.layer:
tops = []
for ind, top in enumerate(lr.top):
color = colors.setdefault(top, COLORS[len(colors) % len(COLORS)])
if top in disconnected_tops:
top = '\033[1;4m' + top
if len(lr.loss_weight) > 0:
top = '{} * {}'.format(lr.loss_weight[ind], top)
tops.append('\033[{}m{}\033[0m'.format(color, top))
top_str = ', '.join(tops)
bottoms = []
for bottom in lr.bottom:
color = colors.get(bottom, DISCONNECTED_COLOR)
bottoms.append('\033[{}m{}\033[0m'.format(color, bottom))
bottom_str = ', '.join(bottoms)
if lr.type == 'Python':
type_str = lr.python_param.module + '.' + lr.python_param.layer
else:
type_str = lr.type
# Summarize conv/pool parameters.
# TODO support rectangular/ND parameters
conv_param = lr.convolution_param
if (lr.type in ['Convolution', 'Deconvolution']
and len(conv_param.kernel_size) == 1):
arg_str = str(conv_param.kernel_size[0])
if len(conv_param.stride) > 0 and conv_param.stride[0] != 1:
arg_str += '/' + str(conv_param.stride[0])
if len(conv_param.pad) > 0 and conv_param.pad[0] != 0:
arg_str += '+' + str(conv_param.pad[0])
arg_str += ' ' + str(conv_param.num_output)
if conv_param.group != 1:
arg_str += '/' + str(conv_param.group)
elif lr.type == 'Pooling':
arg_str = str(lr.pooling_param.kernel_size)
if lr.pooling_param.stride != 1:
arg_str += '/' + str(lr.pooling_param.stride)
if lr.pooling_param.pad != 0:
arg_str += '+' + str(lr.pooling_param.pad)
else:
arg_str = ''
if len(lr.param) > 0:
param_strs = map(format_param, lr.param)
if max(map(len, param_strs)) > 0:
param_str = '({})'.format(', '.join(param_strs))
else:
param_str = ''
else:
param_str = ''
table.append([lr.name, type_str, param_str, bottom_str, '->', top_str,
arg_str])
return table
def main():
parser = argparse.ArgumentParser(description="Print a concise summary of net computation.")
parser.add_argument('filename', help='net prototxt file to summarize')
parser.add_argument('-w', '--max-width', help='maximum field width',
type=int, default=30)
args = parser.parse_args()
net = read_net(args.filename)
table = summarize_net(net)
print_table(table, max_width=args.max_width)
if __name__ == '__main__':
main()
| 4,880 | 33.617021 | 95 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/tools/extra/extract_seconds.py | #!/usr/bin/env python
import datetime
import os
import sys
def extract_datetime_from_line(line, year):
# Expected format: I0210 13:39:22.381027 25210 solver.cpp:204] Iteration 100, lr = 0.00992565
line = line.strip().split()
month = int(line[0][1:3])
day = int(line[0][3:])
timestamp = line[1]
pos = timestamp.rfind('.')
ts = [int(x) for x in timestamp[:pos].split(':')]
hour = ts[0]
minute = ts[1]
second = ts[2]
microsecond = int(timestamp[pos + 1:])
dt = datetime.datetime(year, month, day, hour, minute, second, microsecond)
return dt
def get_log_created_year(input_file):
"""Get year from log file system timestamp
"""
log_created_time = os.path.getctime(input_file)
log_created_year = datetime.datetime.fromtimestamp(log_created_time).year
return log_created_year
def get_start_time(line_iterable, year):
"""Find start time from group of lines
"""
start_datetime = None
for line in line_iterable:
line = line.strip()
if line.find('Solving') != -1:
start_datetime = extract_datetime_from_line(line, year)
break
return start_datetime
def extract_seconds(input_file, output_file):
with open(input_file, 'r') as f:
lines = f.readlines()
log_created_year = get_log_created_year(input_file)
start_datetime = get_start_time(lines, log_created_year)
assert start_datetime, 'Start time not found'
last_dt = start_datetime
out = open(output_file, 'w')
for line in lines:
line = line.strip()
if line.find('Iteration') != -1:
dt = extract_datetime_from_line(line, log_created_year)
# if it's another year
if dt.month < last_dt.month:
log_created_year += 1
dt = extract_datetime_from_line(line, log_created_year)
last_dt = dt
elapsed_seconds = (dt - start_datetime).total_seconds()
out.write('%f\n' % elapsed_seconds)
out.close()
if __name__ == '__main__':
if len(sys.argv) < 3:
print('Usage: ./extract_seconds input_file output_file')
exit(1)
extract_seconds(sys.argv[1], sys.argv[2])
| 2,208 | 29.260274 | 97 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/tools/extra/resize_and_crop_images.py | #!/usr/bin/env python
from mincepie import mapreducer, launcher
import gflags
import os
import cv2
from PIL import Image
# gflags
gflags.DEFINE_string('image_lib', 'opencv',
'OpenCV or PIL, case insensitive. The default value is the faster OpenCV.')
gflags.DEFINE_string('input_folder', '',
'The folder that contains all input images, organized in synsets.')
gflags.DEFINE_integer('output_side_length', 256,
'Expected side length of the output image.')
gflags.DEFINE_string('output_folder', '',
'The folder that we write output resized and cropped images to')
FLAGS = gflags.FLAGS
class OpenCVResizeCrop:
def resize_and_crop_image(self, input_file, output_file, output_side_length = 256):
        '''Takes an image name, resizes it and crops the center square
'''
img = cv2.imread(input_file)
height, width, depth = img.shape
new_height = output_side_length
new_width = output_side_length
if height > width:
new_height = output_side_length * height / width
else:
new_width = output_side_length * width / height
resized_img = cv2.resize(img, (new_width, new_height))
height_offset = (new_height - output_side_length) / 2
width_offset = (new_width - output_side_length) / 2
cropped_img = resized_img[height_offset:height_offset + output_side_length,
width_offset:width_offset + output_side_length]
cv2.imwrite(output_file, cropped_img)
class PILResizeCrop:
## http://united-coders.com/christian-harms/image-resizing-tips-every-coder-should-know/
def resize_and_crop_image(self, input_file, output_file, output_side_length = 256, fit = True):
'''Downsample the image.
'''
img = Image.open(input_file)
box = (output_side_length, output_side_length)
#preresize image with factor 2, 4, 8 and fast algorithm
factor = 1
while img.size[0]/factor > 2*box[0] and img.size[1]*2/factor > 2*box[1]:
factor *=2
if factor > 1:
img.thumbnail((img.size[0]/factor, img.size[1]/factor), Image.NEAREST)
#calculate the cropping box and get the cropped part
if fit:
x1 = y1 = 0
x2, y2 = img.size
wRatio = 1.0 * x2/box[0]
hRatio = 1.0 * y2/box[1]
if hRatio > wRatio:
y1 = int(y2/2-box[1]*wRatio/2)
y2 = int(y2/2+box[1]*wRatio/2)
else:
x1 = int(x2/2-box[0]*hRatio/2)
x2 = int(x2/2+box[0]*hRatio/2)
img = img.crop((x1,y1,x2,y2))
#Resize the image with best quality algorithm ANTI-ALIAS
img.thumbnail(box, Image.ANTIALIAS)
#save it into a file-like object
with open(output_file, 'wb') as out:
img.save(out, 'JPEG', quality=75)
class ResizeCropImagesMapper(mapreducer.BasicMapper):
'''The ImageNet Compute mapper.
The input value would be the file listing images' paths relative to input_folder.
'''
def map(self, key, value):
if type(value) is not str:
value = str(value)
files = [value]
image_lib = FLAGS.image_lib.lower()
if image_lib == 'pil':
resize_crop = PILResizeCrop()
else:
resize_crop = OpenCVResizeCrop()
for i, line in enumerate(files):
try:
line = line.replace(FLAGS.input_folder, '').strip()
line = line.split()
image_file_name = line[0]
input_file = os.path.join(FLAGS.input_folder, image_file_name)
output_file = os.path.join(FLAGS.output_folder, image_file_name)
output_dir = output_file[:output_file.rfind('/')]
if not os.path.exists(output_dir):
os.makedirs(output_dir)
feat = resize_crop.resize_and_crop_image(input_file, output_file,
FLAGS.output_side_length)
except Exception, e:
# we ignore the exception (maybe the image is corrupted?)
print line, Exception, e
yield value, FLAGS.output_folder
mapreducer.REGISTER_DEFAULT_MAPPER(ResizeCropImagesMapper)
mapreducer.REGISTER_DEFAULT_REDUCER(mapreducer.NoPassReducer)
mapreducer.REGISTER_DEFAULT_READER(mapreducer.FileReader)
mapreducer.REGISTER_DEFAULT_WRITER(mapreducer.FileWriter)
if __name__ == '__main__':
launcher.launch()
| 4,602 | 40.845455 | 99 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/tools/extra/parse_log.py | #!/usr/bin/env python
"""
Parse training log
Evolved from parse_log.sh
"""
import os
import re
import extract_seconds
import argparse
import csv
from collections import OrderedDict
def parse_log(path_to_log):
"""Parse log file
Returns (train_dict_list, test_dict_list)
train_dict_list and test_dict_list are lists of dicts that define the table
rows
"""
regex_iteration = re.compile('Iteration (\d+)')
regex_train_output = re.compile('Train net output #(\d+): (\S+) = ([\.\deE+-]+)')
regex_test_output = re.compile('Test net output #(\d+): (\S+) = ([\.\deE+-]+)')
regex_learning_rate = re.compile('lr = ([-+]?[0-9]*\.?[0-9]+([eE]?[-+]?[0-9]+)?)')
# Pick out lines of interest
iteration = -1
learning_rate = float('NaN')
train_dict_list = []
test_dict_list = []
train_row = None
test_row = None
logfile_year = extract_seconds.get_log_created_year(path_to_log)
with open(path_to_log) as f:
start_time = extract_seconds.get_start_time(f, logfile_year)
last_time = start_time
for line in f:
iteration_match = regex_iteration.search(line)
if iteration_match:
iteration = float(iteration_match.group(1))
if iteration == -1:
# Only start parsing for other stuff if we've found the first
# iteration
continue
try:
time = extract_seconds.extract_datetime_from_line(line,
logfile_year)
except ValueError:
# Skip lines with bad formatting, for example when resuming solver
continue
# if it's another year
if time.month < last_time.month:
logfile_year += 1
time = extract_seconds.extract_datetime_from_line(line, logfile_year)
last_time = time
seconds = (time - start_time).total_seconds()
learning_rate_match = regex_learning_rate.search(line)
if learning_rate_match:
learning_rate = float(learning_rate_match.group(1))
train_dict_list, train_row = parse_line_for_net_output(
regex_train_output, train_row, train_dict_list,
line, iteration, seconds, learning_rate
)
test_dict_list, test_row = parse_line_for_net_output(
regex_test_output, test_row, test_dict_list,
line, iteration, seconds, learning_rate
)
fix_initial_nan_learning_rate(train_dict_list)
fix_initial_nan_learning_rate(test_dict_list)
return train_dict_list, test_dict_list
def parse_line_for_net_output(regex_obj, row, row_dict_list,
line, iteration, seconds, learning_rate):
"""Parse a single line for training or test output
    Returns a tuple with (row_dict_list, row)
row: may be either a new row or an augmented version of the current row
row_dict_list: may be either the current row_dict_list or an augmented
version of the current row_dict_list
"""
output_match = regex_obj.search(line)
if output_match:
if not row or row['NumIters'] != iteration:
# Push the last row and start a new one
if row:
# If we're on a new iteration, push the last row
# This will probably only happen for the first row; otherwise
# the full row checking logic below will push and clear full
# rows
row_dict_list.append(row)
row = OrderedDict([
('NumIters', iteration),
('Seconds', seconds),
('LearningRate', learning_rate)
])
# output_num is not used; may be used in the future
# output_num = output_match.group(1)
output_name = output_match.group(2)
output_val = output_match.group(3)
row[output_name] = float(output_val)
if row and len(row_dict_list) >= 1 and len(row) == len(row_dict_list[0]):
# The row is full, based on the fact that it has the same number of
# columns as the first row; append it to the list
row_dict_list.append(row)
row = None
return row_dict_list, row
def fix_initial_nan_learning_rate(dict_list):
"""Correct initial value of learning rate
Learning rate is normally not printed until after the initial test and
training step, which means the initial testing and training rows have
LearningRate = NaN. Fix this by copying over the LearningRate from the
second row, if it exists.
"""
if len(dict_list) > 1:
dict_list[0]['LearningRate'] = dict_list[1]['LearningRate']
def save_csv_files(logfile_path, output_dir, train_dict_list, test_dict_list,
delimiter=',', verbose=False):
"""Save CSV files to output_dir
If the input log file is, e.g., caffe.INFO, the names will be
caffe.INFO.train and caffe.INFO.test
"""
log_basename = os.path.basename(logfile_path)
train_filename = os.path.join(output_dir, log_basename + '.train')
write_csv(train_filename, train_dict_list, delimiter, verbose)
test_filename = os.path.join(output_dir, log_basename + '.test')
write_csv(test_filename, test_dict_list, delimiter, verbose)
def write_csv(output_filename, dict_list, delimiter, verbose=False):
"""Write a CSV file
"""
if not dict_list:
if verbose:
print('Not writing %s; no lines to write' % output_filename)
return
dialect = csv.excel
dialect.delimiter = delimiter
with open(output_filename, 'w') as f:
dict_writer = csv.DictWriter(f, fieldnames=dict_list[0].keys(),
dialect=dialect)
dict_writer.writeheader()
dict_writer.writerows(dict_list)
if verbose:
print 'Wrote %s' % output_filename
def parse_args():
description = ('Parse a Caffe training log into two CSV files '
'containing training and testing information')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('logfile_path',
help='Path to log file')
parser.add_argument('output_dir',
help='Directory in which to place output CSV files')
parser.add_argument('--verbose',
action='store_true',
help='Print some extra info (e.g., output filenames)')
parser.add_argument('--delimiter',
default=',',
help=('Column delimiter in output files '
'(default: \'%(default)s\')'))
args = parser.parse_args()
return args
def main():
args = parse_args()
train_dict_list, test_dict_list = parse_log(args.logfile_path)
save_csv_files(args.logfile_path, args.output_dir, train_dict_list,
test_dict_list, delimiter=args.delimiter, verbose=args.verbose)
if __name__ == '__main__':
main()
| 7,136 | 32.824645 | 86 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/examples/web_demo/app.py | import os
import time
import cPickle
import datetime
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
import numpy as np
import pandas as pd
from PIL import Image
import cStringIO as StringIO
import urllib
import exifutil
import caffe
REPO_DIRNAME = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../..')
UPLOAD_FOLDER = '/tmp/caffe_demos_uploads'
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])
# Obtain the flask app object
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html', has_result=False)
@app.route('/classify_url', methods=['GET'])
def classify_url():
imageurl = flask.request.args.get('imageurl', '')
try:
string_buffer = StringIO.StringIO(
urllib.urlopen(imageurl).read())
image = caffe.io.load_image(string_buffer)
except Exception as err:
# For any exception we encounter in reading the image, we will just
# not continue.
logging.info('URL Image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open image from URL.')
)
logging.info('Image: %s', imageurl)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result, imagesrc=imageurl)
@app.route('/classify_upload', methods=['POST'])
def classify_upload():
try:
# We will save the file to disk for possible data collection.
imagefile = flask.request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join(UPLOAD_FOLDER, filename_)
imagefile.save(filename)
logging.info('Saving to %s.', filename)
image = exifutil.open_oriented_im(filename)
except Exception as err:
logging.info('Uploaded image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open uploaded image.')
)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result,
imagesrc=embed_image_html(image)
)
def embed_image_html(image):
"""Creates an image embedded in HTML base64 format."""
image_pil = Image.fromarray((255 * image).astype('uint8'))
image_pil = image_pil.resize((256, 256))
string_buf = StringIO.StringIO()
image_pil.save(string_buf, format='png')
data = string_buf.getvalue().encode('base64').replace('\n', '')
return 'data:image/png;base64,' + data
def allowed_file(filename):
return (
'.' in filename and
filename.rsplit('.', 1)[1] in ALLOWED_IMAGE_EXTENSIONS
)
class ImagenetClassifier(object):
default_args = {
'model_def_file': (
'{}/models/bvlc_reference_caffenet/deploy.prototxt'.format(REPO_DIRNAME)),
'pretrained_model_file': (
'{}/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'.format(REPO_DIRNAME)),
'mean_file': (
'{}/python/caffe/imagenet/ilsvrc_2012_mean.npy'.format(REPO_DIRNAME)),
'class_labels_file': (
'{}/data/ilsvrc12/synset_words.txt'.format(REPO_DIRNAME)),
'bet_file': (
'{}/data/ilsvrc12/imagenet.bet.pickle'.format(REPO_DIRNAME)),
}
for key, val in default_args.iteritems():
if not os.path.exists(val):
raise Exception(
"File for {} is missing. Should be at: {}".format(key, val))
default_args['image_dim'] = 256
default_args['raw_scale'] = 255.
def __init__(self, model_def_file, pretrained_model_file, mean_file,
raw_scale, class_labels_file, bet_file, image_dim, gpu_mode):
logging.info('Loading net and associated files...')
if gpu_mode:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.net = caffe.Classifier(
model_def_file, pretrained_model_file,
image_dims=(image_dim, image_dim), raw_scale=raw_scale,
mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0)
)
with open(class_labels_file) as f:
labels_df = pd.DataFrame([
{
'synset_id': l.strip().split(' ')[0],
'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
}
for l in f.readlines()
])
self.labels = labels_df.sort('synset_id')['name'].values
self.bet = cPickle.load(open(bet_file))
# A bias to prefer children nodes in single-chain paths
# I am setting the value to 0.1 as a quick, simple model.
# We could use better psychological models here...
self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1
def classify_image(self, image):
try:
starttime = time.time()
scores = self.net.predict([image], oversample=True).flatten()
endtime = time.time()
indices = (-scores).argsort()[:5]
predictions = self.labels[indices]
# In addition to the prediction text, we will also produce
# the length for the progress bar visualization.
meta = [
(p, '%.5f' % scores[i])
for i, p in zip(indices, predictions)
]
logging.info('result: %s', str(meta))
# Compute expected information gain
expected_infogain = np.dot(
self.bet['probmat'], scores[self.bet['idmapping']])
expected_infogain *= self.bet['infogain']
# sort the scores
infogain_sort = expected_infogain.argsort()[::-1]
bet_result = [(self.bet['words'][v], '%.5f' % expected_infogain[v])
for v in infogain_sort[:5]]
logging.info('bet result: %s', str(bet_result))
return (True, meta, bet_result, '%.3f' % (endtime - starttime))
except Exception as err:
logging.info('Classification error: %s', err)
return (False, 'Something went wrong when classifying the '
'image. Maybe try another one?')
def start_tornado(app, port=5000):
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(port)
print("Tornado server starting on port {}".format(port))
tornado.ioloop.IOLoop.instance().start()
def start_from_terminal(app):
"""
Parse command line options and start the server.
"""
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug',
help="enable debug mode",
action="store_true", default=False)
parser.add_option(
'-p', '--port',
help="which port to serve content on",
type='int', default=5000)
parser.add_option(
'-g', '--gpu',
help="use gpu mode",
action='store_true', default=False)
opts, args = parser.parse_args()
ImagenetClassifier.default_args.update({'gpu_mode': opts.gpu})
# Initialize classifier + warm start by forward for allocation
app.clf = ImagenetClassifier(**ImagenetClassifier.default_args)
app.clf.net.forward()
if opts.debug:
app.run(debug=True, host='0.0.0.0', port=opts.port)
else:
start_tornado(app, opts.port)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
start_from_terminal(app)
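# Example invocation (editor-added sketch; the script filename and port are
# assumptions based on the option parser above):
#
#   python app.py --port 5000 --gpu
#
# then browse to http://localhost:5000/ and exercise the /classify_url and
# /classify_upload routes defined above.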
| 7,793 | 33.184211 | 105 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/examples/web_demo/exifutil.py | """
This script handles the skimage exif problem.
"""
from PIL import Image
import numpy as np
ORIENTATIONS = { # used in apply_orientation
2: (Image.FLIP_LEFT_RIGHT,),
3: (Image.ROTATE_180,),
4: (Image.FLIP_TOP_BOTTOM,),
5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90),
6: (Image.ROTATE_270,),
7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270),
8: (Image.ROTATE_90,)
}
def open_oriented_im(im_path):
im = Image.open(im_path)
if hasattr(im, '_getexif'):
exif = im._getexif()
if exif is not None and 274 in exif:
orientation = exif[274]
im = apply_orientation(im, orientation)
img = np.asarray(im).astype(np.float32) / 255.
if img.ndim == 2:
img = img[:, :, np.newaxis]
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def apply_orientation(im, orientation):
if orientation in ORIENTATIONS:
for method in ORIENTATIONS[orientation]:
im = im.transpose(method)
return im
| 1,046 | 25.175 | 51 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/examples/pycaffe/caffenet.py | from __future__ import print_function
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
# helper function for common structures
def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
num_output=nout, pad=pad, group=group)
return conv, L.ReLU(conv, in_place=True)
def fc_relu(bottom, nout):
fc = L.InnerProduct(bottom, num_output=nout)
return fc, L.ReLU(fc, in_place=True)
def max_pool(bottom, ks, stride=1):
return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
def caffenet(lmdb, batch_size=256, include_acc=False):
data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))
# the net itself
conv1, relu1 = conv_relu(data, 11, 96, stride=4)
pool1 = max_pool(relu1, 3, stride=2)
norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)
conv2, relu2 = conv_relu(norm1, 5, 256, pad=2, group=2)
pool2 = max_pool(relu2, 3, stride=2)
norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)
conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)
pool5 = max_pool(relu5, 3, stride=2)
fc6, relu6 = fc_relu(pool5, 4096)
drop6 = L.Dropout(relu6, in_place=True)
fc7, relu7 = fc_relu(drop6, 4096)
drop7 = L.Dropout(relu7, in_place=True)
fc8 = L.InnerProduct(drop7, num_output=1000)
loss = L.SoftmaxWithLoss(fc8, label)
if include_acc:
acc = L.Accuracy(fc8, label)
return to_proto(loss, acc)
else:
return to_proto(loss)
def make_net():
with open('train.prototxt', 'w') as f:
print(caffenet('/path/to/caffe-train-lmdb'), file=f)
with open('test.prototxt', 'w') as f:
print(caffenet('/path/to/caffe-val-lmdb', batch_size=50, include_acc=True), file=f)
if __name__ == '__main__':
make_net()
| 2,112 | 36.732143 | 91 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/examples/pycaffe/tools.py | import numpy as np
class SimpleTransformer:
"""
SimpleTransformer is a simple class for preprocessing and deprocessing
images for caffe.
"""
def __init__(self, mean=[128, 128, 128]):
self.mean = np.array(mean, dtype=np.float32)
self.scale = 1.0
def set_mean(self, mean):
"""
Set the mean to subtract for centering the data.
"""
self.mean = mean
def set_scale(self, scale):
"""
Set the data scaling.
"""
self.scale = scale
def preprocess(self, im):
"""
        preprocess() emulates the pre-processing occurring in the vgg16 caffe
prototxt.
"""
im = np.float32(im)
im = im[:, :, ::-1] # change to BGR
im -= self.mean
im *= self.scale
im = im.transpose((2, 0, 1))
return im
def deprocess(self, im):
"""
inverse of preprocess()
"""
im = im.transpose(1, 2, 0)
im /= self.scale
im += self.mean
im = im[:, :, ::-1] # change to RGB
return np.uint8(im)
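# A minimal usage sketch (editor-added; the mean values and the input image are
# illustrative). preprocess() yields a C x H x W BGR float array suitable for a
# caffe blob, and deprocess() inverts it back to an H x W x C RGB uint8 image:
#
#   st = SimpleTransformer(mean=[104, 117, 123])
#   blob = st.preprocess(rgb_image)    # H x W x 3 -> 3 x H x W, BGR, mean-subtracted
#   restored = st.deprocess(blob)      # back to H x W x 3 uint8 RGB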
class CaffeSolver:
"""
    CaffeSolver is a class for creating a solver.prototxt file. It sets default
    values and can export a solver parameter file.
    Note that all parameters are stored as strings. Parameters whose values are
    themselves strings in the prototxt must be quoted, i.e. stored as strings
    within strings.
"""
def __init__(self, testnet_prototxt_path="testnet.prototxt",
trainnet_prototxt_path="trainnet.prototxt", debug=False):
self.sp = {}
# critical:
self.sp['base_lr'] = '0.001'
self.sp['momentum'] = '0.9'
# speed:
self.sp['test_iter'] = '100'
self.sp['test_interval'] = '250'
# looks:
self.sp['display'] = '25'
self.sp['snapshot'] = '2500'
self.sp['snapshot_prefix'] = '"snapshot"' # string within a string!
# learning rate policy
self.sp['lr_policy'] = '"fixed"'
# important, but rare:
self.sp['gamma'] = '0.1'
self.sp['weight_decay'] = '0.0005'
self.sp['train_net'] = '"' + trainnet_prototxt_path + '"'
self.sp['test_net'] = '"' + testnet_prototxt_path + '"'
# pretty much never change these.
self.sp['max_iter'] = '100000'
self.sp['test_initialization'] = 'false'
self.sp['average_loss'] = '25' # this has to do with the display.
self.sp['iter_size'] = '1' # this is for accumulating gradients
if (debug):
self.sp['max_iter'] = '12'
self.sp['test_iter'] = '1'
self.sp['test_interval'] = '4'
self.sp['display'] = '1'
def add_from_file(self, filepath):
"""
Reads a caffe solver prototxt file and updates the Caffesolver
instance parameters.
"""
with open(filepath, 'r') as f:
for line in f:
if line[0] == '#':
continue
splitLine = line.split(':')
self.sp[splitLine[0].strip()] = splitLine[1].strip()
def write(self, filepath):
"""
        Export solver parameters to the file given by "filepath", sorted
        alphabetically.
"""
f = open(filepath, 'w')
for key, value in sorted(self.sp.items()):
if not(type(value) is str):
raise TypeError('All solver parameters must be strings')
f.write('%s: %s\n' % (key, value))
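if __name__ == '__main__':
    # Editor-added usage sketch: write a solver file with the default
    # parameters defined above. The output path is illustrative.
    example_solver = CaffeSolver(testnet_prototxt_path="testnet.prototxt",
                                 trainnet_prototxt_path="trainnet.prototxt")
    example_solver.write('/tmp/example_solver.prototxt')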
| 3,457 | 27.344262 | 79 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/examples/pycaffe/layers/pascal_multilabel_datalayers.py | # imports
import json
import time
import pickle
import scipy.misc
import scipy.sparse  # used for the csr_matrix in load_pascal_annotation
import skimage.io
import caffe
import numpy as np
import os.path as osp
from xml.dom import minidom
from random import shuffle
from threading import Thread
from PIL import Image
from tools import SimpleTransformer
class PascalMultilabelDataLayerSync(caffe.Layer):
"""
This is a simple synchronous datalayer for training a multilabel model on
PASCAL.
"""
def setup(self, bottom, top):
self.top_names = ['data', 'label']
# === Read input parameters ===
# params is a python dictionary with layer parameters.
params = eval(self.param_str)
# Check the parameters for validity.
check_params(params)
# store input as class variables
self.batch_size = params['batch_size']
# Create a batch loader to load the images.
self.batch_loader = BatchLoader(params, None)
# === reshape tops ===
# since we use a fixed input image size, we can shape the data layer
# once. Else, we'd have to do it in the reshape call.
top[0].reshape(
self.batch_size, 3, params['im_shape'][0], params['im_shape'][1])
# Note the 20 channels (because PASCAL has 20 classes.)
top[1].reshape(self.batch_size, 20)
print_info("PascalMultilabelDataLayerSync", params)
def forward(self, bottom, top):
"""
Load data.
"""
for itt in range(self.batch_size):
# Use the batch loader to load the next image.
im, multilabel = self.batch_loader.load_next_image()
# Add directly to the caffe data layer
top[0].data[itt, ...] = im
top[1].data[itt, ...] = multilabel
def reshape(self, bottom, top):
"""
There is no need to reshape the data, since the input is of fixed size
(rows and columns)
"""
pass
def backward(self, top, propagate_down, bottom):
"""
        This layer does not back propagate
"""
pass
class BatchLoader(object):
"""
This class abstracts away the loading of images.
Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
performed.
"""
def __init__(self, params, result):
self.result = result
self.batch_size = params['batch_size']
self.pascal_root = params['pascal_root']
self.im_shape = params['im_shape']
# get list of image indexes.
list_file = params['split'] + '.txt'
self.indexlist = [line.rstrip('\n') for line in open(
osp.join(self.pascal_root, 'ImageSets/Main', list_file))]
self._cur = 0 # current image
# this class does some simple data-manipulations
self.transformer = SimpleTransformer()
print "BatchLoader initialized with {} images".format(
len(self.indexlist))
def load_next_image(self):
"""
Load the next image in a batch.
"""
# Did we finish an epoch?
if self._cur == len(self.indexlist):
self._cur = 0
shuffle(self.indexlist)
# Load an image
index = self.indexlist[self._cur] # Get the image index
image_file_name = index + '.jpg'
im = np.asarray(Image.open(
osp.join(self.pascal_root, 'JPEGImages', image_file_name)))
im = scipy.misc.imresize(im, self.im_shape) # resize
# do a simple horizontal flip as data augmentation
flip = np.random.choice(2)*2-1
im = im[:, ::flip, :]
# Load and prepare ground truth
multilabel = np.zeros(20).astype(np.float32)
anns = load_pascal_annotation(index, self.pascal_root)
for label in anns['gt_classes']:
# in the multilabel problem we don't care how MANY instances
# there are of each class. Only if they are present.
# The "-1" is b/c we are not interested in the background
# class.
multilabel[label - 1] = 1
self._cur += 1
return self.transformer.preprocess(im), multilabel
def load_pascal_annotation(index, pascal_root):
"""
This code is borrowed from Ross Girshick's FAST-RCNN code
(https://github.com/rbgirshick/fast-rcnn).
It parses the PASCAL .xml metadata files.
See publication for further details: (http://arxiv.org/abs/1504.08083).
Thanks Ross!
"""
classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
class_to_ind = dict(zip(classes, xrange(21)))
filename = osp.join(pascal_root, 'Annotations', index + '.xml')
# print 'Loading: {}'.format(filename)
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
with open(filename) as f:
data = minidom.parseString(f.read())
objs = data.getElementsByTagName('object')
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, 21), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
# Make pixel indexes 0-based
x1 = float(get_data_from_tag(obj, 'xmin')) - 1
y1 = float(get_data_from_tag(obj, 'ymin')) - 1
x2 = float(get_data_from_tag(obj, 'xmax')) - 1
y2 = float(get_data_from_tag(obj, 'ymax')) - 1
cls = class_to_ind[
str(get_data_from_tag(obj, "name")).lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'index': index}
def check_params(params):
"""
A utility function to check the parameters for the data layers.
"""
assert 'split' in params.keys(
), 'Params must include split (train, val, or test).'
required = ['batch_size', 'pascal_root', 'im_shape']
for r in required:
assert r in params.keys(), 'Params must include {}'.format(r)
def print_info(name, params):
"""
Output some info regarding the class
"""
print "{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
name,
params['split'],
params['batch_size'],
params['im_shape'])
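# Editor-added usage sketch: this layer is instantiated from a prototxt
# "Python" layer whose param_str is eval()'d into the params dict validated by
# check_params() above. The path and sizes below are illustrative:
#
#   layer {
#     name: "data"
#     type: "Python"
#     top: "data"
#     top: "label"
#     python_param {
#       module: "pascal_multilabel_datalayers"
#       layer: "PascalMultilabelDataLayerSync"
#       param_str: "{'pascal_root': '/path/to/VOC2007', 'split': 'train', 'batch_size': 64, 'im_shape': [227, 227]}"
#     }
#   }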
| 6,846 | 30.552995 | 78 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/examples/pycaffe/layers/pyloss.py | import caffe
import numpy as np
class EuclideanLossLayer(caffe.Layer):
"""
Compute the Euclidean Loss in the same manner as the C++ EuclideanLossLayer
to demonstrate the class interface for developing layers in Python.
"""
def setup(self, bottom, top):
# check input pair
if len(bottom) != 2:
raise Exception("Need two inputs to compute distance.")
def reshape(self, bottom, top):
# check input dimensions match
if bottom[0].count != bottom[1].count:
raise Exception("Inputs must have the same dimension.")
# difference is shape of inputs
self.diff = np.zeros_like(bottom[0].data, dtype=np.float32)
# loss output is scalar
top[0].reshape(1)
def forward(self, bottom, top):
self.diff[...] = bottom[0].data - bottom[1].data
top[0].data[...] = np.sum(self.diff**2) / bottom[0].num / 2.
def backward(self, top, propagate_down, bottom):
for i in range(2):
if not propagate_down[i]:
continue
if i == 0:
sign = 1
else:
sign = -1
bottom[i].diff[...] = sign * self.diff / bottom[i].num
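# Editor-added usage sketch: wiring this Python layer into a net definition.
# Blob names and the loss weight are illustrative:
#
#   layer {
#     name: "loss"
#     type: "Python"
#     bottom: "pred"
#     bottom: "label"
#     top: "loss"
#     python_param {
#       module: "pyloss"
#       layer: "EuclideanLossLayer"
#     }
#     loss_weight: 1   # declare the top as a loss so gradients flow
#   }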
| 1,223 | 31.210526 | 79 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/examples/finetune_flickr_style/assemble_data.py | #!/usr/bin/env python
"""
Form a subset of the Flickr Style data, download images to dirname, and write
Caffe ImageData layer training file.
"""
import os
import urllib
import hashlib
import argparse
import numpy as np
import pandas as pd
from skimage import io
import multiprocessing
# Flickr returns a special image if the request is unavailable.
MISSING_IMAGE_SHA1 = '6a92790b1c2a301c6e7ddef645dca1f53ea97ac2'
example_dirname = os.path.abspath(os.path.dirname(__file__))
caffe_dirname = os.path.abspath(os.path.join(example_dirname, '../..'))
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
def download_image(args_tuple):
    "For use with multiprocessing map. Returns False on failure, True on success."
try:
url, filename = args_tuple
if not os.path.exists(filename):
urllib.urlretrieve(url, filename)
with open(filename) as f:
assert hashlib.sha1(f.read()).hexdigest() != MISSING_IMAGE_SHA1
test_read_image = io.imread(filename)
return True
except KeyboardInterrupt:
raise Exception() # multiprocessing doesn't catch keyboard exceptions
except:
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download a subset of Flickr Style to a directory')
parser.add_argument(
'-s', '--seed', type=int, default=0,
help="random seed")
parser.add_argument(
'-i', '--images', type=int, default=-1,
help="number of images to use (-1 for all [default])",
)
parser.add_argument(
'-w', '--workers', type=int, default=-1,
help="num workers used to download images. -x uses (all - x) cores [-1 default]."
)
parser.add_argument(
'-l', '--labels', type=int, default=0,
help="if set to a positive value, only sample images from the first number of labels."
)
args = parser.parse_args()
np.random.seed(args.seed)
# Read data, shuffle order, and subsample.
csv_filename = os.path.join(example_dirname, 'flickr_style.csv.gz')
df = pd.read_csv(csv_filename, index_col=0, compression='gzip')
df = df.iloc[np.random.permutation(df.shape[0])]
if args.labels > 0:
df = df.loc[df['label'] < args.labels]
if args.images > 0 and args.images < df.shape[0]:
df = df.iloc[:args.images]
# Make directory for images and get local filenames.
if training_dirname is None:
training_dirname = os.path.join(caffe_dirname, 'data/flickr_style')
images_dirname = os.path.join(training_dirname, 'images')
if not os.path.exists(images_dirname):
os.makedirs(images_dirname)
df['image_filename'] = [
os.path.join(images_dirname, _.split('/')[-1]) for _ in df['image_url']
]
# Download images.
num_workers = args.workers
if num_workers <= 0:
num_workers = multiprocessing.cpu_count() + num_workers
print('Downloading {} images with {} workers...'.format(
df.shape[0], num_workers))
pool = multiprocessing.Pool(processes=num_workers)
map_args = zip(df['image_url'], df['image_filename'])
results = pool.map(download_image, map_args)
# Only keep rows with valid images, and write out training file lists.
df = df[results]
for split in ['train', 'test']:
split_df = df[df['_split'] == split]
filename = os.path.join(training_dirname, '{}.txt'.format(split))
split_df[['image_filename', 'label']].to_csv(
filename, sep=' ', header=None, index=None)
print('Writing train/val for {} successfully downloaded images.'.format(
df.shape[0]))
| 3,636 | 35.737374 | 94 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/src/caffe/test/test_data/generate_sample_data.py | """
Generate data used in the HDF5DataLayer and GradientBasedSolver tests.
"""
import os
import numpy as np
import h5py
script_dir = os.path.dirname(os.path.abspath(__file__))
# Generate HDF5DataLayer sample_data.h5
num_cols = 8
num_rows = 10
height = 6
width = 5
total_size = num_cols * num_rows * height * width
data = np.arange(total_size)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
# We had a bug where data was copied into label, but the tests weren't
# catching it, so let's make label 1-indexed.
label = 1 + np.arange(num_rows)[:, np.newaxis]
label = label.astype('float32')
# We add an extra label2 dataset to test HDF5 layer's ability
# to handle arbitrary number of output ("top") Blobs.
label2 = label + 1
print data
print label
with h5py.File(script_dir + '/sample_data.h5', 'w') as f:
f['data'] = data
f['label'] = label
f['label2'] = label2
with h5py.File(script_dir + '/sample_data_2_gzip.h5', 'w') as f:
f.create_dataset(
'data', data=data + total_size,
compression='gzip', compression_opts=1
)
f.create_dataset(
'label', data=label,
compression='gzip', compression_opts=1,
dtype='uint8',
)
f.create_dataset(
'label2', data=label2,
compression='gzip', compression_opts=1,
dtype='uint8',
)
with open(script_dir + '/sample_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/sample_data.h5\n')
f.write('src/caffe/test/test_data/sample_data_2_gzip.h5\n')
# Generate GradientBasedSolver solver_data.h5
num_cols = 3
num_rows = 8
height = 10
width = 10
data = np.random.randn(num_rows, num_cols, height, width)
data = data.reshape(num_rows, num_cols, height, width)
data = data.astype('float32')
targets = np.random.randn(num_rows, 1)
targets = targets.astype('float32')
print data
print targets
with h5py.File(script_dir + '/solver_data.h5', 'w') as f:
f['data'] = data
f['targets'] = targets
with open(script_dir + '/solver_data_list.txt', 'w') as f:
f.write('src/caffe/test/test_data/solver_data.h5\n')
| 2,104 | 24.670732 | 70 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/draw_net.py | #!/usr/bin/env python
"""
Draw a graph of the net architecture.
"""
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from google.protobuf import text_format
import caffe
import caffe.draw
from caffe.proto import caffe_pb2
def parse_args():
"""Parse input arguments
"""
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('input_net_proto_file',
help='Input network prototxt file')
parser.add_argument('output_image_file',
help='Output image file')
parser.add_argument('--rankdir',
help=('One of TB (top-bottom, i.e., vertical), '
'RL (right-left, i.e., horizontal), or another '
'valid dot option; see '
'http://www.graphviz.org/doc/info/'
'attrs.html#k:rankdir'),
default='LR')
parser.add_argument('--phase',
help=('Which network phase to draw: can be TRAIN, '
'TEST, or ALL. If ALL, then all layers are drawn '
'regardless of phase.'),
default="ALL")
args = parser.parse_args()
return args
def main():
args = parse_args()
net = caffe_pb2.NetParameter()
text_format.Merge(open(args.input_net_proto_file).read(), net)
print('Drawing net to %s' % args.output_image_file)
phase=None;
if args.phase == "TRAIN":
phase = caffe.TRAIN
elif args.phase == "TEST":
phase = caffe.TEST
elif args.phase != "ALL":
raise ValueError("Unknown phase: " + args.phase)
caffe.draw.draw_net_to_file(net, args.output_image_file, args.rankdir,
phase)
if __name__ == '__main__':
main()
| 1,934 | 31.79661 | 81 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/detect.py | #!/usr/bin/env python
"""
detector.py is an out-of-the-box windowed detector
callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
Note that this model was trained for image classification and not detection,
and finetuning for detection can be expected to improve results.
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- come up with a batching scheme that preserves order / keeps a unique ID
"""
import numpy as np
import pandas as pd
import os
import argparse
import time
import caffe
CROP_MODES = ['list', 'selective_search']
COORD_COLS = ['ymin', 'xmin', 'ymax', 'xmax']
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output.
parser.add_argument(
"input_file",
help="Input txt/csv filename. If .txt, must be list of filenames.\
If .csv, must be comma-separated file with header\
'filename, xmin, ymin, xmax, ymax'"
)
parser.add_argument(
"output_file",
help="Output h5/csv filename. Format depends on extension."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--crop_mode",
default="selective_search",
choices=CROP_MODES,
help="How to generate windows for detection."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of H x W x K dimensions (numpy array). " +
"Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--context_pad",
type=int,
default='16',
help="Amount of surrounding context to collect in input window."
)
args = parser.parse_args()
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if mean.shape[1:] != (1, 1):
mean = mean.mean(1).mean(1)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make detector.
detector = caffe.Detector(args.model_def, args.pretrained_model, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap,
context_pad=args.context_pad)
# Load input.
t = time.time()
print("Loading input...")
if args.input_file.lower().endswith('txt'):
with open(args.input_file) as f:
inputs = [_.strip() for _ in f.readlines()]
elif args.input_file.lower().endswith('csv'):
inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
inputs.set_index('filename', inplace=True)
else:
raise Exception("Unknown input file type: not in txt or csv.")
# Detect.
if args.crop_mode == 'list':
# Unpack sequence of (image filename, windows).
images_windows = [
(ix, inputs.iloc[np.where(inputs.index == ix)][COORD_COLS].values)
for ix in inputs.index.unique()
]
detections = detector.detect_windows(images_windows)
else:
detections = detector.detect_selective_search(inputs)
print("Processed {} windows in {:.3f} s.".format(len(detections),
time.time() - t))
# Collect into dataframe with labeled fields.
df = pd.DataFrame(detections)
df.set_index('filename', inplace=True)
df[COORD_COLS] = pd.DataFrame(
data=np.vstack(df['window']), index=df.index, columns=COORD_COLS)
del(df['window'])
# Save results.
t = time.time()
if args.output_file.lower().endswith('csv'):
# csv
# Enumerate the class probabilities.
        # Derive the number of classes from the prediction vectors themselves
        # (no NUM_OUTPUT constant is defined in this script).
        num_output = len(df['prediction'].iloc[0])
        class_cols = ['class{}'.format(x) for x in range(num_output)]
        df[class_cols] = pd.DataFrame(
            data=np.vstack(df['prediction']), index=df.index,
            columns=class_cols)
        df.to_csv(args.output_file, cols=COORD_COLS + class_cols)
else:
# h5
df.to_hdf(args.output_file, 'df', mode='w')
print("Saved to {} in {:.3f} s.".format(args.output_file,
time.time() - t))
if __name__ == "__main__":
import sys
main(sys.argv)
| 5,734 | 31.95977 | 88 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/classify.py | #!/usr/bin/env python
"""
classify.py is an out-of-the-box image classifer callable from the command line.
By default it configures and runs the Caffe reference ImageNet model.
"""
import numpy as np
import os
import sys
import argparse
import glob
import time
import caffe
def main(argv):
pycaffe_dir = os.path.dirname(__file__)
parser = argparse.ArgumentParser()
# Required arguments: input and output files.
parser.add_argument(
"input_file",
help="Input image, directory, or npy."
)
parser.add_argument(
"output_file",
help="Output npy filename."
)
# Optional arguments.
parser.add_argument(
"--model_def",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/deploy.prototxt"),
help="Model definition file."
)
parser.add_argument(
"--pretrained_model",
default=os.path.join(pycaffe_dir,
"../models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"),
help="Trained model weights file."
)
parser.add_argument(
"--gpu",
action='store_true',
help="Switch for gpu computation."
)
parser.add_argument(
"--center_only",
action='store_true',
help="Switch for prediction from center crop alone instead of " +
"averaging predictions across crops (default)."
)
parser.add_argument(
"--images_dim",
default='256,256',
help="Canonical 'height,width' dimensions of input images."
)
parser.add_argument(
"--mean_file",
default=os.path.join(pycaffe_dir,
'caffe/imagenet/ilsvrc_2012_mean.npy'),
help="Data set image mean of [Channels x Height x Width] dimensions " +
"(numpy array). Set to '' for no mean subtraction."
)
parser.add_argument(
"--input_scale",
type=float,
help="Multiply input features by this scale to finish preprocessing."
)
parser.add_argument(
"--raw_scale",
type=float,
default=255.0,
help="Multiply raw input by this scale before preprocessing."
)
parser.add_argument(
"--channel_swap",
default='2,1,0',
help="Order to permute input channels. The default converts " +
"RGB -> BGR since BGR is the Caffe default by way of OpenCV."
)
parser.add_argument(
"--ext",
default='jpg',
help="Image file extension to take as input when a directory " +
"is given as the input file."
)
args = parser.parse_args()
image_dims = [int(s) for s in args.images_dim.split(',')]
mean, channel_swap = None, None
if args.mean_file:
mean = np.load(args.mean_file)
if args.channel_swap:
channel_swap = [int(s) for s in args.channel_swap.split(',')]
if args.gpu:
caffe.set_mode_gpu()
print("GPU mode")
else:
caffe.set_mode_cpu()
print("CPU mode")
# Make classifier.
classifier = caffe.Classifier(args.model_def, args.pretrained_model,
image_dims=image_dims, mean=mean,
input_scale=args.input_scale, raw_scale=args.raw_scale,
channel_swap=channel_swap)
# Load numpy array (.npy), directory glob (*.jpg), or image file.
args.input_file = os.path.expanduser(args.input_file)
if args.input_file.endswith('npy'):
print("Loading file: %s" % args.input_file)
inputs = np.load(args.input_file)
elif os.path.isdir(args.input_file):
print("Loading folder: %s" % args.input_file)
inputs =[caffe.io.load_image(im_f)
for im_f in glob.glob(args.input_file + '/*.' + args.ext)]
else:
print("Loading file: %s" % args.input_file)
inputs = [caffe.io.load_image(args.input_file)]
print("Classifying %d inputs." % len(inputs))
# Classify.
start = time.time()
predictions = classifier.predict(inputs, not args.center_only)
print("Done in %.2f s." % (time.time() - start))
# Save
print("Saving results into %s" % args.output_file)
np.save(args.output_file, predictions)
if __name__ == '__main__':
main(sys.argv)
| 4,262 | 29.669065 | 88 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/train.py | #!/usr/bin/env python
"""
Trains a model using one or more GPUs.
"""
from multiprocessing import Process
import caffe
def train(
solver, # solver proto definition
snapshot, # solver snapshot to restore
gpus, # list of device ids
timing=False, # show timing info for compute and communications
):
# NCCL uses a uid to identify a session
uid = caffe.NCCL.new_uid()
caffe.init_log()
caffe.log('Using devices %s' % str(gpus))
procs = []
for rank in range(len(gpus)):
p = Process(target=solve,
args=(solver, snapshot, gpus, timing, uid, rank))
p.daemon = True
p.start()
procs.append(p)
for p in procs:
p.join()
def time(solver, nccl):
fprop = []
bprop = []
total = caffe.Timer()
allrd = caffe.Timer()
for _ in range(len(solver.net.layers)):
fprop.append(caffe.Timer())
bprop.append(caffe.Timer())
display = solver.param.display
def show_time():
if solver.iter % display == 0:
s = '\n'
for i in range(len(solver.net.layers)):
s += 'forw %3d %8s ' % (i, solver.net._layer_names[i])
s += ': %.2f\n' % fprop[i].ms
for i in range(len(solver.net.layers) - 1, -1, -1):
s += 'back %3d %8s ' % (i, solver.net._layer_names[i])
s += ': %.2f\n' % bprop[i].ms
s += 'solver total: %.2f\n' % total.ms
s += 'allreduce: %.2f\n' % allrd.ms
caffe.log(s)
solver.net.before_forward(lambda layer: fprop[layer].start())
solver.net.after_forward(lambda layer: fprop[layer].stop())
solver.net.before_backward(lambda layer: bprop[layer].start())
solver.net.after_backward(lambda layer: bprop[layer].stop())
solver.add_callback(lambda: total.start(), lambda: (total.stop(), allrd.start()))
solver.add_callback(nccl)
solver.add_callback(lambda: '', lambda: (allrd.stop(), show_time()))
def solve(proto, snapshot, gpus, timing, uid, rank):
caffe.set_mode_gpu()
caffe.set_device(gpus[rank])
caffe.set_solver_count(len(gpus))
caffe.set_solver_rank(rank)
caffe.set_multiprocess(True)
solver = caffe.SGDSolver(proto)
if snapshot and len(snapshot) != 0:
solver.restore(snapshot)
nccl = caffe.NCCL(solver, uid)
nccl.bcast()
if timing and rank == 0:
time(solver, nccl)
else:
solver.add_callback(nccl)
if solver.param.layer_wise_reduce:
solver.net.after_backward(nccl)
solver.step(solver.param.max_iter)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--solver", required=True, help="Solver proto definition.")
parser.add_argument("--snapshot", help="Solver snapshot to restore.")
parser.add_argument("--gpus", type=int, nargs='+', default=[0],
help="List of device ids.")
parser.add_argument("--timing", action='store_true', help="Show timing info.")
args = parser.parse_args()
train(args.solver, args.snapshot, args.gpus, args.timing)
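# Example invocation (editor-added sketch; the solver path and device ids are
# illustrative):
#
#   python train.py --solver models/bvlc_alexnet/solver.prototxt --gpus 0 1 --timing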
| 3,145 | 30.148515 | 85 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/net_spec.py | """Python net specification.
This module provides a way to write nets directly in Python, using a natural,
functional style. See examples/pycaffe/caffenet.py for an example.
Currently this works as a thin wrapper around the Python protobuf interface,
with layers and parameters automatically generated for the "layers" and
"params" pseudo-modules, which are actually objects using __getattr__ magic
to generate protobuf messages.
Note that when using to_proto or Top.to_proto, names of intermediate blobs will
be automatically generated. To explicitly specify blob names, use the NetSpec
class -- assign to its attributes directly to name layers, and call
NetSpec.to_proto to serialize all assigned layers.
This interface is expected to continue to evolve as Caffe gains new capabilities
for specifying nets. In particular, the automatically generated layer names
are not guaranteed to be forward-compatible.
"""
from collections import OrderedDict, Counter
from .proto import caffe_pb2
from google import protobuf
import six
def param_name_dict():
"""Find out the correspondence between layer names and parameter names."""
layer = caffe_pb2.LayerParameter()
# get all parameter names (typically underscore case) and corresponding
# type names (typically camel case), which contain the layer names
# (note that not all parameters correspond to layers, but we'll ignore that)
param_names = [f.name for f in layer.DESCRIPTOR.fields if f.name.endswith('_param')]
param_type_names = [type(getattr(layer, s)).__name__ for s in param_names]
# strip the final '_param' or 'Parameter'
param_names = [s[:-len('_param')] for s in param_names]
param_type_names = [s[:-len('Parameter')] for s in param_type_names]
return dict(zip(param_type_names, param_names))
def to_proto(*tops):
"""Generate a NetParameter that contains all layers needed to compute
all arguments."""
layers = OrderedDict()
autonames = Counter()
for top in tops:
top.fn._to_proto(layers, {}, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net
def assign_proto(proto, name, val):
"""Assign a Python object to a protobuf message, based on the Python
type (in recursive fashion). Lists become repeated fields/messages, dicts
become messages, and other types are assigned directly. For convenience,
repeated fields whose values are not lists are converted to single-element
lists; e.g., `my_repeated_int_field=3` is converted to
`my_repeated_int_field=[3]`."""
is_repeated_field = hasattr(getattr(proto, name), 'extend')
if is_repeated_field and not isinstance(val, list):
val = [val]
if isinstance(val, list):
if isinstance(val[0], dict):
for item in val:
proto_item = getattr(proto, name).add()
for k, v in six.iteritems(item):
assign_proto(proto_item, k, v)
else:
getattr(proto, name).extend(val)
elif isinstance(val, dict):
for k, v in six.iteritems(val):
assign_proto(getattr(proto, name), k, v)
else:
setattr(proto, name, val)
class Top(object):
"""A Top specifies a single output blob (which could be one of several
produced by a layer.)"""
def __init__(self, fn, n):
self.fn = fn
self.n = n
def to_proto(self):
"""Generate a NetParameter that contains all layers needed to compute
this top."""
return to_proto(self)
def _to_proto(self, layers, names, autonames):
return self.fn._to_proto(layers, names, autonames)
class Function(object):
"""A Function specifies a layer, its parameters, and its inputs (which
are Tops from other layers)."""
def __init__(self, type_name, inputs, params):
self.type_name = type_name
for index, input in enumerate(inputs):
if not isinstance(input, Top):
raise TypeError('%s input %d is not a Top (type is %s)' %
(type_name, index, type(input)))
self.inputs = inputs
self.params = params
self.ntop = self.params.get('ntop', 1)
# use del to make sure kwargs are not double-processed as layer params
if 'ntop' in self.params:
del self.params['ntop']
self.in_place = self.params.get('in_place', False)
if 'in_place' in self.params:
del self.params['in_place']
self.tops = tuple(Top(self, n) for n in range(self.ntop))
def _get_name(self, names, autonames):
if self not in names and self.ntop > 0:
names[self] = self._get_top_name(self.tops[0], names, autonames)
elif self not in names:
autonames[self.type_name] += 1
names[self] = self.type_name + str(autonames[self.type_name])
return names[self]
def _get_top_name(self, top, names, autonames):
if top not in names:
autonames[top.fn.type_name] += 1
names[top] = top.fn.type_name + str(autonames[top.fn.type_name])
return names[top]
def _to_proto(self, layers, names, autonames):
if self in layers:
return
bottom_names = []
for inp in self.inputs:
inp._to_proto(layers, names, autonames)
bottom_names.append(layers[inp.fn].top[inp.n])
layer = caffe_pb2.LayerParameter()
layer.type = self.type_name
layer.bottom.extend(bottom_names)
if self.in_place:
layer.top.extend(layer.bottom)
else:
for top in self.tops:
layer.top.append(self._get_top_name(top, names, autonames))
layer.name = self._get_name(names, autonames)
for k, v in six.iteritems(self.params):
# special case to handle generic *params
if k.endswith('param'):
assign_proto(layer, k, v)
else:
try:
assign_proto(getattr(layer,
_param_names[self.type_name] + '_param'), k, v)
except (AttributeError, KeyError):
assign_proto(layer, k, v)
layers[self] = layer
class NetSpec(object):
"""A NetSpec contains a set of Tops (assigned directly as attributes).
Calling NetSpec.to_proto generates a NetParameter containing all of the
layers needed to produce all of the assigned Tops, using the assigned
names."""
def __init__(self):
super(NetSpec, self).__setattr__('tops', OrderedDict())
def __setattr__(self, name, value):
self.tops[name] = value
def __getattr__(self, name):
return self.tops[name]
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __getitem__(self, item):
return self.__getattr__(item)
def to_proto(self):
names = {v: k for k, v in six.iteritems(self.tops)}
autonames = Counter()
layers = OrderedDict()
for name, top in six.iteritems(self.tops):
top._to_proto(layers, names, autonames)
net = caffe_pb2.NetParameter()
net.layer.extend(layers.values())
return net
class Layers(object):
"""A Layers object is a pseudo-module which generates functions that specify
layers; e.g., Layers().Convolution(bottom, kernel_size=3) will produce a Top
specifying a 3x3 convolution applied to bottom."""
def __getattr__(self, name):
def layer_fn(*args, **kwargs):
fn = Function(name, args, kwargs)
if fn.ntop == 0:
return fn
elif fn.ntop == 1:
return fn.tops[0]
else:
return fn.tops
return layer_fn
class Parameters(object):
"""A Parameters object is a pseudo-module which generates constants used
in layer parameters; e.g., Parameters().Pooling.MAX is the value used
to specify max pooling."""
def __getattr__(self, name):
class Param:
def __getattr__(self, param_name):
return getattr(getattr(caffe_pb2, name + 'Parameter'), param_name)
return Param()
_param_names = param_name_dict()
layers = Layers()
params = Parameters()
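# Editor-added usage sketch of the functional specification style described in
# the module docstring (layer parameters and the LMDB path are illustrative):
#
#   from caffe import layers as L, params as P
#   n = NetSpec()
#   n.data, n.label = L.Data(source='train_lmdb', backend=P.Data.LMDB,
#                            batch_size=64, ntop=2)
#   n.ip1 = L.InnerProduct(n.data, num_output=10)
#   n.loss = L.SoftmaxWithLoss(n.ip1, n.label)
#   print(n.to_proto())   # NetParameter with the assigned top/layer names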
| 8,277 | 34.835498 | 88 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/classifier.py | #!/usr/bin/env python
"""
Classifier is an image classifier specialization of Net.
"""
import numpy as np
import caffe
class Classifier(caffe.Net):
"""
Classifier extends Net for image class prediction
by scaling, center cropping, or oversampling.
Parameters
----------
image_dims : dimensions to scale input for cropping/sampling.
Default is to scale to net input size for whole-image crop.
mean, input_scale, raw_scale, channel_swap: params for
preprocessing options.
"""
def __init__(self, model_file, pretrained_file, image_dims=None,
mean=None, input_scale=None, raw_scale=None,
channel_swap=None):
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
# configure pre-processing
in_ = self.inputs[0]
self.transformer = caffe.io.Transformer(
{in_: self.blobs[in_].data.shape})
self.transformer.set_transpose(in_, (2, 0, 1))
if mean is not None:
self.transformer.set_mean(in_, mean)
if input_scale is not None:
self.transformer.set_input_scale(in_, input_scale)
if raw_scale is not None:
self.transformer.set_raw_scale(in_, raw_scale)
if channel_swap is not None:
self.transformer.set_channel_swap(in_, channel_swap)
self.crop_dims = np.array(self.blobs[in_].data.shape[2:])
if not image_dims:
image_dims = self.crop_dims
self.image_dims = image_dims
def predict(self, inputs, oversample=True):
"""
Predict classification probabilities of inputs.
Parameters
----------
inputs : iterable of (H x W x K) input ndarrays.
oversample : boolean
average predictions across center, corners, and mirrors
when True (default). Center-only prediction when False.
Returns
-------
predictions: (N x C) ndarray of class probabilities for N images and C
classes.
"""
# Scale to standardize input dimensions.
input_ = np.zeros((len(inputs),
self.image_dims[0],
self.image_dims[1],
inputs[0].shape[2]),
dtype=np.float32)
for ix, in_ in enumerate(inputs):
input_[ix] = caffe.io.resize_image(in_, self.image_dims)
if oversample:
# Generate center, corner, and mirrored crops.
input_ = caffe.io.oversample(input_, self.crop_dims)
else:
# Take center crop.
center = np.array(self.image_dims) / 2.0
crop = np.tile(center, (1, 2))[0] + np.concatenate([
-self.crop_dims / 2.0,
self.crop_dims / 2.0
])
crop = crop.astype(int)
input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]
# Classify
caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],
dtype=np.float32)
for ix, in_ in enumerate(input_):
caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)
out = self.forward_all(**{self.inputs[0]: caffe_in})
predictions = out[self.outputs[0]]
# For oversampling, average predictions across crops.
if oversample:
predictions = predictions.reshape((len(predictions) / 10, 10, -1))
predictions = predictions.mean(1)
return predictions
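# Editor-added usage sketch (model, weights and image paths are illustrative;
# the preprocessing arguments mirror those used by python/classify.py):
#
#   import caffe
#   caffe.set_mode_cpu()
#   clf = caffe.Classifier('deploy.prototxt', 'model.caffemodel',
#                          image_dims=(256, 256), raw_scale=255.0,
#                          channel_swap=(2, 1, 0))
#   im = caffe.io.load_image('example.jpg')
#   probs = clf.predict([im], oversample=True)   # shape (1, num_classes)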
| 3,537 | 34.737374 | 78 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/coord_map.py | """
Determine spatial relationships between layers to relate their coordinates.
Coordinates are mapped from input-to-output (forward), but can
be mapped output-to-input (backward) by the inverse mapping too.
This helps crop and align feature maps among other uses.
"""
from __future__ import division
import numpy as np
from caffe import layers as L
PASS_THROUGH_LAYERS = ['AbsVal', 'BatchNorm', 'Bias', 'BNLL', 'Dropout',
'Eltwise', 'ELU', 'Log', 'LRN', 'Exp', 'MVN', 'Power',
'ReLU', 'PReLU', 'Scale', 'Sigmoid', 'Split', 'TanH',
'Threshold']
def conv_params(fn):
"""
Extract the spatial parameters that determine the coordinate mapping:
kernel size, stride, padding, and dilation.
Implementation detail: Convolution, Deconvolution, and Im2col layers
define these in the convolution_param message, while Pooling has its
own fields in pooling_param. This method deals with these details to
extract canonical parameters.
"""
params = fn.params.get('convolution_param', fn.params)
axis = params.get('axis', 1)
ks = np.array(params['kernel_size'], ndmin=1)
dilation = np.array(params.get('dilation', 1), ndmin=1)
assert len({'pad_h', 'pad_w', 'kernel_h', 'kernel_w', 'stride_h',
'stride_w'} & set(fn.params)) == 0, \
'cropping does not support legacy _h/_w params'
return (axis, np.array(params.get('stride', 1), ndmin=1),
(ks - 1) * dilation + 1,
np.array(params.get('pad', 0), ndmin=1))
def crop_params(fn):
"""
Extract the crop layer parameters with defaults.
"""
params = fn.params.get('crop_param', fn.params)
axis = params.get('axis', 2) # default to spatial crop for N, C, H, W
offset = np.array(params.get('offset', 0), ndmin=1)
return (axis, offset)
class UndefinedMapException(Exception):
"""
Exception raised for layers that do not have a defined coordinate mapping.
"""
pass
def coord_map(fn):
"""
Define the coordinate mapping by its
- axis
- scale: output coord[i * scale] <- input_coord[i]
- shift: output coord[i] <- output_coord[i + shift]
s.t. the identity mapping, as for pointwise layers like ReLu, is defined by
(None, 1, 0) since it is independent of axis and does not transform coords.
"""
if fn.type_name in ['Convolution', 'Pooling', 'Im2col']:
axis, stride, ks, pad = conv_params(fn)
return axis, 1 / stride, (pad - (ks - 1) / 2) / stride
elif fn.type_name == 'Deconvolution':
axis, stride, ks, pad = conv_params(fn)
return axis, stride, (ks - 1) / 2 - pad
elif fn.type_name in PASS_THROUGH_LAYERS:
return None, 1, 0
elif fn.type_name == 'Crop':
axis, offset = crop_params(fn)
axis -= 1 # -1 for last non-coordinate dim.
return axis, 1, - offset
else:
raise UndefinedMapException
class AxisMismatchException(Exception):
"""
Exception raised for mappings with incompatible axes.
"""
pass
def compose(base_map, next_map):
"""
Compose a base coord map with scale a1, shift b1 with a further coord map
with scale a2, shift b2. The scales multiply and the further shift, b2,
is scaled by base coord scale a1.
"""
ax1, a1, b1 = base_map
ax2, a2, b2 = next_map
if ax1 is None:
ax = ax2
elif ax2 is None or ax1 == ax2:
ax = ax1
else:
raise AxisMismatchException
return ax, a1 * a2, a1 * b2 + b1
def inverse(coord_map):
"""
Invert a coord map by de-scaling and un-shifting;
this gives the backward mapping for the gradient.
"""
ax, a, b = coord_map
return ax, 1 / a, -b / a
def coord_map_from_to(top_from, top_to):
"""
    Determine the coordinate mapping between a top (from) and a top (to).
Walk the graph to find a common ancestor while composing the coord maps for
from and to until they meet. As a last step the from map is inverted.
"""
# We need to find a common ancestor of top_from and top_to.
# We'll assume that all ancestors are equivalent here (otherwise the graph
# is an inconsistent state (which we could improve this to check for)).
# For now use a brute-force algorithm.
def collect_bottoms(top):
"""
Collect the bottoms to walk for the coordinate mapping.
The general rule is that all the bottoms of a layer can be mapped, as
most layers have the same coordinate mapping for each bottom.
Crop layer is a notable exception. Only the first/cropped bottom is
mappable; the second/dimensions bottom is excluded from the walk.
"""
bottoms = top.fn.inputs
if top.fn.type_name == 'Crop':
bottoms = bottoms[:1]
return bottoms
# walk back from top_from, keeping the coord map as we go
from_maps = {top_from: (None, 1, 0)}
frontier = {top_from}
while frontier:
top = frontier.pop()
try:
bottoms = collect_bottoms(top)
for bottom in bottoms:
from_maps[bottom] = compose(from_maps[top], coord_map(top.fn))
frontier.add(bottom)
except UndefinedMapException:
pass
# now walk back from top_to until we hit a common blob
to_maps = {top_to: (None, 1, 0)}
frontier = {top_to}
while frontier:
top = frontier.pop()
if top in from_maps:
return compose(to_maps[top], inverse(from_maps[top]))
try:
bottoms = collect_bottoms(top)
for bottom in bottoms:
to_maps[bottom] = compose(to_maps[top], coord_map(top.fn))
frontier.add(bottom)
except UndefinedMapException:
continue
# if we got here, we did not find a blob in common
raise RuntimeError('Could not compute map between tops; are they '
'connected by spatial layers?')
def crop(top_from, top_to):
"""
Define a Crop layer to crop a top (from) to another top (to) by
determining the coordinate mapping between the two and net spec'ing
the axis and shift parameters of the crop.
"""
ax, a, b = coord_map_from_to(top_from, top_to)
assert (a == 1).all(), 'scale mismatch on crop (a = {})'.format(a)
assert (b <= 0).all(), 'cannot crop negative offset (b = {})'.format(b)
assert (np.round(b) == b).all(), 'cannot crop noninteger offset ' \
'(b = {})'.format(b)
return L.Crop(top_from, top_to,
crop_param=dict(axis=ax + 1, # +1 for first cropping dim.
offset=list(-np.round(b).astype(int))))
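# Editor-added usage sketch: aligning an upsampled score map to the data top
# when building a net with the net_spec interface (tops and the deconvolution
# parameters are illustrative, in the style of fully convolutional nets):
#
#   n.upscore = L.Deconvolution(n.score, convolution_param=dict(
#       num_output=21, kernel_size=64, stride=32, bias_term=False))
#   n.score_final = crop(n.upscore, n.data)   # emits a correctly offset Crop layer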
| 6,721 | 35.139785 | 79 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/detector.py | #!/usr/bin/env python
"""
Do windowed detection by classifying a number of images/crops at once,
optionally using the selective search window proposal method.
This implementation follows ideas in
Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik.
Rich feature hierarchies for accurate object detection and semantic
segmentation.
http://arxiv.org/abs/1311.2524
The selective_search_ijcv_with_python code required for the selective search
proposal mode is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
"""
import numpy as np
import os
import caffe
class Detector(caffe.Net):
"""
Detector extends Net for windowed detection by a list of crops or
selective search proposals.
Parameters
----------
mean, input_scale, raw_scale, channel_swap : params for preprocessing
options.
context_pad : amount of surrounding context to take s.t. a `context_pad`
sized border of pixels in the network input image is context, as in
R-CNN feature extraction.
"""
def __init__(self, model_file, pretrained_file, mean=None,
input_scale=None, raw_scale=None, channel_swap=None,
context_pad=None):
caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST)
# configure pre-processing
in_ = self.inputs[0]
self.transformer = caffe.io.Transformer(
{in_: self.blobs[in_].data.shape})
self.transformer.set_transpose(in_, (2, 0, 1))
if mean is not None:
self.transformer.set_mean(in_, mean)
if input_scale is not None:
self.transformer.set_input_scale(in_, input_scale)
if raw_scale is not None:
self.transformer.set_raw_scale(in_, raw_scale)
if channel_swap is not None:
self.transformer.set_channel_swap(in_, channel_swap)
self.configure_crop(context_pad)
def detect_windows(self, images_windows):
"""
Do windowed detection over given images and windows. Windows are
extracted then warped to the input dimensions of the net.
Parameters
----------
images_windows: (image filename, window list) iterable.
context_crop: size of context border to crop in pixels.
Returns
-------
detections: list of {filename: image filename, window: crop coordinates,
predictions: prediction vector} dicts.
"""
# Extract windows.
window_inputs = []
for image_fname, windows in images_windows:
image = caffe.io.load_image(image_fname).astype(np.float32)
for window in windows:
window_inputs.append(self.crop(image, window))
# Run through the net (warping windows to input dimensions).
in_ = self.inputs[0]
caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2])
+ self.blobs[in_].data.shape[2:],
dtype=np.float32)
for ix, window_in in enumerate(window_inputs):
caffe_in[ix] = self.transformer.preprocess(in_, window_in)
out = self.forward_all(**{in_: caffe_in})
predictions = out[self.outputs[0]]
# Package predictions with images and windows.
detections = []
ix = 0
for image_fname, windows in images_windows:
for window in windows:
detections.append({
'window': window,
'prediction': predictions[ix],
'filename': image_fname
})
ix += 1
return detections
def detect_selective_search(self, image_fnames):
"""
Do windowed detection over Selective Search proposals by extracting
the crop and warping to the input dimensions of the net.
Parameters
----------
image_fnames: list
Returns
-------
detections: list of {filename: image filename, window: crop coordinates,
predictions: prediction vector} dicts.
"""
import selective_search_ijcv_with_python as selective_search
# Make absolute paths so MATLAB can find the files.
image_fnames = [os.path.abspath(f) for f in image_fnames]
windows_list = selective_search.get_windows(
image_fnames,
cmd='selective_search_rcnn'
)
# Run windowed detection on the selective search list.
return self.detect_windows(zip(image_fnames, windows_list))
def crop(self, im, window):
"""
Crop a window from the image for detection. Include surrounding context
according to the `context_pad` configuration.
Parameters
----------
im: H x W x K image ndarray to crop.
window: bounding box coordinates as ymin, xmin, ymax, xmax.
Returns
-------
crop: cropped window.
"""
# Crop window from the image.
crop = im[window[0]:window[2], window[1]:window[3]]
if self.context_pad:
box = window.copy()
crop_size = self.blobs[self.inputs[0]].width # assumes square
scale = crop_size / (1. * crop_size - self.context_pad * 2)
# Crop a box + surrounding context.
half_h = (box[2] - box[0] + 1) / 2.
half_w = (box[3] - box[1] + 1) / 2.
center = (box[0] + half_h, box[1] + half_w)
scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w))
box = np.round(np.tile(center, 2) + scaled_dims)
full_h = box[2] - box[0] + 1
full_w = box[3] - box[1] + 1
scale_h = crop_size / full_h
scale_w = crop_size / full_w
pad_y = round(max(0, -box[0]) * scale_h) # amount out-of-bounds
pad_x = round(max(0, -box[1]) * scale_w)
# Clip box to image dimensions.
im_h, im_w = im.shape[:2]
box = np.clip(box, 0., [im_h, im_w, im_h, im_w])
clip_h = box[2] - box[0] + 1
clip_w = box[3] - box[1] + 1
assert(clip_h > 0 and clip_w > 0)
crop_h = round(clip_h * scale_h)
crop_w = round(clip_w * scale_w)
if pad_y + crop_h > crop_size:
crop_h = crop_size - pad_y
if pad_x + crop_w > crop_size:
crop_w = crop_size - pad_x
# collect with context padding and place in input
# with mean padding
context_crop = im[box[0]:box[2], box[1]:box[3]]
context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w))
crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean
crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop
return crop
def configure_crop(self, context_pad):
"""
Configure crop dimensions and amount of context for cropping.
If context is included, make the special input mean for context padding.
Parameters
----------
context_pad : amount of context for cropping.
"""
# crop dimensions
in_ = self.inputs[0]
tpose = self.transformer.transpose[in_]
inv_tpose = [tpose[t] for t in tpose]
self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose]
#.transpose(inv_tpose)
# context padding
self.context_pad = context_pad
if self.context_pad:
in_ = self.inputs[0]
transpose = self.transformer.transpose.get(in_)
channel_order = self.transformer.channel_swap.get(in_)
raw_scale = self.transformer.raw_scale.get(in_)
# Padding context crops needs the mean in unprocessed input space.
mean = self.transformer.mean.get(in_)
if mean is not None:
inv_transpose = [transpose[t] for t in transpose]
crop_mean = mean.copy().transpose(inv_transpose)
if channel_order is not None:
channel_order_inverse = [channel_order.index(i)
for i in range(crop_mean.shape[2])]
crop_mean = crop_mean[:, :, channel_order_inverse]
if raw_scale is not None:
crop_mean /= raw_scale
self.crop_mean = crop_mean
else:
self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
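# Editor-added usage sketch (model, weights, image path and the window are
# illustrative; windows are (ymin, xmin, ymax, xmax), as in crop() above):
#
#   det = caffe.Detector('deploy.prototxt', 'model.caffemodel',
#                        raw_scale=255.0, channel_swap=(2, 1, 0),
#                        context_pad=16)
#   dets = det.detect_windows([('example.jpg',
#                               np.array([[0, 0, 227, 227]]))])
#   # each item: {'filename': ..., 'window': ..., 'prediction': ...}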
| 8,541 | 38.364055 | 80 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/__init__.py | from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver, NCCL, Timer
from ._caffe import init_log, log, set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list, set_random_seed, solver_count, set_solver_count, solver_rank, set_solver_rank, set_multiprocess, has_nccl
from ._caffe import __version__
from .proto.caffe_pb2 import TRAIN, TEST
from .classifier import Classifier
from .detector import Detector
from . import io
from .net_spec import layers, params, NetSpec, to_proto
| 552 | 60.444444 | 216 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/pycaffe.py | """
Wrap the internal caffe C++ module (_caffe.so) with a clean, Pythonic
interface.
"""
from collections import OrderedDict
try:
from itertools import izip_longest
except ImportError:
from itertools import zip_longest as izip_longest
import numpy as np
from ._caffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, \
RMSPropSolver, AdaDeltaSolver, AdamSolver, NCCL, Timer
import caffe.io
import six
# We directly update methods from Net here (rather than using composition or
# inheritance) so that nets created by caffe (e.g., by SGDSolver) will
# automatically have the improved interface.
@property
def _Net_blobs(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""
if not hasattr(self, '_blobs_dict'):
self._blobs_dict = OrderedDict(zip(self._blob_names, self._blobs))
return self._blobs_dict
@property
def _Net_blob_loss_weights(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blob loss weights indexed by name
"""
    if not hasattr(self, '_blob_loss_weights_dict'):
self._blob_loss_weights_dict = OrderedDict(zip(self._blob_names,
self._blob_loss_weights))
return self._blob_loss_weights_dict
@property
def _Net_layer_dict(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
layers indexed by name
"""
if not hasattr(self, '_layer_dict'):
self._layer_dict = OrderedDict(zip(self._layer_names, self.layers))
return self._layer_dict
@property
def _Net_params(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
parameters indexed by name; each is a list of multiple blobs (e.g.,
weights and biases)
"""
if not hasattr(self, '_params_dict'):
self._params_dict = OrderedDict([(name, lr.blobs)
for name, lr in zip(
self._layer_names, self.layers)
if len(lr.blobs) > 0])
return self._params_dict
@property
def _Net_inputs(self):
if not hasattr(self, '_input_list'):
keys = list(self.blobs.keys())
self._input_list = [keys[i] for i in self._inputs]
return self._input_list
@property
def _Net_outputs(self):
if not hasattr(self, '_output_list'):
keys = list(self.blobs.keys())
self._output_list = [keys[i] for i in self._outputs]
return self._output_list
def _Net_forward(self, blobs=None, start=None, end=None, **kwargs):
"""
Forward pass: prepare inputs and run the net forward.
Parameters
----------
blobs : list of blobs to return in addition to output blobs.
kwargs : Keys are input blob names and values are blob ndarrays.
             For formatting inputs for Caffe, see caffe.io.Transformer.
If None, input is taken from data layers.
start : optional name of layer at which to begin the forward pass
end : optional name of layer at which to finish the forward pass
(inclusive)
Returns
-------
outs : {blob name: blob ndarray} dict.
"""
if blobs is None:
blobs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = 0
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set(self.top_names[end] + blobs)
else:
end_ind = len(self.layers) - 1
outputs = set(self.outputs + blobs)
if kwargs:
if set(kwargs.keys()) != set(self.inputs):
raise Exception('Input blob arguments do not match net inputs.')
# Set input according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for in_, blob in six.iteritems(kwargs):
if blob.shape[0] != self.blobs[in_].shape[0]:
raise Exception('Input is not batch sized')
self.blobs[in_].data[...] = blob
self._forward(start_ind, end_ind)
# Unpack blobs to extract
return {out: self.blobs[out].data for out in outputs}
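# Illustrative sketch of the forward() wrapper defined above. It is not called
# anywhere in this module; the prototxt/caffemodel paths and the 'data',
# 'prob', 'conv1' and 'pool5' blob/layer names are assumptions for the
# example, not part of the API.
def _example_forward_usage():
    import caffe
    net = caffe.Net('deploy.prototxt', 'weights.caffemodel', caffe.TEST)
    # Fill the input blob with a correctly shaped batch and run the whole net.
    batch = np.zeros(net.blobs['data'].data.shape, dtype=np.float32)
    out = net.forward(data=batch)
    probs = out['prob']
    # A contiguous sub-range of layers can also be run on its own.
    net.forward(start='conv1', end='pool5')
    return probs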
def _Net_backward(self, diffs=None, start=None, end=None, **kwargs):
"""
Backward pass: prepare diffs and run the net backward.
Parameters
----------
diffs : list of diffs to return in addition to bottom diffs.
kwargs : Keys are output blob names and values are diff ndarrays.
If None, top diffs are taken from forward loss.
start : optional name of layer at which to begin the backward pass
end : optional name of layer at which to finish the backward pass
(inclusive)
Returns
-------
outs: {blob name: diff ndarray} dict.
"""
if diffs is None:
diffs = []
if start is not None:
start_ind = list(self._layer_names).index(start)
else:
start_ind = len(self.layers) - 1
if end is not None:
end_ind = list(self._layer_names).index(end)
outputs = set(self.bottom_names[end] + diffs)
else:
end_ind = 0
outputs = set(self.inputs + diffs)
if kwargs:
if set(kwargs.keys()) != set(self.outputs):
raise Exception('Top diff arguments do not match net outputs.')
# Set top diffs according to defined shapes and make arrays single and
# C-contiguous as Caffe expects.
for top, diff in six.iteritems(kwargs):
if diff.shape[0] != self.blobs[top].shape[0]:
raise Exception('Diff is not batch sized')
self.blobs[top].diff[...] = diff
self._backward(start_ind, end_ind)
# Unpack diffs to extract
return {out: self.blobs[out].diff for out in outputs}
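# Illustrative sketch of backward() (the blob names are assumptions): after a
# forward pass one can seed a top diff and pull gradients back to the input,
# e.g. net.backward(prob=top_diff, diffs=['data'])['data'], where 'prob' is a
# net output, top_diff matches its shape, and the returned dict maps blob
# names to their diff arrays.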
def _Net_forward_all(self, blobs=None, **kwargs):
"""
Run net forward in batches.
Parameters
----------
blobs : list of blobs to extract as in forward()
kwargs : Keys are input blob names and values are blob ndarrays.
Refer to forward().
Returns
-------
all_outs : {blob name: list of blobs} dict.
"""
# Collect outputs from batches
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
for batch in self._batch(kwargs):
outs = self.forward(blobs=blobs, **batch)
for out, out_blob in six.iteritems(outs):
all_outs[out].extend(out_blob.copy())
# Package in ndarray.
for out in all_outs:
all_outs[out] = np.asarray(all_outs[out])
# Discard padding.
pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
if pad:
for out in all_outs:
all_outs[out] = all_outs[out][:-pad]
return all_outs
def _Net_forward_backward_all(self, blobs=None, diffs=None, **kwargs):
"""
Run net forward + backward in batches.
Parameters
----------
blobs: list of blobs to extract as in forward()
diffs: list of diffs to extract as in backward()
kwargs: Keys are input (for forward) and output (for backward) blob names
and values are ndarrays. Refer to forward() and backward().
            If an input or output blob is not given, the values already in the net are used.
Returns
-------
all_blobs: {blob name: blob ndarray} dict.
all_diffs: {blob name: diff ndarray} dict.
"""
# Batch blobs and diffs.
all_outs = {out: [] for out in set(self.outputs + (blobs or []))}
all_diffs = {diff: [] for diff in set(self.inputs + (diffs or []))}
forward_batches = self._batch({in_: kwargs[in_]
for in_ in self.inputs if in_ in kwargs})
backward_batches = self._batch({out: kwargs[out]
for out in self.outputs if out in kwargs})
# Collect outputs from batches (and heed lack of forward/backward batches).
for fb, bb in izip_longest(forward_batches, backward_batches, fillvalue={}):
batch_blobs = self.forward(blobs=blobs, **fb)
batch_diffs = self.backward(diffs=diffs, **bb)
for out, out_blobs in six.iteritems(batch_blobs):
all_outs[out].extend(out_blobs.copy())
for diff, out_diffs in six.iteritems(batch_diffs):
all_diffs[diff].extend(out_diffs.copy())
# Package in ndarray.
    # (iterate each dict separately; zipping two dicts would pair unrelated
    # keys and skip entries when the dicts differ in size)
    for out in all_outs:
        all_outs[out] = np.asarray(all_outs[out])
    for diff in all_diffs:
        all_diffs[diff] = np.asarray(all_diffs[diff])
# Discard padding at the end and package in ndarray.
pad = len(six.next(six.itervalues(all_outs))) - len(six.next(six.itervalues(kwargs)))
if pad:
        for out in all_outs:
            all_outs[out] = all_outs[out][:-pad]
        for diff in all_diffs:
            all_diffs[diff] = all_diffs[diff][:-pad]
return all_outs, all_diffs
def _Net_set_input_arrays(self, data, labels):
"""
Set input arrays of the in-memory MemoryDataLayer.
(Note: this is only for networks declared with the memory data layer.)
"""
if labels.ndim == 1:
labels = np.ascontiguousarray(labels[:, np.newaxis, np.newaxis,
np.newaxis])
return self._set_input_arrays(data, labels)
def _Net_batch(self, blobs):
"""
Batch blob lists according to net's batch size.
Parameters
----------
    blobs: Keys are blob names and values are lists of blobs (of any length).
Naturally, all the lists should have the same length.
Yields
------
batch: {blob name: list of blobs} dict for a single batch.
"""
num = len(six.next(six.itervalues(blobs)))
batch_size = six.next(six.itervalues(self.blobs)).shape[0]
remainder = num % batch_size
num_batches = num // batch_size
# Yield full batches.
for b in range(num_batches):
i = b * batch_size
yield {name: blobs[name][i:i + batch_size] for name in blobs}
# Yield last padded batch, if any.
if remainder > 0:
padded_batch = {}
for name in blobs:
padding = np.zeros((batch_size - remainder,)
+ blobs[name].shape[1:])
padded_batch[name] = np.concatenate([blobs[name][-remainder:],
padding])
yield padded_batch
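# Example of the batching behaviour above, with assumed sizes: for a net whose
# input batch size is 10 and 25 supplied inputs, _Net_batch yields two full
# batches plus a final batch whose last 5 entries are zero padding;
# forward_all() and forward_backward_all() trim that padding off the collected
# outputs afterwards.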
def _Net_get_id_name(func, field):
"""
    Generic property that maps each layer name to the blob names returned by func.
Used for top_names and bottom_names.
Parameters
----------
func: function id -> [id]
field: implementation field name (cache)
Returns
------
A one-parameter function that can be set as a property.
"""
@property
def get_id_name(self):
if not hasattr(self, field):
id_to_name = list(self.blobs)
res = OrderedDict([(self._layer_names[i],
[id_to_name[j] for j in func(self, i)])
for i in range(len(self.layers))])
setattr(self, field, res)
return getattr(self, field)
return get_id_name
# Attach methods to Net.
Net.blobs = _Net_blobs
Net.blob_loss_weights = _Net_blob_loss_weights
Net.layer_dict = _Net_layer_dict
Net.params = _Net_params
Net.forward = _Net_forward
Net.backward = _Net_backward
Net.forward_all = _Net_forward_all
Net.forward_backward_all = _Net_forward_backward_all
Net.set_input_arrays = _Net_set_input_arrays
Net._batch = _Net_batch
Net.inputs = _Net_inputs
Net.outputs = _Net_outputs
Net.top_names = _Net_get_id_name(Net._top_ids, "_top_names")
Net.bottom_names = _Net_get_id_name(Net._bottom_ids, "_bottom_names")
| 11,615 | 32.572254 | 89 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/draw.py | """
Caffe network visualization: draw the NetParameter protobuffer.
.. note::
This requires pydot>=1.0.2, which is not included in requirements.txt since
it requires graphviz and other prerequisites outside the scope of the
Caffe.
"""
from caffe.proto import caffe_pb2
"""
pydot is not supported under python 3 and pydot2 doesn't work properly.
pydotplus works nicely (pip install pydotplus)
"""
try:
# Try to load pydotplus
import pydotplus as pydot
except ImportError:
import pydot
# Internal layer and blob styles.
LAYER_STYLE_DEFAULT = {'shape': 'record',
'fillcolor': '#6495ED',
'style': 'filled'}
NEURON_LAYER_STYLE = {'shape': 'record',
'fillcolor': '#90EE90',
'style': 'filled'}
BLOB_STYLE = {'shape': 'octagon',
'fillcolor': '#E0E0E0',
'style': 'filled'}
def get_pooling_types_dict():
"""Get dictionary mapping pooling type number to type name
"""
desc = caffe_pb2.PoolingParameter.PoolMethod.DESCRIPTOR
d = {}
for k, v in desc.values_by_name.items():
d[v.number] = k
return d
def get_edge_label(layer):
"""Define edge label based on layer type.
"""
if layer.type == 'Data':
edge_label = 'Batch ' + str(layer.data_param.batch_size)
elif layer.type == 'Convolution' or layer.type == 'Deconvolution':
edge_label = str(layer.convolution_param.num_output)
elif layer.type == 'InnerProduct':
edge_label = str(layer.inner_product_param.num_output)
else:
edge_label = '""'
return edge_label
def get_layer_label(layer, rankdir):
"""Define node label based on layer type.
Parameters
----------
    layer : a caffe_pb2.LayerParameter describing the layer
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
Returns
-------
string :
A label for the current layer
"""
if rankdir in ('TB', 'BT'):
# If graph orientation is vertical, horizontal space is free and
# vertical space is not; separate words with spaces
separator = ' '
else:
# If graph orientation is horizontal, vertical space is free and
# horizontal space is not; separate words with newlines
separator = '\\n'
if layer.type == 'Convolution' or layer.type == 'Deconvolution':
# Outer double quotes needed or else colon characters don't parse
# properly
node_label = '"%s%s(%s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
layer.type,
separator,
layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size) else 1,
separator,
layer.convolution_param.stride[0] if len(layer.convolution_param.stride) else 1,
separator,
layer.convolution_param.pad[0] if len(layer.convolution_param.pad) else 0)
elif layer.type == 'Pooling':
pooling_types_dict = get_pooling_types_dict()
node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\
(layer.name,
separator,
pooling_types_dict[layer.pooling_param.pool],
layer.type,
separator,
layer.pooling_param.kernel_size,
separator,
layer.pooling_param.stride,
separator,
layer.pooling_param.pad)
else:
node_label = '"%s%s(%s)"' % (layer.name, separator, layer.type)
return node_label
def choose_color_by_layertype(layertype):
"""Define colors for nodes based on the layer type.
"""
color = '#6495ED' # Default
if layertype == 'Convolution' or layertype == 'Deconvolution':
color = '#FF5050'
elif layertype == 'Pooling':
color = '#FF9900'
elif layertype == 'InnerProduct':
color = '#CC33FF'
return color
def get_pydot_graph(caffe_net, rankdir, label_edges=True, phase=None):
"""Create a data structure which represents the `caffe_net`.
Parameters
----------
caffe_net : object
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
label_edges : boolean, optional
Label the edges (default is True).
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
Returns
-------
pydot graph object
"""
pydot_graph = pydot.Dot(caffe_net.name if caffe_net.name else 'Net',
graph_type='digraph',
rankdir=rankdir)
pydot_nodes = {}
pydot_edges = []
for layer in caffe_net.layer:
if phase is not None:
included = False
if len(layer.include) == 0:
included = True
if len(layer.include) > 0 and len(layer.exclude) > 0:
raise ValueError('layer ' + layer.name + ' has both include '
'and exclude specified.')
for layer_phase in layer.include:
included = included or layer_phase.phase == phase
for layer_phase in layer.exclude:
included = included and not layer_phase.phase == phase
if not included:
continue
node_label = get_layer_label(layer, rankdir)
node_name = "%s_%s" % (layer.name, layer.type)
if (len(layer.bottom) == 1 and len(layer.top) == 1 and
layer.bottom[0] == layer.top[0]):
# We have an in-place neuron layer.
pydot_nodes[node_name] = pydot.Node(node_label,
**NEURON_LAYER_STYLE)
else:
layer_style = LAYER_STYLE_DEFAULT
layer_style['fillcolor'] = choose_color_by_layertype(layer.type)
pydot_nodes[node_name] = pydot.Node(node_label, **layer_style)
for bottom_blob in layer.bottom:
pydot_nodes[bottom_blob + '_blob'] = pydot.Node('%s' % bottom_blob,
**BLOB_STYLE)
edge_label = '""'
pydot_edges.append({'src': bottom_blob + '_blob',
'dst': node_name,
'label': edge_label})
for top_blob in layer.top:
pydot_nodes[top_blob + '_blob'] = pydot.Node('%s' % (top_blob))
if label_edges:
edge_label = get_edge_label(layer)
else:
edge_label = '""'
pydot_edges.append({'src': node_name,
'dst': top_blob + '_blob',
'label': edge_label})
# Now, add the nodes and edges to the graph.
for node in pydot_nodes.values():
pydot_graph.add_node(node)
for edge in pydot_edges:
pydot_graph.add_edge(
pydot.Edge(pydot_nodes[edge['src']],
pydot_nodes[edge['dst']],
label=edge['label']))
return pydot_graph
def draw_net(caffe_net, rankdir, ext='png', phase=None):
"""Draws a caffe net and returns the image string encoded using the given
extension.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
ext : string, optional
The image extension (the default is 'png').
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
Returns
-------
string :
        Image data of the drawn graph, encoded in the requested format.
"""
return get_pydot_graph(caffe_net, rankdir, phase=phase).create(format=ext)
def draw_net_to_file(caffe_net, filename, rankdir='LR', phase=None):
"""Draws a caffe net, and saves it to file using the format given as the
file extension. Use '.raw' to output raw text that you can manually feed
to graphviz to draw graphs.
Parameters
----------
caffe_net : a caffe.proto.caffe_pb2.NetParameter protocol buffer.
filename : string
The path to a file where the networks visualization will be stored.
rankdir : {'LR', 'TB', 'BT'}
Direction of graph layout.
phase : {caffe_pb2.Phase.TRAIN, caffe_pb2.Phase.TEST, None} optional
Include layers from this network phase. If None, include all layers.
(the default is None)
"""
ext = filename[filename.rfind('.')+1:]
with open(filename, 'wb') as fid:
fid.write(draw_net(caffe_net, rankdir, ext, phase))
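# Illustrative sketch of typical usage of this module. It is not called here;
# the 'train_val.prototxt' and 'net.png' paths are placeholder values.
def _example_draw_usage():
    from google.protobuf import text_format
    net = caffe_pb2.NetParameter()
    with open('train_val.prototxt') as f:
        text_format.Merge(f.read(), net)
    # Render the parsed net left-to-right and write it as a PNG.
    draw_net_to_file(net, 'net.png', rankdir='LR')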
| 8,789 | 34.877551 | 112 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/io.py | import numpy as np
import skimage.io
from scipy.ndimage import zoom
from skimage.transform import resize
try:
# Python3 will most likely not be able to load protobuf
from caffe.proto import caffe_pb2
except:
import sys
if sys.version_info >= (3, 0):
print("Failed to include caffe_pb2, things might go wrong!")
else:
raise
## proto / datum / ndarray conversion
def blobproto_to_array(blob, return_diff=False):
"""
    Convert a blob proto to an array. By default, we will just return the data,
unless return_diff is True, in which case we will return the diff.
"""
# Read the data into an array
if return_diff:
data = np.array(blob.diff)
else:
data = np.array(blob.data)
# Reshape the array
if blob.HasField('num') or blob.HasField('channels') or blob.HasField('height') or blob.HasField('width'):
# Use legacy 4D shape
return data.reshape(blob.num, blob.channels, blob.height, blob.width)
else:
return data.reshape(blob.shape.dim)
def array_to_blobproto(arr, diff=None):
"""Converts a N-dimensional array to blob proto. If diff is given, also
convert the diff. You need to make sure that arr and diff have the same
    shape, and this function does not do a sanity check.
"""
blob = caffe_pb2.BlobProto()
blob.shape.dim.extend(arr.shape)
blob.data.extend(arr.astype(float).flat)
if diff is not None:
blob.diff.extend(diff.astype(float).flat)
return blob
def arraylist_to_blobprotovector_str(arraylist):
"""Converts a list of arrays to a serialized blobprotovec, which could be
then passed to a network for processing.
"""
vec = caffe_pb2.BlobProtoVector()
vec.blobs.extend([array_to_blobproto(arr) for arr in arraylist])
return vec.SerializeToString()
def blobprotovector_str_to_arraylist(str):
"""Converts a serialized blobprotovec to a list of arrays.
"""
vec = caffe_pb2.BlobProtoVector()
vec.ParseFromString(str)
return [blobproto_to_array(blob) for blob in vec.blobs]
def array_to_datum(arr, label=None):
"""Converts a 3-dimensional array to datum. If the array has dtype uint8,
the output data will be encoded as a string. Otherwise, the output data
will be stored in float format.
"""
if arr.ndim != 3:
raise ValueError('Incorrect array shape.')
datum = caffe_pb2.Datum()
datum.channels, datum.height, datum.width = arr.shape
if arr.dtype == np.uint8:
datum.data = arr.tostring()
else:
datum.float_data.extend(arr.astype(float).flat)
if label is not None:
datum.label = label
return datum
def datum_to_array(datum):
"""Converts a datum to an array. Note that the label is not returned,
as one can easily get it by calling datum.label.
"""
if len(datum.data):
return np.fromstring(datum.data, dtype=np.uint8).reshape(
datum.channels, datum.height, datum.width)
else:
return np.array(datum.float_data).astype(float).reshape(
datum.channels, datum.height, datum.width)
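# Illustrative round-trip through the conversions above. It is not called in
# this module; the 3 x 32 x 32 shape and the label value are arbitrary example
# values (e.g. for writing to / reading from an LMDB).
def _example_datum_roundtrip():
    arr = np.random.randint(0, 256, (3, 32, 32)).astype(np.uint8)
    datum = array_to_datum(arr, label=7)
    serialized = datum.SerializeToString()  # what a DB entry would store
    datum2 = caffe_pb2.Datum()
    datum2.ParseFromString(serialized)
    recovered = datum_to_array(datum2)
    assert (recovered == arr).all() and datum2.label == 7
    return recovered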
## Pre-processing
class Transformer:
"""
Transform input for feeding into a Net.
Note: this is mostly for illustrative purposes and it is likely better
to define your own input preprocessing routine for your needs.
Parameters
----------
    inputs : dict of {input blob name: input blob shape} describing the inputs to prepare
"""
def __init__(self, inputs):
self.inputs = inputs
self.transpose = {}
self.channel_swap = {}
self.raw_scale = {}
self.mean = {}
self.input_scale = {}
def __check_input(self, in_):
if in_ not in self.inputs:
raise Exception('{} is not one of the net inputs: {}'.format(
in_, self.inputs))
def preprocess(self, in_, data):
"""
Format input for Caffe:
- convert to single
- resize to input dimensions (preserving number of channels)
- transpose dimensions to K x H x W
- reorder channels (for instance color to BGR)
- scale raw input (e.g. from [0, 1] to [0, 255] for ImageNet models)
- subtract mean
- scale feature
Parameters
----------
in_ : name of input blob to preprocess for
data : (H' x W' x K) ndarray
Returns
-------
caffe_in : (K x H x W) ndarray for input to a Net
"""
self.__check_input(in_)
caffe_in = data.astype(np.float32, copy=False)
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
in_dims = self.inputs[in_][2:]
if caffe_in.shape[:2] != in_dims:
caffe_in = resize_image(caffe_in, in_dims)
if transpose is not None:
caffe_in = caffe_in.transpose(transpose)
if channel_swap is not None:
caffe_in = caffe_in[channel_swap, :, :]
if raw_scale is not None:
caffe_in *= raw_scale
if mean is not None:
caffe_in -= mean
if input_scale is not None:
caffe_in *= input_scale
return caffe_in
def deprocess(self, in_, data):
"""
Invert Caffe formatting; see preprocess().
"""
self.__check_input(in_)
decaf_in = data.copy().squeeze()
transpose = self.transpose.get(in_)
channel_swap = self.channel_swap.get(in_)
raw_scale = self.raw_scale.get(in_)
mean = self.mean.get(in_)
input_scale = self.input_scale.get(in_)
if input_scale is not None:
decaf_in /= input_scale
if mean is not None:
decaf_in += mean
if raw_scale is not None:
decaf_in /= raw_scale
if channel_swap is not None:
decaf_in = decaf_in[np.argsort(channel_swap), :, :]
if transpose is not None:
decaf_in = decaf_in.transpose(np.argsort(transpose))
return decaf_in
def set_transpose(self, in_, order):
"""
        Set the order of dimensions for transposing an input, e.g. (2, 0, 1)
        to convert an H x W x K image into the K x H x W layout Caffe expects.
        Parameters
        ----------
        in_ : which input to assign this dimension order
order : the order to transpose the dimensions
"""
self.__check_input(in_)
if len(order) != len(self.inputs[in_]) - 1:
raise Exception('Transpose order needs to have the same number of '
'dimensions as the input.')
self.transpose[in_] = order
def set_channel_swap(self, in_, order):
"""
Set the input channel order for e.g. RGB to BGR conversion
as needed for the reference ImageNet model.
N.B. this assumes the channels are the first dimension AFTER transpose.
Parameters
----------
in_ : which input to assign this channel order
order : the order to take the channels.
(2,1,0) maps RGB to BGR for example.
"""
self.__check_input(in_)
if len(order) != self.inputs[in_][1]:
raise Exception('Channel swap needs to have the same number of '
'dimensions as the input channels.')
self.channel_swap[in_] = order
def set_raw_scale(self, in_, scale):
"""
Set the scale of raw features s.t. the input blob = input * scale.
While Python represents images in [0, 1], certain Caffe models
like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale
of these models must be 255.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.raw_scale[in_] = scale
def set_mean(self, in_, mean):
"""
Set the mean to subtract for centering the data.
Parameters
----------
in_ : which input to assign this mean.
mean : mean ndarray (input dimensional or broadcastable)
"""
self.__check_input(in_)
ms = mean.shape
if mean.ndim == 1:
# broadcast channels
if ms[0] != self.inputs[in_][1]:
raise ValueError('Mean channels incompatible with input.')
mean = mean[:, np.newaxis, np.newaxis]
else:
# elementwise mean
if len(ms) == 2:
ms = (1,) + ms
if len(ms) != 3:
raise ValueError('Mean shape invalid')
if ms != self.inputs[in_][1:]:
raise ValueError('Mean shape incompatible with input shape.')
self.mean[in_] = mean
def set_input_scale(self, in_, scale):
"""
Set the scale of preprocessed inputs s.t. the blob = blob * scale.
N.B. input_scale is done AFTER mean subtraction and other preprocessing
while raw_scale is done BEFORE.
Parameters
----------
in_ : which input to assign this scale factor
scale : scale coefficient
"""
self.__check_input(in_)
self.input_scale[in_] = scale
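# Illustrative sketch of a typical Transformer configuration for an
# ImageNet-style model. It is not called in this module; the 'data' blob name,
# the mean values and the image path are assumptions for the example.
def _example_transformer_setup(net):
    t = Transformer({'data': net.blobs['data'].data.shape})
    t.set_transpose('data', (2, 0, 1))      # H x W x K -> K x H x W
    t.set_channel_swap('data', (2, 1, 0))   # RGB -> BGR
    t.set_raw_scale('data', 255.0)          # [0, 1] -> [0, 255]
    t.set_mean('data', np.array([104.0, 117.0, 123.0]))  # per-channel mean
    image = load_image('cat.jpg')
    return t.preprocess('data', image)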
## Image IO
def load_image(filename, color=True):
"""
Load an image converting from grayscale or alpha as needed.
Parameters
----------
filename : string
color : boolean
flag for color format. True (default) loads as RGB while False
loads as intensity (if image is already grayscale).
Returns
-------
image : an image with type np.float32 in range [0, 1]
of size (H x W x 3) in RGB or
of size (H x W x 1) in grayscale.
"""
img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)
if img.ndim == 2:
img = img[:, :, np.newaxis]
if color:
img = np.tile(img, (1, 1, 3))
elif img.shape[2] == 4:
img = img[:, :, :3]
return img
def resize_image(im, new_dims, interp_order=1):
"""
Resize an image array with interpolation.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
if im.shape[-1] == 1 or im.shape[-1] == 3:
im_min, im_max = im.min(), im.max()
if im_max > im_min:
# skimage is fast but only understands {1,3} channel images
# in [0, 1].
im_std = (im - im_min) / (im_max - im_min)
resized_std = resize(im_std, new_dims, order=interp_order)
resized_im = resized_std * (im_max - im_min) + im_min
else:
# the image is a constant -- avoid divide by 0
ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
dtype=np.float32)
ret.fill(im_min)
return ret
else:
# ndimage interpolates anything but more slowly.
scale = tuple(np.array(new_dims, dtype=float) / np.array(im.shape[:2]))
resized_im = zoom(im, scale + (1,), order=interp_order)
return resized_im.astype(np.float32)
def oversample(images, crop_dims):
"""
Crop images into the four corners, center, and their mirrored versions.
Parameters
----------
    images : iterable of (H x W x K) ndarrays
crop_dims : (height, width) tuple for the crops.
Returns
-------
crops : (10*N x H x W x K) ndarray of crops for number of inputs N.
"""
# Dimensions and center.
im_shape = np.array(images[0].shape)
crop_dims = np.array(crop_dims)
im_center = im_shape[:2] / 2.0
# Make crop coordinates
h_indices = (0, im_shape[0] - crop_dims[0])
w_indices = (0, im_shape[1] - crop_dims[1])
crops_ix = np.empty((5, 4), dtype=int)
curr = 0
for i in h_indices:
for j in w_indices:
crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1])
curr += 1
crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate([
-crop_dims / 2.0,
crop_dims / 2.0
])
crops_ix = np.tile(crops_ix, (2, 1))
# Extract crops
crops = np.empty((10 * len(images), crop_dims[0], crop_dims[1],
im_shape[-1]), dtype=np.float32)
ix = 0
for im in images:
for crop in crops_ix:
crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :]
ix += 1
crops[ix-5:ix] = crops[ix-5:ix, :, ::-1, :] # flip for mirrors
return crops
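# Example of the oversampling shape arithmetic above, with assumed sizes: for
# two 256 x 256 x 3 images and crop_dims = (227, 227), the four corners, the
# center and their mirrored versions give 10 crops per image, so the returned
# array has shape (20, 227, 227, 3).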
| 12,743 | 32.1875 | 110 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/test/test_coord_map.py | import unittest
import numpy as np
import random
import caffe
from caffe import layers as L
from caffe import params as P
from caffe.coord_map import coord_map_from_to, crop
def coord_net_spec(ks=3, stride=1, pad=0, pool=2, dstride=2, dpad=0):
"""
Define net spec for simple conv-pool-deconv pattern common to all
coordinate mapping tests.
"""
n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[2, 1, 100, 100]))
n.aux = L.Input(shape=dict(dim=[2, 1, 20, 20]))
n.conv = L.Convolution(
n.data, num_output=10, kernel_size=ks, stride=stride, pad=pad)
n.pool = L.Pooling(
n.conv, pool=P.Pooling.MAX, kernel_size=pool, stride=pool, pad=0)
# for upsampling kernel size is 2x stride
try:
deconv_ks = [s*2 for s in dstride]
    except TypeError:
deconv_ks = dstride*2
n.deconv = L.Deconvolution(
n.pool, num_output=10, kernel_size=deconv_ks, stride=dstride, pad=dpad)
return n
class TestCoordMap(unittest.TestCase):
def setUp(self):
pass
def test_conv_pool_deconv(self):
"""
Map through conv, pool, and deconv.
"""
n = coord_net_spec()
# identity for 2x pool, 2x deconv
ax, a, b = coord_map_from_to(n.deconv, n.data)
self.assertEquals(ax, 1)
self.assertEquals(a, 1)
self.assertEquals(b, 0)
# shift-by-one for 4x pool, 4x deconv
n = coord_net_spec(pool=4, dstride=4)
ax, a, b = coord_map_from_to(n.deconv, n.data)
self.assertEquals(ax, 1)
self.assertEquals(a, 1)
self.assertEquals(b, -1)
def test_pass(self):
"""
A pass-through layer (ReLU) and conv (1x1, stride 1, pad 0)
both do identity mapping.
"""
n = coord_net_spec()
ax, a, b = coord_map_from_to(n.deconv, n.data)
n.relu = L.ReLU(n.deconv)
n.conv1x1 = L.Convolution(
n.relu, num_output=10, kernel_size=1, stride=1, pad=0)
for top in [n.relu, n.conv1x1]:
ax_pass, a_pass, b_pass = coord_map_from_to(top, n.data)
self.assertEquals(ax, ax_pass)
self.assertEquals(a, a_pass)
self.assertEquals(b, b_pass)
def test_padding(self):
"""
Padding conv adds offset while padding deconv subtracts offset.
"""
n = coord_net_spec()
ax, a, b = coord_map_from_to(n.deconv, n.data)
pad = random.randint(0, 10)
# conv padding
n = coord_net_spec(pad=pad)
_, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
self.assertEquals(a, a_pad)
self.assertEquals(b - pad, b_pad)
# deconv padding
n = coord_net_spec(dpad=pad)
_, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
self.assertEquals(a, a_pad)
self.assertEquals(b + pad, b_pad)
# pad both to cancel out
n = coord_net_spec(pad=pad, dpad=pad)
_, a_pad, b_pad = coord_map_from_to(n.deconv, n.data)
self.assertEquals(a, a_pad)
self.assertEquals(b, b_pad)
def test_multi_conv(self):
"""
Multiple bottoms/tops of a layer are identically mapped.
"""
n = coord_net_spec()
# multi bottom/top
n.conv_data, n.conv_aux = L.Convolution(
n.data, n.aux, ntop=2, num_output=10, kernel_size=5, stride=2,
pad=0)
ax1, a1, b1 = coord_map_from_to(n.conv_data, n.data)
ax2, a2, b2 = coord_map_from_to(n.conv_aux, n.aux)
self.assertEquals(ax1, ax2)
self.assertEquals(a1, a2)
self.assertEquals(b1, b2)
def test_rect(self):
"""
Anisotropic mapping is equivalent to its isotropic parts.
"""
n3x3 = coord_net_spec(ks=3, stride=1, pad=0)
n5x5 = coord_net_spec(ks=5, stride=2, pad=10)
n3x5 = coord_net_spec(ks=[3, 5], stride=[1, 2], pad=[0, 10])
ax_3x3, a_3x3, b_3x3 = coord_map_from_to(n3x3.deconv, n3x3.data)
ax_5x5, a_5x5, b_5x5 = coord_map_from_to(n5x5.deconv, n5x5.data)
ax_3x5, a_3x5, b_3x5 = coord_map_from_to(n3x5.deconv, n3x5.data)
self.assertTrue(ax_3x3 == ax_5x5 == ax_3x5)
self.assertEquals(a_3x3, a_3x5[0])
self.assertEquals(b_3x3, b_3x5[0])
self.assertEquals(a_5x5, a_3x5[1])
self.assertEquals(b_5x5, b_3x5[1])
def test_nd_conv(self):
"""
ND conv maps the same way in more dimensions.
"""
n = caffe.NetSpec()
# define data with 3 spatial dimensions, otherwise the same net
n.data = L.Input(shape=dict(dim=[2, 3, 100, 100, 100]))
n.conv = L.Convolution(
n.data, num_output=10, kernel_size=[3, 3, 3], stride=[1, 1, 1],
pad=[0, 1, 2])
n.pool = L.Pooling(
n.conv, pool=P.Pooling.MAX, kernel_size=2, stride=2, pad=0)
n.deconv = L.Deconvolution(
n.pool, num_output=10, kernel_size=4, stride=2, pad=0)
ax, a, b = coord_map_from_to(n.deconv, n.data)
self.assertEquals(ax, 1)
self.assertTrue(len(a) == len(b))
self.assertTrue(np.all(a == 1))
self.assertEquals(b[0] - 1, b[1])
self.assertEquals(b[1] - 1, b[2])
def test_crop_of_crop(self):
"""
Map coordinates through Crop layer:
crop an already-cropped output to the input and check change in offset.
"""
n = coord_net_spec()
offset = random.randint(0, 10)
ax, a, b = coord_map_from_to(n.deconv, n.data)
n.crop = L.Crop(n.deconv, n.data, axis=2, offset=offset)
ax_crop, a_crop, b_crop = coord_map_from_to(n.crop, n.data)
self.assertEquals(ax, ax_crop)
self.assertEquals(a, a_crop)
self.assertEquals(b + offset, b_crop)
def test_crop_helper(self):
"""
Define Crop layer by crop().
"""
n = coord_net_spec()
crop(n.deconv, n.data)
def test_catch_unconnected(self):
"""
Catch mapping spatially unconnected tops.
"""
n = coord_net_spec()
n.ip = L.InnerProduct(n.deconv, num_output=10)
with self.assertRaises(RuntimeError):
coord_map_from_to(n.ip, n.data)
def test_catch_scale_mismatch(self):
"""
Catch incompatible scales, such as when the top to be cropped
is mapped to a differently strided reference top.
"""
n = coord_net_spec(pool=3, dstride=2) # pool 3x but deconv 2x
with self.assertRaises(AssertionError):
crop(n.deconv, n.data)
def test_catch_negative_crop(self):
"""
Catch impossible offsets, such as when the top to be cropped
is mapped to a larger reference top.
"""
n = coord_net_spec(dpad=10) # make output smaller than input
with self.assertRaises(AssertionError):
crop(n.deconv, n.data)
| 6,894 | 34.725389 | 79 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/test/test_python_layer_with_param_str.py | import unittest
import tempfile
import os
import six
import caffe
class SimpleParamLayer(caffe.Layer):
"""A layer that just multiplies by the numeric value of its param string"""
def setup(self, bottom, top):
try:
self.value = float(self.param_str)
except ValueError:
raise ValueError("Parameter string must be a legible float")
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
top[0].data[...] = self.value * bottom[0].data
def backward(self, top, propagate_down, bottom):
bottom[0].diff[...] = self.value * top[0].diff
def python_param_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'mul10' bottom: 'data' top: 'mul10'
python_param { module: 'test_python_layer_with_param_str'
layer: 'SimpleParamLayer' param_str: '10' } }
layer { type: 'Python' name: 'mul2' bottom: 'mul10' top: 'mul2'
python_param { module: 'test_python_layer_with_param_str'
layer: 'SimpleParamLayer' param_str: '2' } }""")
return f.name
@unittest.skipIf('Python' not in caffe.layer_type_list(),
'Caffe built without Python layer support')
class TestLayerWithParam(unittest.TestCase):
def setUp(self):
net_file = python_param_net_file()
self.net = caffe.Net(net_file, caffe.TRAIN)
os.remove(net_file)
def test_forward(self):
x = 8
self.net.blobs['data'].data[...] = x
self.net.forward()
for y in self.net.blobs['mul2'].data.flat:
self.assertEqual(y, 2 * 10 * x)
def test_backward(self):
x = 7
self.net.blobs['mul2'].diff[...] = x
self.net.backward()
for y in self.net.blobs['data'].diff.flat:
self.assertEqual(y, 2 * 10 * x)
| 2,031 | 31.774194 | 79 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/test/test_io.py | import numpy as np
import unittest
import caffe
class TestBlobProtoToArray(unittest.TestCase):
def test_old_format(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
shape = (1,1,10,10)
blob.num, blob.channels, blob.height, blob.width = shape
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr.shape, shape)
def test_new_format(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
blob.shape.dim.extend(list(data.shape))
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr.shape, data.shape)
def test_no_shape(self):
data = np.zeros((10,10))
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
with self.assertRaises(ValueError):
caffe.io.blobproto_to_array(blob)
def test_scalar(self):
data = np.ones((1)) * 123
blob = caffe.proto.caffe_pb2.BlobProto()
blob.data.extend(list(data.flatten()))
arr = caffe.io.blobproto_to_array(blob)
self.assertEqual(arr, 123)
class TestArrayToDatum(unittest.TestCase):
def test_label_none_size(self):
# Set label
d1 = caffe.io.array_to_datum(
np.ones((10,10,3)), label=1)
# Don't set label
d2 = caffe.io.array_to_datum(
np.ones((10,10,3)))
# Not setting the label should result in a smaller object
self.assertGreater(
len(d1.SerializeToString()),
len(d2.SerializeToString()))
| 1,694 | 28.736842 | 65 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/test/test_solver.py | import unittest
import tempfile
import os
import numpy as np
import six
import caffe
from test_net import simple_net_file
class TestSolver(unittest.TestCase):
def setUp(self):
self.num_output = 13
net_f = simple_net_file(self.num_output)
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write("""net: '""" + net_f + """'
test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9
weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75
display: 100 max_iter: 100 snapshot_after_train: false
snapshot_prefix: "model" """)
f.close()
self.solver = caffe.SGDSolver(f.name)
# also make sure get_solver runs
caffe.get_solver(f.name)
caffe.set_mode_cpu()
# fill in valid labels
self.solver.net.blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.solver.net.blobs['label'].data.shape)
self.solver.test_nets[0].blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.solver.test_nets[0].blobs['label'].data.shape)
os.remove(f.name)
os.remove(net_f)
def test_solve(self):
self.assertEqual(self.solver.iter, 0)
self.solver.solve()
self.assertEqual(self.solver.iter, 100)
def test_net_memory(self):
"""Check that nets survive after the solver is destroyed."""
nets = [self.solver.net] + list(self.solver.test_nets)
self.assertEqual(len(nets), 2)
del self.solver
total = 0
for net in nets:
for ps in six.itervalues(net.params):
for p in ps:
total += p.data.sum() + p.diff.sum()
for bl in six.itervalues(net.blobs):
total += bl.data.sum() + bl.diff.sum()
def test_snapshot(self):
self.solver.snapshot()
# Check that these files exist and then remove them
files = ['model_iter_0.caffemodel', 'model_iter_0.solverstate']
for fn in files:
assert os.path.isfile(fn)
os.remove(fn)
| 2,165 | 33.380952 | 76 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/test/test_layer_type_list.py | import unittest
import caffe
class TestLayerTypeList(unittest.TestCase):
def test_standard_types(self):
        # check that a few standard layer types are registered
for type_name in ['Data', 'Convolution', 'InnerProduct']:
self.assertIn(type_name, caffe.layer_type_list(),
'%s not in layer_type_list()' % type_name)
| 338 | 27.25 | 65 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/test/test_net.py | import unittest
import tempfile
import os
import numpy as np
import six
from collections import OrderedDict
import caffe
def simple_net_file(num_output):
"""Make a simple net prototxt, based on test_net.cpp, returning the name
of the (temporary) file."""
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write("""name: 'testnet' force_backward: true
layer { type: 'DummyData' name: 'data' top: 'data' top: 'label'
dummy_data_param { num: 5 channels: 2 height: 3 width: 4
num: 5 channels: 1 height: 1 width: 1
data_filler { type: 'gaussian' std: 1 }
data_filler { type: 'constant' } } }
layer { type: 'Convolution' name: 'conv' bottom: 'data' top: 'conv'
convolution_param { num_output: 11 kernel_size: 2 pad: 3
weight_filler { type: 'gaussian' std: 1 }
bias_filler { type: 'constant' value: 2 } }
param { decay_mult: 1 } param { decay_mult: 0 }
}
layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip_blob'
inner_product_param { num_output: """ + str(num_output) + """
weight_filler { type: 'gaussian' std: 2.5 }
bias_filler { type: 'constant' value: -3 } } }
layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip_blob' bottom: 'label'
top: 'loss' }""")
f.close()
return f.name
class TestNet(unittest.TestCase):
def setUp(self):
self.num_output = 13
net_file = simple_net_file(self.num_output)
self.net = caffe.Net(net_file, caffe.TRAIN)
# fill in valid labels
self.net.blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.net.blobs['label'].data.shape)
os.remove(net_file)
def test_memory(self):
"""Check that holding onto blob data beyond the life of a Net is OK"""
params = sum(map(list, six.itervalues(self.net.params)), [])
blobs = self.net.blobs.values()
del self.net
# now sum everything (forcing all memory to be read)
total = 0
for p in params:
total += p.data.sum() + p.diff.sum()
for bl in blobs:
total += bl.data.sum() + bl.diff.sum()
def test_layer_dict(self):
layer_dict = self.net.layer_dict
self.assertEqual(list(layer_dict.keys()), list(self.net._layer_names))
for i, name in enumerate(self.net._layer_names):
self.assertEqual(layer_dict[name].type,
self.net.layers[i].type)
def test_forward_backward(self):
self.net.forward()
self.net.backward()
def test_forward_start_end(self):
        conv_blob = self.net.blobs['conv']
        ip_blob = self.net.blobs['ip_blob']
        sample_data = np.random.uniform(size=conv_blob.data.shape)
        sample_data = sample_data.astype(np.float32)
        conv_blob.data[:] = sample_data
        forward_blob = self.net.forward(start='ip', end='ip')
        self.assertIn('ip_blob', forward_blob)
        manual_forward = []
        for i in range(conv_blob.data.shape[0]):
            dot = np.dot(self.net.params['ip'][0].data,
                         conv_blob.data[i].reshape(-1))
            manual_forward.append(dot + self.net.params['ip'][1].data)
        manual_forward = np.array(manual_forward)
        np.testing.assert_allclose(ip_blob.data, manual_forward, rtol=1e-3)
    def test_backward_start_end(self):
        conv_blob = self.net.blobs['conv']
        ip_blob = self.net.blobs['ip_blob']
        sample_data = np.random.uniform(size=ip_blob.data.shape)
        sample_data = sample_data.astype(np.float32)
        ip_blob.diff[:] = sample_data
        backward_blob = self.net.backward(start='ip', end='ip')
        self.assertIn('conv', backward_blob)
        manual_backward = []
        for i in range(conv_blob.data.shape[0]):
            dot = np.dot(self.net.params['ip'][0].data.transpose(),
                         sample_data[i].reshape(-1))
            manual_backward.append(dot)
        manual_backward = np.array(manual_backward)
        manual_backward = manual_backward.reshape(conv_blob.data.shape)
        np.testing.assert_allclose(conv_blob.diff, manual_backward, rtol=1e-3)
def test_clear_param_diffs(self):
# Run a forward/backward step to have non-zero diffs
self.net.forward()
self.net.backward()
diff = self.net.params["conv"][0].diff
# Check that we have non-zero diffs
self.assertTrue(diff.max() > 0)
self.net.clear_param_diffs()
# Check that the diffs are now 0
self.assertTrue((diff == 0).all())
def test_inputs_outputs(self):
self.assertEqual(self.net.inputs, [])
self.assertEqual(self.net.outputs, ['loss'])
def test_top_bottom_names(self):
self.assertEqual(self.net.top_names,
OrderedDict([('data', ['data', 'label']),
('conv', ['conv']),
('ip', ['ip_blob']),
('loss', ['loss'])]))
self.assertEqual(self.net.bottom_names,
OrderedDict([('data', []),
('conv', ['data']),
('ip', ['conv']),
('loss', ['ip_blob', 'label'])]))
def test_save_and_read(self):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.close()
self.net.save(f.name)
net_file = simple_net_file(self.num_output)
# Test legacy constructor
# should print deprecation warning
caffe.Net(net_file, f.name, caffe.TRAIN)
# Test named constructor
net2 = caffe.Net(net_file, caffe.TRAIN, weights=f.name)
os.remove(net_file)
os.remove(f.name)
for name in self.net.params:
for i in range(len(self.net.params[name])):
self.assertEqual(abs(self.net.params[name][i].data
- net2.params[name][i].data).sum(), 0)
def test_save_hdf5(self):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.close()
self.net.save_hdf5(f.name)
net_file = simple_net_file(self.num_output)
net2 = caffe.Net(net_file, caffe.TRAIN)
net2.load_hdf5(f.name)
os.remove(net_file)
os.remove(f.name)
for name in self.net.params:
for i in range(len(self.net.params[name])):
self.assertEqual(abs(self.net.params[name][i].data
- net2.params[name][i].data).sum(), 0)
class TestLevels(unittest.TestCase):
TEST_NET = """
layer {
name: "data"
type: "DummyData"
top: "data"
dummy_data_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } }
}
layer {
name: "NoLevel"
type: "InnerProduct"
bottom: "data"
top: "NoLevel"
inner_product_param { num_output: 1 }
}
layer {
name: "Level0Only"
type: "InnerProduct"
bottom: "data"
top: "Level0Only"
include { min_level: 0 max_level: 0 }
inner_product_param { num_output: 1 }
}
layer {
name: "Level1Only"
type: "InnerProduct"
bottom: "data"
top: "Level1Only"
include { min_level: 1 max_level: 1 }
inner_product_param { num_output: 1 }
}
layer {
name: "Level>=0"
type: "InnerProduct"
bottom: "data"
top: "Level>=0"
include { min_level: 0 }
inner_product_param { num_output: 1 }
}
layer {
name: "Level>=1"
type: "InnerProduct"
bottom: "data"
top: "Level>=1"
include { min_level: 1 }
inner_product_param { num_output: 1 }
}
"""
def setUp(self):
self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.f.write(self.TEST_NET)
self.f.close()
def tearDown(self):
os.remove(self.f.name)
def check_net(self, net, blobs):
net_blobs = [b for b in net.blobs.keys() if 'data' not in b]
self.assertEqual(net_blobs, blobs)
def test_0(self):
net = caffe.Net(self.f.name, caffe.TEST)
self.check_net(net, ['NoLevel', 'Level0Only', 'Level>=0'])
def test_1(self):
net = caffe.Net(self.f.name, caffe.TEST, level=1)
self.check_net(net, ['NoLevel', 'Level1Only', 'Level>=0', 'Level>=1'])
class TestStages(unittest.TestCase):
TEST_NET = """
layer {
name: "data"
type: "DummyData"
top: "data"
dummy_data_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } }
}
layer {
name: "A"
type: "InnerProduct"
bottom: "data"
top: "A"
include { stage: "A" }
inner_product_param { num_output: 1 }
}
layer {
name: "B"
type: "InnerProduct"
bottom: "data"
top: "B"
include { stage: "B" }
inner_product_param { num_output: 1 }
}
layer {
name: "AorB"
type: "InnerProduct"
bottom: "data"
top: "AorB"
include { stage: "A" }
include { stage: "B" }
inner_product_param { num_output: 1 }
}
layer {
name: "AandB"
type: "InnerProduct"
bottom: "data"
top: "AandB"
include { stage: "A" stage: "B" }
inner_product_param { num_output: 1 }
}
"""
def setUp(self):
self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.f.write(self.TEST_NET)
self.f.close()
def tearDown(self):
os.remove(self.f.name)
def check_net(self, net, blobs):
net_blobs = [b for b in net.blobs.keys() if 'data' not in b]
self.assertEqual(net_blobs, blobs)
def test_A(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['A'])
self.check_net(net, ['A', 'AorB'])
def test_B(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['B'])
self.check_net(net, ['B', 'AorB'])
def test_AandB(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['A', 'B'])
self.check_net(net, ['A', 'B', 'AorB', 'AandB'])
class TestAllInOne(unittest.TestCase):
TEST_NET = """
layer {
name: "train_data"
type: "DummyData"
top: "data"
top: "label"
dummy_data_param {
shape { dim: 1 dim: 1 dim: 10 dim: 10 }
shape { dim: 1 dim: 1 dim: 1 dim: 1 }
}
include { phase: TRAIN stage: "train" }
}
layer {
name: "val_data"
type: "DummyData"
top: "data"
top: "label"
dummy_data_param {
shape { dim: 1 dim: 1 dim: 10 dim: 10 }
shape { dim: 1 dim: 1 dim: 1 dim: 1 }
}
include { phase: TEST stage: "val" }
}
layer {
name: "deploy_data"
type: "Input"
top: "data"
input_param { shape { dim: 1 dim: 1 dim: 10 dim: 10 } }
include { phase: TEST stage: "deploy" }
}
layer {
name: "ip"
type: "InnerProduct"
bottom: "data"
top: "ip"
inner_product_param { num_output: 2 }
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip"
bottom: "label"
top: "loss"
include: { phase: TRAIN stage: "train" }
include: { phase: TEST stage: "val" }
}
layer {
name: "pred"
type: "Softmax"
bottom: "ip"
top: "pred"
include: { phase: TEST stage: "deploy" }
}
"""
def setUp(self):
self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
self.f.write(self.TEST_NET)
self.f.close()
def tearDown(self):
os.remove(self.f.name)
def check_net(self, net, outputs):
self.assertEqual(list(net.blobs['data'].shape), [1,1,10,10])
self.assertEqual(net.outputs, outputs)
def test_train(self):
net = caffe.Net(self.f.name, caffe.TRAIN, stages=['train'])
self.check_net(net, ['loss'])
def test_val(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['val'])
self.check_net(net, ['loss'])
def test_deploy(self):
net = caffe.Net(self.f.name, caffe.TEST, stages=['deploy'])
self.check_net(net, ['pred'])
| 11,640 | 28.848718 | 82 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/test/test_draw.py | import os
import unittest
from google.protobuf import text_format
import caffe.draw
from caffe.proto import caffe_pb2
def getFilenames():
"""Yields files in the source tree which are Net prototxts."""
root_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
assert os.path.exists(root_dir)
for dirname in ('models', 'examples'):
dirname = os.path.join(root_dir, dirname)
assert os.path.exists(dirname)
for cwd, _, filenames in os.walk(dirname):
for filename in filenames:
filename = os.path.join(cwd, filename)
if filename.endswith('.prototxt') and 'solver' not in filename:
yield os.path.join(dirname, filename)
class TestDraw(unittest.TestCase):
def test_draw_net(self):
for filename in getFilenames():
net = caffe_pb2.NetParameter()
with open(filename) as infile:
text_format.Merge(infile.read(), net)
caffe.draw.draw_net(net, 'LR')
if __name__ == "__main__":
unittest.main()
| 1,114 | 28.342105 | 79 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/test/test_nccl.py | import sys
import unittest
import caffe
class TestNCCL(unittest.TestCase):
def test_newuid(self):
"""
Test that NCCL uids are of the proper type
according to python version
"""
if caffe.has_nccl():
uid = caffe.NCCL.new_uid()
if sys.version_info.major >= 3:
self.assertTrue(isinstance(uid, bytes))
else:
self.assertTrue(isinstance(uid, str))
| 457 | 21.9 | 55 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/test/test_net_spec.py | import unittest
import tempfile
import caffe
from caffe import layers as L
from caffe import params as P
def lenet(batch_size):
n = caffe.NetSpec()
n.data, n.label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
dict(dim=[batch_size, 1, 1, 1])],
transform_param=dict(scale=1./255), ntop=2)
n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20,
weight_filler=dict(type='xavier'))
n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50,
weight_filler=dict(type='xavier'))
n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
n.ip1 = L.InnerProduct(n.pool2, num_output=500,
weight_filler=dict(type='xavier'))
n.relu1 = L.ReLU(n.ip1, in_place=True)
n.ip2 = L.InnerProduct(n.relu1, num_output=10,
weight_filler=dict(type='xavier'))
n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
return n.to_proto()
def anon_lenet(batch_size):
data, label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
dict(dim=[batch_size, 1, 1, 1])],
transform_param=dict(scale=1./255), ntop=2)
conv1 = L.Convolution(data, kernel_size=5, num_output=20,
weight_filler=dict(type='xavier'))
pool1 = L.Pooling(conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
conv2 = L.Convolution(pool1, kernel_size=5, num_output=50,
weight_filler=dict(type='xavier'))
pool2 = L.Pooling(conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
ip1 = L.InnerProduct(pool2, num_output=500,
weight_filler=dict(type='xavier'))
relu1 = L.ReLU(ip1, in_place=True)
ip2 = L.InnerProduct(relu1, num_output=10,
weight_filler=dict(type='xavier'))
loss = L.SoftmaxWithLoss(ip2, label)
return loss.to_proto()
def silent_net():
n = caffe.NetSpec()
n.data, n.data2 = L.DummyData(shape=dict(dim=3), ntop=2)
n.silence_data = L.Silence(n.data, ntop=0)
n.silence_data2 = L.Silence(n.data2, ntop=0)
return n.to_proto()
class TestNetSpec(unittest.TestCase):
def load_net(self, net_proto):
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write(str(net_proto))
f.close()
return caffe.Net(f.name, caffe.TEST)
def test_lenet(self):
"""Construct and build the Caffe version of LeNet."""
net_proto = lenet(50)
# check that relu is in-place
self.assertEqual(net_proto.layer[6].bottom,
net_proto.layer[6].top)
net = self.load_net(net_proto)
# check that all layers are present
self.assertEqual(len(net.layers), 9)
        # now check the version with automatically-generated layer names
net_proto = anon_lenet(50)
self.assertEqual(net_proto.layer[6].bottom,
net_proto.layer[6].top)
net = self.load_net(net_proto)
self.assertEqual(len(net.layers), 9)
def test_zero_tops(self):
"""Test net construction for top-less layers."""
net_proto = silent_net()
net = self.load_net(net_proto)
self.assertEqual(len(net.forward()), 0)
def test_type_error(self):
"""Test that a TypeError is raised when a Function input isn't a Top."""
data = L.DummyData(ntop=2) # data is a 2-tuple of Tops
r = r"^Silence input 0 is not a Top \(type is <(type|class) 'tuple'>\)$"
with self.assertRaisesRegexp(TypeError, r):
L.Silence(data, ntop=0) # should raise: data is a tuple, not a Top
L.Silence(*data, ntop=0) # shouldn't raise: each elt of data is a Top
| 3,756 | 40.744444 | 80 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/python/caffe/test/test_python_layer.py | import unittest
import tempfile
import os
import six
import caffe
class SimpleLayer(caffe.Layer):
"""A layer that just multiplies by ten"""
def setup(self, bottom, top):
pass
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
top[0].data[...] = 10 * bottom[0].data
def backward(self, top, propagate_down, bottom):
bottom[0].diff[...] = 10 * top[0].diff
class ExceptionLayer(caffe.Layer):
"""A layer for checking exceptions from Python"""
def setup(self, bottom, top):
raise RuntimeError
class ParameterLayer(caffe.Layer):
"""A layer that just multiplies by ten"""
def setup(self, bottom, top):
self.blobs.add_blob(1)
self.blobs[0].data[0] = 0
def reshape(self, bottom, top):
top[0].reshape(*bottom[0].data.shape)
def forward(self, bottom, top):
pass
def backward(self, top, propagate_down, bottom):
self.blobs[0].diff[0] = 1
class PhaseLayer(caffe.Layer):
"""A layer for checking attribute `phase`"""
def setup(self, bottom, top):
pass
    def reshape(self, bottom, top):
top[0].reshape()
def forward(self, bottom, top):
top[0].data[()] = self.phase
def python_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'one' bottom: 'data' top: 'one'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }
layer { type: 'Python' name: 'two' bottom: 'one' top: 'two'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }
layer { type: 'Python' name: 'three' bottom: 'two' top: 'three'
python_param { module: 'test_python_layer' layer: 'SimpleLayer' } }""")
return f.name
def exception_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top'
python_param { module: 'test_python_layer' layer: 'ExceptionLayer' } }
""")
return f.name
def parameter_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
layer { type: 'Python' name: 'layer' bottom: 'data' top: 'top'
python_param { module: 'test_python_layer' layer: 'ParameterLayer' } }
""")
return f.name
def phase_net_file():
with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
f.write("""name: 'pythonnet' force_backward: true
layer { type: 'Python' name: 'layer' top: 'phase'
python_param { module: 'test_python_layer' layer: 'PhaseLayer' } }
""")
return f.name
@unittest.skipIf('Python' not in caffe.layer_type_list(),
'Caffe built without Python layer support')
class TestPythonLayer(unittest.TestCase):
def setUp(self):
net_file = python_net_file()
self.net = caffe.Net(net_file, caffe.TRAIN)
os.remove(net_file)
def test_forward(self):
x = 8
self.net.blobs['data'].data[...] = x
self.net.forward()
for y in self.net.blobs['three'].data.flat:
self.assertEqual(y, 10**3 * x)
def test_backward(self):
x = 7
self.net.blobs['three'].diff[...] = x
self.net.backward()
for y in self.net.blobs['data'].diff.flat:
self.assertEqual(y, 10**3 * x)
def test_reshape(self):
s = 4
self.net.blobs['data'].reshape(s, s, s, s)
self.net.forward()
for blob in six.itervalues(self.net.blobs):
for d in blob.data.shape:
self.assertEqual(s, d)
def test_exception(self):
net_file = exception_net_file()
self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST)
os.remove(net_file)
def test_parameter(self):
net_file = parameter_net_file()
net = caffe.Net(net_file, caffe.TRAIN)
# Test forward and backward
net.forward()
net.backward()
layer = net.layers[list(net._layer_names).index('layer')]
self.assertEqual(layer.blobs[0].data[0], 0)
self.assertEqual(layer.blobs[0].diff[0], 1)
layer.blobs[0].data[0] += layer.blobs[0].diff[0]
self.assertEqual(layer.blobs[0].data[0], 1)
# Test saving and loading
h, caffemodel_file = tempfile.mkstemp()
os.close(h)  # close the OS-level handle; only the path is used below
net.save(caffemodel_file)
layer.blobs[0].data[0] = -1
self.assertEqual(layer.blobs[0].data[0], -1)
net.copy_from(caffemodel_file)
self.assertEqual(layer.blobs[0].data[0], 1)
os.remove(caffemodel_file)
# Test weight sharing
net2 = caffe.Net(net_file, caffe.TRAIN)
net2.share_with(net)
layer = net2.layers[list(net2._layer_names).index('layer')]
self.assertEqual(layer.blobs[0].data[0], 1)
os.remove(net_file)
def test_phase(self):
net_file = phase_net_file()
for phase in caffe.TRAIN, caffe.TEST:
net = caffe.Net(net_file, phase)
self.assertEqual(net.forward()['phase'], phase)
| 5,510 | 31.609467 | 81 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/scripts/cpp_lint.py | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
import six
from six import iteritems, itervalues
from six.moves import xrange
_USAGE = """
Syntax: cpp_lint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh, .h and .hpp. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_dir',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'caffe/alt_fn',
'caffe/data_layer_setup',
'caffe/random_fn',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = [
'-build/include_dir',
'-readability/todo',
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
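# For example, after the two loops above:
#   _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['<'] == 'EXPECT_GE'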
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
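# For example, in "if (a and b)" the token 'and' is matched (it is preceded by
# a space and followed by a space), and '&&' would be suggested instead.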
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# Finds occurrences of NOLINT[_NEXT_LINE] or NOLINT[_NEXT_LINE](...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT(_NEXT_LINE)?\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# Finds Copyright.
_RE_COPYRIGHT = re.compile(r'Copyright', re.IGNORECASE)
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'hpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
# FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
matched = _RE_SUPPRESSION.search(raw_line)
if matched:
if matched.group(1) == '_NEXT_LINE':
linenum += 1
category = matched.group(2)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(linenum)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(linenum)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
"Resets the set of NOLINT suppressions to empty."
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
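# Match, ReplaceAll and Search above are thin wrappers around the re module
# that reuse compiled patterns from _regexp_compile_cache; they are called for
# nearly every line being linted, so avoiding recompilation matters.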
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
self.ResetSection()
def ResetSection(self):
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
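# _IncludeState above enforces the expected include ordering: the header this
# file implements first, then C system headers, then C++ system headers, and
# other headers last. CheckNextIncludeOrder returns an error message when a C
# or C++ system header appears after a later section.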
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in iteritems(self.errors_by_category):
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400     # about 60% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# e.g. with base_trigger 250: 300 => 0, 500 => 1, 1000 => 2, 2000 => 3, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removal so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: is a partial line of code starting from the 0..n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
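# For example, IsCppString('x = "abc') returns True (one unmatched quote),
# while IsCppString('x = "abc"') returns False.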
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = ''
else:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
All these three members are of <type 'list'>, and of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if not _RE_PATTERN_INCLUDE.match(elided):
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
return elided
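# The expression helpers below operate on the 'elided' view produced above, in
# which string and character literals have been collapsed, e.g.
# 'printf("%d", x)' is elided to 'printf("", x)', so brackets inside literals
# do not confuse the depth counting.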
def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
"""Find the position just after the matching endchar.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
depth: nesting level at startpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching endchar: (index just after matching endchar, 0)
Otherwise: (-1, new depth at end of this line)
"""
for i in xrange(startpos, len(line)):
if line[i] == startchar:
depth += 1
elif line[i] == endchar:
depth -= 1
if depth == 0:
return (i + 1, 0)
return (-1, depth)
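# For example, FindEndOfExpressionInLine('foo(bar(x), y)', 3, 0, '(', ')')
# returns (14, 0): the ')' matching the '(' at position 3 is at position 13,
# and the index just past it is 14.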
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
startchar = line[pos]
if startchar not in '({[<':
return (line, clean_lines.NumLines(), -1)
if startchar == '(': endchar = ')'
if startchar == '[': endchar = ']'
if startchar == '{': endchar = '}'
if startchar == '<': endchar = '>'
# Check first line
(end_pos, num_open) = FindEndOfExpressionInLine(
line, pos, 0, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, num_open) = FindEndOfExpressionInLine(
line, 0, num_open, startchar, endchar)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find endchar before end of file, give up
return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
"""Find position at the matching startchar.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
depth: nesting level at endpos.
startchar: expression opening character.
endchar: expression closing character.
Returns:
On finding matching startchar: (index at matching startchar, 0)
Otherwise: (-1, new depth at beginning of this line)
"""
for i in xrange(endpos, -1, -1):
if line[i] == endchar:
depth += 1
elif line[i] == startchar:
depth -= 1
if depth == 0:
return (i, 0)
return (-1, depth)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
endchar = line[pos]
if endchar not in ')}]>':
return (line, 0, -1)
if endchar == ')': startchar = '('
if endchar == ']': startchar = '['
if endchar == '}': startchar = '{'
if endchar == '>': startchar = '<'
# Check last line
(start_pos, num_open) = FindStartOfExpressionInLine(
line, pos, 0, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, num_open) = FindStartOfExpressionInLine(
line, len(line) - 1, num_open, startchar, endchar)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find startchar before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if a Copyright message appears at the top of the file."""
# We'll check up to line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if _RE_COPYRIGHT.search(lines[line]):
error(filename, 0, 'legal/copyright', 5,
'Copyright message found. '
'You should not include a copyright line.')
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
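# For example, a header whose repository-relative path is
# 'include/caffe/blob.hpp' yields the guard variable 'INCLUDE_CAFFE_BLOB_HPP_'
# (assuming no --root flag is given).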
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
caffe_alt_function_list = (
('memset', ['caffe_set', 'caffe_memset']),
('cudaMemset', ['caffe_gpu_set', 'caffe_gpu_memset']),
('memcpy', ['caffe_copy']),
('cudaMemcpy', ['caffe_copy', 'caffe_gpu_memcpy']),
)
def CheckCaffeAlternatives(filename, clean_lines, linenum, error):
"""Checks for C(++) functions for which a Caffe substitute should be used.
For certain native C functions (memset, memcpy), there is a Caffe alternative
which should be used instead.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for function, alts in caffe_alt_function_list:
ix = line.find(function + '(')
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
disp_alts = ['%s(...)' % alt for alt in alts]
error(filename, linenum, 'caffe/alt_fn', 2,
'Use Caffe function %s instead of %s(...).' %
(' or '.join(disp_alts), function))
def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error):
"""Except the base classes, Caffe DataLayer should define DataLayerSetUp
instead of LayerSetUp.
The base DataLayers define common SetUp steps, the subclasses should
not override them.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
ix = line.find('DataLayer<Dtype>::LayerSetUp')
if ix >= 0 and (
line.find('void DataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void ImageDataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void MemoryDataLayer<Dtype>::LayerSetUp') != -1 or
line.find('void WindowDataLayer<Dtype>::LayerSetUp') != -1):
error(filename, linenum, 'caffe/data_layer_setup', 2,
'Except the base classes, Caffe DataLayer should define'
+ ' DataLayerSetUp instead of LayerSetUp. The base DataLayers'
+ ' define common SetUp steps, the subclasses should'
+ ' not override them.')
ix = line.find('DataLayer<Dtype>::DataLayerSetUp')
if ix >= 0 and (
line.find('void Base') == -1 and
line.find('void DataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void ImageDataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void MemoryDataLayer<Dtype>::DataLayerSetUp') == -1 and
line.find('void WindowDataLayer<Dtype>::DataLayerSetUp') == -1):
error(filename, linenum, 'caffe/data_layer_setup', 2,
'Except the base classes, Caffe DataLayer should define'
+ ' DataLayerSetUp instead of LayerSetUp. The base DataLayers'
+ ' define common SetUp steps, the subclasses should'
+ ' not override them.')
c_random_function_list = (
'rand(',
'rand_r(',
'random(',
)
def CheckCaffeRandom(filename, clean_lines, linenum, error):
"""Checks for calls to C random functions (rand, rand_r, random, ...).
Caffe code should (almost) always use the caffe_rng_* functions rather
than these, as the internal state of these C functions is independent of the
native Caffe RNG system which should produce deterministic results for a
fixed Caffe seed set using Caffe::set_random_seed(...).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for function in c_random_function_list:
ix = line.find(function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'caffe/random_fn', 2,
'Use caffe_rng_rand() (or other caffe_rng_* function) instead of '
+ function +
') to ensure results are deterministic for a fixed Caffe seed.')
threading_list = (
('asctime(', 'asctime_r('),
('ctime(', 'ctime_r('),
('getgrgid(', 'getgrgid_r('),
('getgrnam(', 'getgrnam_r('),
('getlogin(', 'getlogin_r('),
('getpwnam(', 'getpwnam_r('),
('getpwuid(', 'getpwuid_r('),
('gmtime(', 'gmtime_r('),
('localtime(', 'localtime_r('),
('strtok(', 'strtok_r('),
('ttyname(', 'ttyname_r('),
)
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code has been originally written without consideration of
multi-threading. Also, engineers are relying on their old experience;
they have learned posix before threading extensions were added. These
tests guide the engineers to use thread-safe functions (when using
posix directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_function, multithread_safe_function in threading_list:
ix = line.find(single_thread_function)
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
line[ix - 1] not in ('_', '.', '>'))):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_function +
'...) instead of ' + single_thread_function +
'...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++, moving pointer, and should
be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum])
if initial_indent:
self.class_indent = len(initial_indent.group(1))
else:
self.class_indent = 0
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
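# For example, a closing "  };" for a class whose declaration starts at
# column 0 triggers the whitespace/indent warning below.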
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
# Check how many lines are enclosed in this namespace. Don't issue
# warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
# namespaces, so that code that terminates namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when the existing comment is a substring of the
# expected namespace.
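# For instance, for "namespace foo" the terminators "}  // namespace foo",
# "}  /* namespace foo */" and "}  // end of namespace foo." are all accepted.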
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class _NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Update pp_stack first
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
# declarations even if the keyword isn't followed by whitespace, so that
# we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
#
# Templates with class arguments may confuse the parser, for example:
# template <class T
# class Comparator = less<T>,
# class Vector = vector<T> >
# class HeapQueue {
#
# Because this parser has no nesting state about templates, by the
# time it saw "class Comparator", it may think that it's a new class.
# Nested templates have a similar problem:
# template <
# typename ExportedType,
# typename TupleType,
# template <typename, typename> class ImplTemplate>
#
# To avoid these cases, we ignore classes that are followed by '=' or '>'
class_decl_match = Match(
r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
self.stack.append(_ClassInfo(
class_decl_match.group(4), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(5)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
# If the namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
# Also pop the stack for these.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
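# For example, a member declaration like "Foo(int size);" inside class Foo is
# reported, while "explicit Foo(int size);" and the copy constructor
# "Foo(const Foo& other);" are not.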
args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
def CheckSpacingForFunctionCall(filename, line, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
line: The text of the line to check.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions because they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
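# For example, "f( 3, 4 )" is flagged for the space after "(", and "f (3, 4)"
# for the space before "(", while control-flow statements such as "if (x) {"
# are exempt.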
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if the line is empty or consists of
only whitespace.
Args:
line: A line of a string.
Returns:
True, if the given line is blank.
"""
return not line or line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
raw = clean_lines.raw_lines
raw_line = raw[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
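# The regexp matches declaration starts such as "void MyClass::Process(" or
# "static int* Count(", capturing everything up to the opening parenthesis.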
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
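# For example, "// TODO(myname): fix this" yields group(1) == ' ' and
# group(2) == '(myname)', while "//TODO:" matches with no username and is
# reported by the readability/todo check below.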
def CheckComment(comment, filename, linenum, error):
"""Checks for common mistakes in TODO comments.
Args:
comment: The text of the comment from the line in question.
filename: The name of the current file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
if not username:
error(filename, linenum, 'readability/todo', 2,
'Missing username in TODO; it should look like '
'"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_EVIL_CONSTRUCTORS|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
"""Find the corresponding > to close a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_suffix: Remainder of the current line after the initial <.
Returns:
True if a matching bracket exists.
"""
line = init_suffix
nesting_stack = ['<']
while True:
# Find the next operator that can tell us whether < is used as an
# opening bracket or as a less-than operator. We only want to
# warn on the latter case.
#
# We could also check all other operators and terminate the search
# early, e.g. if we got something like this "a<b+c", the "<" is
# most likely a less-than operator, but then we will get false
# positives for default arguments and other template expressions.
match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(1)
line = match.group(2)
if nesting_stack[-1] == '<':
# Expecting closing angle bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator == '>':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma after a bracket, this is most likely a template
# argument. We have not seen a closing angle bracket yet, but
# it's probably a few lines later if we look for it, so just
# return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting closing parenthesis or closing bracket
if operator in ('<', '(', '['):
nesting_stack.append(operator)
elif operator in (')', ']'):
# We don't bother checking for matching () or []. If we got
# something like (] or [), it would have been a syntax error.
nesting_stack.pop()
else:
# Scan the next line
linenum += 1
if linenum >= len(clean_lines.elided):
break
line = clean_lines.elided[linenum]
# Exhausted all remaining lines and still no matching angle bracket.
# Most likely the input was incomplete, otherwise we should have
# seen a semicolon and returned early.
return True
def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
"""Find the corresponding < that started a template.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: Current line number.
init_prefix: Part of the current line before the initial >.
Returns:
True if a matching bracket exists.
"""
line = init_prefix
nesting_stack = ['>']
while True:
# Find the previous operator
match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
if match:
# Found an operator, update nesting stack
operator = match.group(2)
line = match.group(1)
if nesting_stack[-1] == '>':
# Expecting opening angle bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator == '<':
nesting_stack.pop()
if not nesting_stack:
# Found matching angle bracket
return True
elif operator == ',':
# Got a comma before a bracket, this is most likely a
# template argument. The opening angle bracket is probably
# there if we look for it, so just return early here.
return True
else:
# Got some other operator.
return False
else:
# Expecting opening parenthesis or opening bracket
if operator in ('>', ')', ']'):
nesting_stack.append(operator)
elif operator in ('(', '['):
nesting_stack.pop()
else:
# Scan the previous line
linenum -= 1
if linenum < 0:
break
line = clean_lines.elided[linenum]
# Exhausted all earlier lines and still no matching angle bracket.
return False
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
if IsBlankLine(line) and not nesting_state.InNamespaceBody():
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in an 80 column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into an 80 column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == '    :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: If the line is indented 4 spaces; and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
matched = Match(r'\s*(public|protected|private):', prev_line)
if matched:
error(filename, linenum, 'whitespace/blank_line', 3,
'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, we complain if there's a comment too near the text
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not Match(r'^\s*{ //', line) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# There should always be a space between the // and the comment
commentend = commentpos + 2
if commentend < len(line) and not line[commentend] == ' ':
# but some lines are exceptions -- e.g. if they're big
# comment delimiters like:
# //----------------------------------------------------------
# or are an empty C++ style Doxygen comment, like:
# ///
# or C++ style Doxygen comments placed after the variable:
# ///< Header comment
# //!< Header comment
# or they begin with multiple slashes followed by a space:
# //////// Header comment
match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
Search(r'^/$', line[commentend:]) or
Search(r'^!< ', line[commentend:]) or
Search(r'^/< ', line[commentend:]) or
Search(r'^/+ ', line[commentend:]))
if not match:
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
CheckComment(line[commentpos:], filename, linenum, error)
line = clean_lines.elided[linenum] # get rid of comments and strings
# Don't try to do spacing checks for operator methods
line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
# Also ignore using ns::operator<<;
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
if (match and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
elif not Match(r'#.*include', line):
# Avoid false positives on ->
reduced_line = line.replace('->', '')
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically we should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
if (match and
not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
if (match and
not FindPreviousMatchingAngleBracket(clean_lines, linenum,
match.group(1))):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
# A pet peeve of mine: no spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for a few corner cases
# TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
# space after ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
# Next we will look for issues with function calls.
CheckSpacingForFunctionCall(filename, line, linenum, error)
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<]".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this:            But not this:
#   Outer{                   if (...) {
#     Inner{...}               if (...){  // Missing space before {
#   };                       }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'new char * []'.
if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
# In range-based for, we wanted spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash as can be
# common when defining classes in C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
if Match(r'\s*{\s*$', line):
# We allow an open brace to start a line in the case where someone is using
# braces in a block to explicitly create a new scope, which is commonly used
# to control the lifetime of stack-allocated variables. Braces are also
# used for brace initializers inside function calls. We don't detect this
# perfectly: we just don't complain if the last non-whitespace character on
# the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# previous line starts a preprocessor block.
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if (not Search(r'[,;:}{(]\s*$', prevline) and
not Match(r'\s*#', prevline)):
error(filename, linenum, 'whitespace/braces', 4,
'{ should almost always be at the end of the previous line')
# An else clause should be on the same line as the preceding closing brace.
if Match(r'\s*else\s*', line):
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if Match(r'\s*}\s*$', prevline):
error(filename, linenum, 'whitespace/newline', 4,
'An else should appear on the same line as the preceding }')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if
# find the ( after the if
pos = line.find('else if')
pos = line.find('(', pos)
if pos > 0:
(endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
if endline[endpos:].find('{') == -1: # must be brace after if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
else: # common case: else not followed by a multi-line if
error(filename, linenum, 'readability/braces', 5,
'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# the downside for getting the whitelist wrong means some extra
# semicolons, while the downside for getting the blacklist wrong
# would result in compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
Search(r'\s+=\s*$', line_prefix)):
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespace is allowed before the keywords, this will also ignore most
# do-while-loops, since those lines should start with a closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
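# For example, "while (Poll());" and "if (done);" are reported below, while
# "for (;;) {}" is not.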
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
else:
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
check_macro = None
start_pos = -1
for macro in _CHECK_MACROS:
i = lines[linenum].find(macro)
if i >= 0:
check_macro = macro
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum])
if not matched:
continue
start_pos = len(matched.group(1))
break
if not check_macro or start_pos < 0:
# Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT'
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')')
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs does not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to workaround this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if six.PY2:
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.

# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations we allow one space, notably for section labels
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int((_line_length * 1.25))
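# With the default 80-column limit (adjustable via --linelength) this is 100,
# so 81-100 columns gets the milder warning below and anything beyond 100 gets
# the stronger one.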
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in the same directory (or the adjacent ../public directory), then it's
# likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from the elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include_dir', 4,
'Include the directory when naming .h files')
# we shouldn't include a file more than once. actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
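# For example, in foo.cc the preferred order would be:
#   #include "foo/foo.h"
#   #include <sys/types.h>   // C system header
#   #include <vector>        // C++ system header
#   #include "base/other.h"  // other project header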
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
error(filename, linenum, 'readability/streams', 3,
'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly handles
nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match a string that ends with an opening punctuation symbol.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (they currently use inferior regexp matching).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(itervalues(matching_punctuation))
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must end with an opening punctuation symbol.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must end with an opening punctuation symbol.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
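# For example, in "void Update(const string& name, string* out);" the first
# parameter matches _RE_PATTERN_CONST_REF_PARAM and is accepted, whereas a
# parameter written as "string& name" only matches _RE_PATTERN_REF_PARAM and is
# reported by CheckForNonConstReference below.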
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists of entirely a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
include_state.ResetSection()
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# TODO(unknown): figure out if they're using default arguments in fn proto.
# Check to see if they're using an conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
if match:
matched_new = match.group(1)
matched_type = match.group(2)
matched_funcptr = match.group(3)
# gMock methods are defined using some variant of MOCK_METHODx(name, type)
# where type may be float(), int(string), etc. Without context they are
# virtually indistinguishable from int(x) casts. Likewise, gMock's
# MockCallback takes a template parameter of the form return_type(arg_type),
# which looks much like the cast we're trying to detect.
#
# std::function<> wrapper has a similar problem.
#
# Return types for function pointers also look like casts if they
# don't have an extra space.
if (matched_new is None and # If new operator, then this isn't a cast
not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
Search(r'\bMockCallback<.*>', line) or
Search(r'\bstd::function<.*>', line)) and
not (matched_funcptr and
Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr))):
# Try a bit harder to catch gmock lines: the only place where
# something looks like an old-style cast is where we declare the
# return type of the mocked method, and the only time when we
# are missing context is if MOCK_METHOD was split across
# multiple lines. The missing MOCK_METHOD is usually one or two
# lines back, so scan back one or two lines.
#
# It's not possible for gmock macros to appear in the first 2
# lines, since the class head + section name takes up 2 lines.
if (linenum < 2 or
not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]))):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
match = Search(
r'(?:&\(([^)]+)\)[\w(])|'
r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
# Create an extended_line, which is the concatenation of the current and
# next lines, for more effective checking of code that may span more than one
# line.
if linenum + 1 < clean_lines.NumLines():
extended_line = line + clean_lines.elided[linenum + 1]
else:
extended_line = line
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Make sure it's not a function.
# Function template specialization looks like: "string foo<Type>(...".
# Class template definitions look like: "string Foo<Type>::Method(...".
#
# Also ignore things that look like operators. These are matched separately
# because operator names cross non-word boundaries. If we change the pattern
# above, we would decrease the accuracy of matching identifiers.
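# For example, a file-scope "static const string kName = "x";" is reported
# below, with the suggestion to declare it as "static const char kName[]".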
if (match and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(sugawarayu): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
if Search(r'\busing namespace\b', line):
error(filename, linenum, 'build/namespaces', 5,
'Do not use namespace using-directives. '
'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
# found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for preprocessor directives.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
check_params = False
if not nesting_state.stack:
check_params = True # top level
elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
isinstance(nesting_state.stack[-1], _NamespaceInfo)):
check_params = True # within class or namespace
elif Match(r'.*{\s*$', line):
if (len(nesting_state.stack) == 1 or
isinstance(nesting_state.stack[-2], _ClassInfo) or
isinstance(nesting_state.stack[-2], _NamespaceInfo)):
check_params = True # just opened global/class/namespace block
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
check_params = False
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
check_params = False
break
if check_params:
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
error(filename, linenum, 'runtime/references', 2,
'Is this a non-const reference? '
'If so, make const or use a pointer: ' +
ReplaceAll(' *<', '<', parameter))
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
match = Search(pattern, line)
if not match:
return False
# Exclude lines with sizeof, since sizeof looks like a cast.
sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
if sizeof_match:
return False
# operator++(int) and operator--(int)
if (line[0:match.start(1) - 1].endswith(' operator++') or
line[0:match.start(1) - 1].endswith(' operator--')):
return False
# A single unnamed argument for a function tends to look like an old-style
# cast. If we see one, don't issue warnings for deprecated casts; instead
# issue warnings for unnamed arguments where appropriate.
#
# These are things that we want warnings for, since the style guide
# explicitly requires all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
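# For example, a line containing "std::vector<int> v;" matches the pattern
# built for 'vector' above, and CheckForIncludeWhatYouUse below records
# '<vector>' as a required header for that line.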
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
# The following check is just a speed-up; no semantics are changed.
if '<' not in line: # Skip lines without '<' to reduce CPU time.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_state.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_state, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_state:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A _NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM:
return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckCaffeAlternatives(filename, clean_lines, line, error)
CheckCaffeDataLayerSetUp(filename, clean_lines, line, error)
CheckCaffeRandom(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = _NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below. If it is not expected to be present (i.e. os.linesep !=
# '\r\n' as in Windows), a warning is issued below if this file
# is processed.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
carriage_return_found = False
# Remove trailing '\r'.
for linenum in range(len(lines)):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
carriage_return_found = True
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
if carriage_return_found and os.linesep != '\r\n':
# Use 0 for linenum since outputting only one error for potentially
# several lines.
Error(filename, 0, 'whitespace/newline', 1,
'One or more unexpected \\r (^M) found; '
'better to use only a \\n')
sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments:
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be digits.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
PrintUsage('Extensions must be comma separated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
if six.PY2:
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| 187,569 | 37.483792 | 93 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/scripts/split_caffe_proto.py | #!/usr/bin/env python
import mmap
import re
import os
import errno
script_path = os.path.dirname(os.path.realpath(__file__))
# a regex to match the parameter definitions in caffe.proto
r = re.compile(r'(?://.*\n)*message ([^ ]*) \{\n(?: .*\n|\n)*\}')
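# For example, it captures a block like
#   message SolverParameter {
#     optional string net = 1;
#   }
# (optionally preceded by // comment lines), with m.group(1) == 'SolverParameter'
# and m.group(0) spanning the whole block.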
# create directory to put caffe.proto fragments
try:
os.mkdir(
os.path.join(script_path,
'../docs/_includes/'))
os.mkdir(
os.path.join(script_path,
'../docs/_includes/proto/'))
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
caffe_proto_fn = os.path.join(
script_path,
'../src/caffe/proto/caffe.proto')
with open(caffe_proto_fn, 'r') as fin:
for m in r.finditer(fin.read(), re.MULTILINE):
fn = os.path.join(
script_path,
'../docs/_includes/proto/%s.txt' % m.group(1))
with open(fn, 'w') as fout:
fout.write(m.group(0))
| 941 | 25.166667 | 65 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/scripts/download_model_binary.py | #!/usr/bin/env python
import os
import sys
import time
import yaml
import hashlib
import argparse
from six.moves import urllib
required_keys = ['caffemodel', 'caffemodel_url', 'sha1']
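# Each model directory is expected to provide these keys in the YAML
# front-matter of its readme.md, for example (illustrative values only):
# ---
# caffemodel: some_model.caffemodel
# caffemodel_url: http://example.com/models/some_model.caffemodel
# sha1: 0123456789abcdef0123456789abcdef01234567
# ---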
def reporthook(count, block_size, total_size):
"""
From http://blog.moleculea.com/2012/10/04/urlretrieve-progres-indicator/
"""
global start_time
if count == 0:
start_time = time.time()
return
duration = (time.time() - start_time) or 0.01
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = int(count * block_size * 100 / total_size)
sys.stdout.write("\r...%d%%, %d MB, %d KB/s, %d seconds passed" %
(percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def parse_readme_frontmatter(dirname):
readme_filename = os.path.join(dirname, 'readme.md')
with open(readme_filename) as f:
lines = [line.strip() for line in f.readlines()]
top = lines.index('---')
bottom = lines.index('---', top + 1)
frontmatter = yaml.load('\n'.join(lines[top + 1:bottom]))
assert all(key in frontmatter for key in required_keys)
return dirname, frontmatter
def valid_dirname(dirname):
try:
return parse_readme_frontmatter(dirname)
except Exception as e:
print('ERROR: {}'.format(e))
raise argparse.ArgumentTypeError(
'Must be valid Caffe model directory with a correct readme.md')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Download trained model binary.')
parser.add_argument('dirname', type=valid_dirname)
args = parser.parse_args()
# A tiny hack: the dirname validator also returns readme YAML frontmatter.
dirname = args.dirname[0]
frontmatter = args.dirname[1]
model_filename = os.path.join(dirname, frontmatter['caffemodel'])
# Closure-d function for checking SHA1.
def model_checks_out(filename=model_filename, sha1=frontmatter['sha1']):
with open(filename, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest() == sha1
# Check if model exists.
if os.path.exists(model_filename) and model_checks_out():
print("Model already exists.")
sys.exit(0)
# Download and verify model.
urllib.request.urlretrieve(
frontmatter['caffemodel_url'], model_filename, reporthook)
if not model_checks_out():
print('ERROR: model did not download correctly! Run this again.')
sys.exit(1)
| 2,531 | 31.461538 | 78 | py |
Stochastic-Quantization | Stochastic-Quantization-master/caffe/scripts/copy_notebook.py | #!/usr/bin/env python
"""
Takes as arguments:
1. the path to a JSON file (such as an IPython notebook).
2. the path to output file
If 'metadata' dict in the JSON file contains 'include_in_docs': true,
then copies the file to output file, appending the 'metadata' property
as YAML front-matter, adding the field 'category' with value 'notebook'.
"""
import os
import sys
import json
filename = sys.argv[1]
output_filename = sys.argv[2]
content = json.load(open(filename))
if 'include_in_docs' in content['metadata'] and content['metadata']['include_in_docs']:
yaml_frontmatter = ['---']
    for key, val in content['metadata'].items():
if key == 'example_name':
key = 'title'
if val == '':
val = os.path.basename(filename)
yaml_frontmatter.append('{}: {}'.format(key, val))
yaml_frontmatter += ['category: notebook']
yaml_frontmatter += ['original_path: ' + filename]
with open(output_filename, 'w') as fo:
fo.write('\n'.join(yaml_frontmatter + ['---']) + '\n')
fo.write(open(filename).read())
| 1,089 | 32.030303 | 87 | py |
multi-head-attention-labeller | multi-head-attention-labeller-master/variants.py | from modules import *
import collections
import numpy
import pickle
import re
import tensorflow as tf
class Model(object):
"""
Implements several variants of the multi-head attention labeller (MHAL).
    These were mainly experimental, so don't take them for granted.
The performances reported are obtained with the main model, "model.py".
"""
def __init__(self, config, label2id_sent, label2id_tok):
self.config = config
self.label2id_sent = label2id_sent
self.label2id_tok = label2id_tok
self.UNK = "<unk>"
self.CUNK = "<cunk>"
self.word2id = None
self.char2id = None
self.singletons = None
self.num_heads = None
self.word_ids = None
self.char_ids = None
self.sentence_lengths = None
self.word_lengths = None
self.sentence_labels = None
self.word_labels = None
self.word_embeddings = None
self.char_embeddings = None
self.word_objective_weights = None
self.learning_rate = None
self.loss = None
self.initializer = None
self.is_training = None
self.session = None
self.saver = None
self.train_op = None
self.token_scores = None
self.sentence_scores = None
self.token_predictions = None
self.sentence_predictions = None
self.token_probabilities = None
self.sentence_probabilities = None
self.attention_weights = None
def build_vocabs(self, data_train, data_dev, data_test, embedding_path=None):
"""
        Builds the vocabulary based on the data and the embeddings info.
"""
data_source = list(data_train)
if self.config["vocab_include_devtest"]:
if data_dev is not None:
data_source += data_dev
if data_test is not None:
data_source += data_test
char_counter = collections.Counter()
word_counter = collections.Counter()
for sentence in data_source:
for token in sentence.tokens:
char_counter.update(token.value)
w = token.value
if self.config["lowercase"]:
w = w.lower()
if self.config["replace_digits"]:
w = re.sub(r'\d', '0', w)
word_counter[w] += 1
self.char2id = collections.OrderedDict([(self.CUNK, 0)])
for char, count in char_counter.most_common():
if char not in self.char2id:
self.char2id[char] = len(self.char2id)
self.word2id = collections.OrderedDict([(self.UNK, 0)])
for word, count in word_counter.most_common():
if self.config["min_word_freq"] <= 0 or count >= self.config["min_word_freq"]:
if word not in self.word2id:
self.word2id[word] = len(self.word2id)
self.singletons = set([word for word in word_counter if word_counter[word] == 1])
if embedding_path and self.config["vocab_only_embedded"]:
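            # Restrict the word vocabulary to words that also appear in the embedding file (UNK is always kept).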
embedding_vocab = {self.UNK}
with open(embedding_path) as f:
for line in f:
line_parts = line.strip().split()
if len(line_parts) <= 2:
continue
w = line_parts[0]
if self.config["lowercase"]:
w = w.lower()
if self.config["replace_digits"]:
w = re.sub(r'\d', '0', w)
embedding_vocab.add(w)
word2id_revised = collections.OrderedDict()
for word in self.word2id:
if word in embedding_vocab and word not in word2id_revised:
word2id_revised[word] = len(word2id_revised)
self.word2id = word2id_revised
print("Total number of words: " + str(len(self.word2id)))
print("Total number of chars: " + str(len(self.char2id)))
print("Total number of singletons: " + str(len(self.singletons)))
def construct_network(self):
"""
Constructs a certain variant of the multi-head attention labeller (MHAL).
"""
self.word_ids = tf.placeholder(tf.int32, [None, None], name="word_ids")
self.char_ids = tf.placeholder(tf.int32, [None, None, None], name="char_ids")
self.sentence_lengths = tf.placeholder(tf.int32, [None], name="sentence_lengths")
self.word_lengths = tf.placeholder(tf.int32, [None, None], name="word_lengths")
self.sentence_labels = tf.placeholder(tf.float32, [None], name="sentence_labels")
self.word_labels = tf.placeholder(tf.float32, [None, None], name="word_labels")
self.word_objective_weights = tf.placeholder(
tf.float32, [None, None], name="word_objective_weights")
self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
self.is_training = tf.placeholder(tf.int32, name="is_training")
self.loss = 0.0
if self.config["initializer"] == "normal":
self.initializer = tf.random_normal_initializer(stddev=0.1)
elif self.config["initializer"] == "glorot":
self.initializer = tf.glorot_uniform_initializer()
elif self.config["initializer"] == "xavier":
self.initializer = tf.glorot_normal_initializer()
zeros_initializer = tf.zeros_initializer()
self.word_embeddings = tf.get_variable(
name="word_embeddings",
shape=[len(self.word2id), self.config["word_embedding_size"]],
initializer=(zeros_initializer if self.config["emb_initial_zero"] else self.initializer),
trainable=(True if self.config["train_embeddings"] else False))
word_input_tensor = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)
if self.config["char_embedding_size"] > 0 and self.config["char_recurrent_size"] > 0:
with tf.variable_scope("chars"), tf.control_dependencies(
[tf.assert_equal(tf.shape(self.char_ids)[2],
tf.reduce_max(self.word_lengths),
message="Char dimensions don't match")]):
self.char_embeddings = tf.get_variable(
name="char_embeddings",
shape=[len(self.char2id), self.config["char_embedding_size"]],
initializer=self.initializer,
trainable=True)
char_input_tensor = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)
char_input_tensor_shape = tf.shape(char_input_tensor)
char_input_tensor = tf.reshape(
char_input_tensor,
shape=[char_input_tensor_shape[0]
* char_input_tensor_shape[1],
char_input_tensor_shape[2],
self.config["char_embedding_size"]])
_word_lengths = tf.reshape(
self.word_lengths, shape=[char_input_tensor_shape[0]
* char_input_tensor_shape[1]])
char_lstm_cell_fw = tf.nn.rnn_cell.LSTMCell(
self.config["char_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
char_lstm_cell_bw = tf.nn.rnn_cell.LSTMCell(
self.config["char_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
# Concatenate the final forward and the backward character contexts
# to obtain a compact character representation for each word.
_, ((_, char_output_fw), (_, char_output_bw)) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=char_lstm_cell_fw, cell_bw=char_lstm_cell_bw, inputs=char_input_tensor,
sequence_length=_word_lengths, dtype=tf.float32, time_major=False)
char_output_tensor = tf.concat([char_output_fw, char_output_bw], axis=-1)
char_output_tensor = tf.reshape(
char_output_tensor,
shape=[char_input_tensor_shape[0], char_input_tensor_shape[1],
2 * self.config["char_recurrent_size"]])
# Include a char-based language modelling loss, LMc.
if self.config["lm_cost_char_gamma"] > 0.0:
self.loss += self.config["lm_cost_char_gamma"] * \
self.construct_lm_cost(
input_tensor_fw=char_output_tensor,
input_tensor_bw=char_output_tensor,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="separate",
name="lm_cost_char_separate")
if self.config["lm_cost_joint_char_gamma"] > 0.0:
self.loss += self.config["lm_cost_joint_char_gamma"] * \
self.construct_lm_cost(
input_tensor_fw=char_output_tensor,
input_tensor_bw=char_output_tensor,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="joint",
name="lm_cost_char_joint")
if self.config["char_hidden_layer_size"] > 0:
char_output_tensor = tf.layers.dense(
inputs=char_output_tensor, units=self.config["char_hidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
if self.config["char_integration_method"] == "concat":
word_input_tensor = tf.concat([word_input_tensor, char_output_tensor], axis=-1)
elif self.config["char_integration_method"] == "none":
word_input_tensor = word_input_tensor
else:
raise ValueError("Unknown char integration method")
if self.config["dropout_input"] > 0.0:
dropout_input = (self.config["dropout_input"] * tf.cast(self.is_training, tf.float32)
+ (1.0 - tf.cast(self.is_training, tf.float32)))
word_input_tensor = tf.nn.dropout(
word_input_tensor, dropout_input, name="dropout_word")
word_lstm_cell_fw = tf.nn.rnn_cell.LSTMCell(
self.config["word_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
word_lstm_cell_bw = tf.nn.rnn_cell.LSTMCell(
self.config["word_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
with tf.control_dependencies(
[tf.assert_equal(
tf.shape(self.word_ids)[1],
tf.reduce_max(self.sentence_lengths),
message="Sentence dimensions don't match")]):
(lstm_outputs_fw, lstm_outputs_bw), ((_, lstm_output_fw), (_, lstm_output_bw)) = \
tf.nn.bidirectional_dynamic_rnn(
cell_fw=word_lstm_cell_fw, cell_bw=word_lstm_cell_bw, inputs=word_input_tensor,
sequence_length=self.sentence_lengths, dtype=tf.float32, time_major=False)
lstm_output_states = tf.concat([lstm_output_fw, lstm_output_bw], -1)
if self.config["dropout_word_lstm"] > 0.0:
dropout_word_lstm = (self.config["dropout_word_lstm"] * tf.cast(self.is_training, tf.float32)
+ (1.0 - tf.cast(self.is_training, tf.float32)))
lstm_outputs_fw = tf.nn.dropout(
lstm_outputs_fw, dropout_word_lstm,
noise_shape=tf.convert_to_tensor(
[tf.shape(self.word_ids)[0], 1, self.config["word_recurrent_size"]], dtype=tf.int32))
lstm_outputs_bw = tf.nn.dropout(
lstm_outputs_bw, dropout_word_lstm,
noise_shape=tf.convert_to_tensor(
[tf.shape(self.word_ids)[0], 1, self.config["word_recurrent_size"]], dtype=tf.int32))
lstm_output_states = tf.nn.dropout(lstm_output_states, dropout_word_lstm)
# The forward and backward states are concatenated at every token position.
        lstm_outputs_states = tf.concat([lstm_outputs_fw, lstm_outputs_bw], -1)  # [B, M, 2 * word_recurrent_size]
if self.config["whidden_layer_size"] > 0:
lstm_output_units = self.config["whidden_layer_size"]
num_heads = len(self.label2id_tok)
# Make the number of units a multiple of num_heads.
if lstm_output_units % num_heads != 0:
lstm_output_units = ceil(lstm_output_units / num_heads) * num_heads
lstm_outputs = tf.layers.dense(
inputs=lstm_outputs_states, units=lstm_output_units,
activation=tf.tanh, kernel_initializer=self.initializer) # [B, M, lstm_output_units]
else:
lstm_outputs = lstm_outputs_states
if self.config["model_type"] == "single_head_attention_binary_labels":
if not (len(self.label2id_tok) == 2 and len(self.label2id_sent) == 2):
raise ValueError(
"The model_type you selected (%s) is only available for "
"binary labels! Currently, the no. sentence_labels = %d and "
"the no. token_labels = %d. Consider changing the model type."
% (self.config["model_type"],
len(self.label2id_sent), len(self.label2id_tok)))
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions = \
single_head_attention_binary_labels(
inputs=lstm_outputs,
initializer=self.initializer,
attention_size=self.config["attention_evidence_size"],
sentence_lengths=self.sentence_lengths,
hidden_units=self.config["hidden_layer_size"])
# Include a token-level loss (for sequence labelling).
word_objective_loss = tf.square(self.token_scores - self.word_labels)
word_objective_loss = tf.where(
tf.sequence_mask(self.sentence_lengths),
word_objective_loss, tf.zeros_like(word_objective_loss))
self.loss += self.config["word_objective_weight"] * tf.reduce_sum(
self.word_objective_weights * word_objective_loss)
# Include a sentence-level loss (for sentence classification).
sentence_objective_loss = tf.square(self.sentence_scores - self.sentence_labels)
self.loss += self.config["sentence_objective_weight"] * tf.reduce_sum(sentence_objective_loss)
# Include an attention-level loss for wiring the two hierarchical levels.
if self.config["attention_objective_weight"] > 0.0:
self.loss += self.config["attention_objective_weight"] * (
tf.reduce_sum(
tf.square(
tf.reduce_max(
tf.where(
tf.sequence_mask(self.sentence_lengths),
self.token_scores,
tf.zeros_like(self.token_scores) - 1e6),
axis=-1) - self.sentence_labels))
+
tf.reduce_sum(
tf.square(
tf.reduce_min(
tf.where(
tf.sequence_mask(self.sentence_lengths),
self.token_scores,
tf.zeros_like(self.token_scores) + 1e6),
axis=-1) - 0.0)))
else:
scoring_activation = None
if "scoring_activation" in self.config and self.config["scoring_activation"]:
if self.config["scoring_activation"] == "tanh":
scoring_activation = tf.tanh
elif self.config["scoring_activation"] == "sigmoid":
scoring_activation = tf.sigmoid
elif self.config["scoring_activation"] == "relu":
scoring_activation = tf.nn.relu
elif self.config["scoring_activation"] == "softmax":
scoring_activation = tf.nn.softmax
if "baseline_lstm_last_contexts" in self.config["model_type"]:
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = baseline_lstm_last_contexts(
last_token_contexts=lstm_outputs_states,
last_context=lstm_output_states,
initializer=self.initializer,
scoring_activation=scoring_activation,
sentence_lengths=self.sentence_lengths,
hidden_units=self.config["hidden_layer_size"],
num_sentence_labels=len(self.label2id_sent),
num_token_labels=len(self.label2id_tok))
elif self.config["model_type"] == "single_head_attention_multiple_labels":
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = single_head_attention_multiple_labels(
inputs=lstm_outputs,
initializer=self.initializer,
attention_activation=self.config["attention_activation"],
attention_size=self.config["attention_evidence_size"],
sentence_lengths=self.sentence_lengths,
hidden_units=self.config["hidden_layer_size"],
num_sentence_labels=len(self.label2id_sent),
num_token_labels=len(self.label2id_tok))
elif self.config["model_type"] == "multi_head_attention_with_scores_from_shared_heads":
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = multi_head_attention_with_scores_from_shared_heads(
inputs=lstm_outputs,
initializer=self.initializer,
attention_activation=self.config["attention_activation"],
hidden_units=self.config["hidden_layer_size"],
num_sentence_labels=len(self.label2id_sent),
num_heads=len(self.label2id_tok),
is_training=self.is_training,
dropout=self.config["dropout_attention"],
sentence_lengths=self.sentence_lengths,
use_residual_connection=self.config["residual_connection"],
token_scoring_method=self.config["token_scoring_method"])
elif self.config["model_type"] == "multi_head_attention_with_scores_from_separate_heads":
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = multi_head_attention_with_scores_from_separate_heads(
inputs=lstm_outputs,
initializer=self.initializer,
attention_activation=self.config["attention_activation"],
num_sentence_labels=len(self.label2id_sent),
num_heads=len(self.label2id_tok),
is_training=self.is_training,
dropout=self.config["dropout_attention"],
sentence_lengths=self.sentence_lengths,
normalize_sentence=self.config["normalize_sentence"],
token_scoring_method=self.config["token_scoring_method"],
scoring_activation=scoring_activation,
separate_heads=self.config["separate_heads"])
elif self.config["model_type"] == "single_head_attention_multiple_transformations":
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = single_head_attention_multiple_transformations(
inputs=lstm_outputs,
initializer=self.initializer,
attention_activation=self.config["attention_activation"],
num_sentence_labels=len(self.label2id_sent),
num_heads=len(self.label2id_tok),
sentence_lengths=self.sentence_lengths,
token_scoring_method=self.config["token_scoring_method"],
scoring_activation=scoring_activation,
how_to_compute_attention=self.config["how_to_compute_attention"],
separate_heads=self.config["separate_heads"])
elif self.config["model_type"] == "variant_1":
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = variant_1(
inputs=lstm_outputs,
initializer=self.initializer,
attention_activation=self.config["attention_activation"],
num_sentence_labels=len(self.label2id_sent),
num_heads=len(self.label2id_tok),
hidden_units=self.config["hidden_layer_size"],
sentence_lengths=self.sentence_lengths,
scoring_activation=scoring_activation,
token_scoring_method=self.config["token_scoring_method"],
use_inputs_instead_values=self.config["use_inputs_instead_values"],
separate_heads=self.config["separate_heads"])
elif self.config["model_type"] == "variant_2":
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = variant_2(
inputs=lstm_outputs,
initializer=self.initializer,
attention_activation=self.config["attention_activation"],
num_sentence_labels=len(self.label2id_sent),
num_heads=len(self.label2id_tok),
hidden_units=self.config["hidden_layer_size"],
sentence_lengths=self.sentence_lengths,
scoring_activation=scoring_activation,
use_inputs_instead_values=self.config["use_inputs_instead_values"],
separate_heads=self.config["separate_heads"])
elif self.config["model_type"] == "variant_3":
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = variant_3(
inputs=lstm_outputs,
initializer=self.initializer,
attention_activation=self.config["attention_activation"],
num_sentence_labels=len(self.label2id_sent),
num_heads=len(self.label2id_tok),
attention_size=self.config["attention_evidence_size"],
sentence_lengths=self.sentence_lengths,
scoring_activation=scoring_activation,
separate_heads=self.config["separate_heads"])
elif self.config["model_type"] == "variant_4":
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = variant_4(
inputs=lstm_outputs,
initializer=self.initializer,
attention_activation=self.config["attention_activation"],
num_sentence_labels=len(self.label2id_sent),
num_heads=len(self.label2id_tok),
hidden_units=self.config["hidden_layer_size"],
sentence_lengths=self.sentence_lengths,
scoring_activation=scoring_activation,
token_scoring_method=self.config["token_scoring_method"],
use_inputs_instead_values=self.config["use_inputs_instead_values"],
separate_heads=self.config["separate_heads"])
elif self.config["model_type"] == "variant_5":
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = variant_5(
inputs=lstm_outputs,
initializer=self.initializer,
attention_activation=self.config["attention_activation"],
num_sentence_labels=len(self.label2id_sent),
num_heads=len(self.label2id_tok),
hidden_units=self.config["hidden_layer_size"],
sentence_lengths=self.sentence_lengths,
scoring_activation=scoring_activation,
token_scoring_method=self.config["token_scoring_method"],
use_inputs_instead_values=self.config["use_inputs_instead_values"],
separate_heads=self.config["separate_heads"])
elif self.config["model_type"] == "variant_6":
self.sentence_scores, self.sentence_predictions, \
self.token_scores, self.token_predictions, \
self.token_probabilities, self.sentence_probabilities, \
self.attention_weights = variant_6(
inputs=lstm_outputs,
initializer=self.initializer,
attention_activation=self.config["attention_activation"],
num_sentence_labels=len(self.label2id_sent),
num_heads=len(self.label2id_tok),
hidden_units=self.config["hidden_layer_size"],
scoring_activation=scoring_activation,
token_scoring_method=self.config["token_scoring_method"],
separate_heads=self.config["separate_heads"])
else:
raise ValueError("Unknown/unsupported model type: %s"
% self.config["model_type"])
# Include a token-level loss (for sequence labelling).
if self.config["word_objective_weight"] > 0:
if self.config["token_labels_available"]:
word_objective_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.token_scores, labels=tf.cast(self.word_labels, tf.int32))
word_objective_loss = tf.where(
tf.sequence_mask(self.sentence_lengths),
word_objective_loss, tf.zeros_like(word_objective_loss))
self.loss += self.config["word_objective_weight"] * tf.reduce_sum(
self.word_objective_weights * word_objective_loss)
else:
raise ValueError(
"No token labels available! You cannot supervise on the token-level"
" so please change \"word_objective_weight\" to 0"
" or provide token-annotated files.")
# Include a sentence-level loss (for sentence classification).
if self.config["sentence_objective_weight"] > 0:
sentence_objective_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.sentence_scores, labels=tf.cast(self.sentence_labels, tf.int32))
self.loss += self.config["sentence_objective_weight"] * tf.reduce_sum(sentence_objective_loss)
# Mask the token scores that do not fall in the range of the true sentence length.
# Do this for each head (change shape from [B, M] to [B, M, num_heads]).
tiled_sentence_lengths = tf.tile(
input=tf.expand_dims(
tf.sequence_mask(self.sentence_lengths), axis=-1),
multiples=[1, 1, len(self.label2id_tok)])
self.token_probabilities = tf.where(
tiled_sentence_lengths,
self.token_probabilities,
tf.zeros_like(self.token_probabilities))
if self.config["attention_objective_weight"] > 0.0:
attention_loss = compute_attention_loss(
self.token_probabilities,
self.sentence_labels,
num_sent_labels=len(self.label2id_sent),
num_tok_labels=len(self.label2id_tok),
approach=self.config["aux_loss_approach"],
compute_pairwise=self.config["compute_pairwise_attention"])
self.loss += self.config["attention_objective_weight"] * tf.reduce_sum(attention_loss)
# Apply a gap-distance loss.
if self.config["gap_objective_weight"] > 0.0:
gap_distance_loss = compute_gap_distance_loss(
self.token_probabilities,
self.sentence_labels,
num_sent_labels=len(self.label2id_sent),
num_tok_labels=len(self.label2id_tok),
minimum_gap_distance=self.config["minimum_gap_distance"],
approach=self.config["aux_loss_approach"],
type_distance=self.config["type_distance"])
self.loss += self.config["gap_objective_weight"] * tf.reduce_sum(gap_distance_loss)
# Include a word-based language modelling loss, LMw.
if self.config["lm_cost_lstm_gamma"] > 0.0:
self.loss += self.config["lm_cost_lstm_gamma"] * self.construct_lm_cost(
input_tensor_fw=lstm_outputs_fw,
input_tensor_bw=lstm_outputs_bw,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="separate",
name="lm_cost_lstm_separate")
if self.config["lm_cost_joint_lstm_gamma"] > 0.0:
self.loss += self.config["lm_cost_joint_lstm_gamma"] * self.construct_lm_cost(
input_tensor_fw=lstm_outputs_fw,
input_tensor_bw=lstm_outputs_bw,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="joint",
name="lm_cost_lstm_joint")
self.train_op = self.construct_optimizer(
opt_strategy=self.config["opt_strategy"],
loss=self.loss,
learning_rate=self.learning_rate,
clip=self.config["clip"])
print("Notwork built.")
def construct_lm_cost(
self, input_tensor_fw, input_tensor_bw,
sentence_lengths, target_ids, lm_cost_type, name):
"""
Constructs the char/word-based language modelling objective.
"""
with tf.variable_scope(name):
lm_cost_max_vocab_size = min(
len(self.word2id), self.config["lm_cost_max_vocab_size"])
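            # Word ids at or beyond the LM vocabulary cap are mapped to the last id, a shared bucket for rare words.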
target_ids = tf.where(
tf.greater_equal(target_ids, lm_cost_max_vocab_size - 1),
x=(lm_cost_max_vocab_size - 1) + tf.zeros_like(target_ids),
y=target_ids)
cost = 0.0
if lm_cost_type == "separate":
lm_cost_fw_mask = tf.sequence_mask(
sentence_lengths, maxlen=tf.shape(target_ids)[1])[:, 1:]
lm_cost_bw_mask = tf.sequence_mask(
sentence_lengths, maxlen=tf.shape(target_ids)[1])[:, :-1]
lm_cost_fw = self._construct_lm_cost(
input_tensor_fw[:, :-1, :],
lm_cost_max_vocab_size,
lm_cost_fw_mask,
target_ids[:, 1:],
name=name + "_fw")
lm_cost_bw = self._construct_lm_cost(
input_tensor_bw[:, 1:, :],
lm_cost_max_vocab_size,
lm_cost_bw_mask,
target_ids[:, :-1],
name=name + "_bw")
cost += lm_cost_fw + lm_cost_bw
elif lm_cost_type == "joint":
joint_input_tensor = tf.concat(
[input_tensor_fw[:, :-2, :], input_tensor_bw[:, 2:, :]], axis=-1)
lm_cost_mask = tf.sequence_mask(
sentence_lengths, maxlen=tf.shape(target_ids)[1])[:, 1:-1]
cost += self._construct_lm_cost(
joint_input_tensor,
lm_cost_max_vocab_size,
lm_cost_mask,
target_ids[:, 1:-1],
name=name + "_joint")
else:
raise ValueError("Unknown lm_cost_type: %s." % lm_cost_type)
return cost
def _construct_lm_cost(
self, input_tensor, lm_cost_max_vocab_size,
lm_cost_mask, target_ids, name):
with tf.variable_scope(name):
lm_cost_hidden_layer = tf.layers.dense(
inputs=input_tensor, units=self.config["lm_cost_hidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
lm_cost_output = tf.layers.dense(
inputs=lm_cost_hidden_layer, units=lm_cost_max_vocab_size,
kernel_initializer=self.initializer)
lm_cost_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=lm_cost_output, labels=target_ids)
lm_cost_loss = tf.where(lm_cost_mask, lm_cost_loss, tf.zeros_like(lm_cost_loss))
return tf.reduce_sum(lm_cost_loss)
@staticmethod
def construct_optimizer(opt_strategy, loss, learning_rate, clip):
"""
Applies an optimization strategy to minimize the loss.
"""
if opt_strategy == "adadelta":
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
elif opt_strategy == "adam":
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
elif opt_strategy == "sgd":
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
else:
raise ValueError("Unknown optimisation strategy: %s." % opt_strategy)
if clip > 0.0:
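            # Clip gradients by their global norm before applying the update.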
grads, vs = zip(*optimizer.compute_gradients(loss))
grads, gnorm = tf.clip_by_global_norm(grads, clip)
train_op = optimizer.apply_gradients(zip(grads, vs))
else:
train_op = optimizer.minimize(loss)
return train_op
def preload_word_embeddings(self, embedding_path):
"""
        Loads the pre-trained word embeddings into the embedding matrix
        for every vocabulary word found in the embedding file.
"""
loaded_embeddings = set()
embedding_matrix = self.session.run(self.word_embeddings)
with open(embedding_path) as f:
for line in f:
line_parts = line.strip().split()
if len(line_parts) <= 2:
continue
w = line_parts[0]
if self.config["lowercase"]:
w = w.lower()
if self.config["replace_digits"]:
w = re.sub(r'\d', '0', w)
if w in self.word2id and w not in loaded_embeddings:
word_id = self.word2id[w]
embedding = numpy.array(line_parts[1:])
embedding_matrix[word_id] = embedding
loaded_embeddings.add(w)
self.session.run(self.word_embeddings.assign(embedding_matrix))
print("No. of pre-loaded embeddings: %d." % len(loaded_embeddings))
@staticmethod
def translate2id(
token, token2id, unk_token=None, lowercase=False,
replace_digits=False, singletons=None, singletons_prob=0.0):
"""
Maps each token/character to its index.
"""
if lowercase:
token = token.lower()
if replace_digits:
token = re.sub(r'\d', '0', token)
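        # Occasionally map training singletons to UNK so the model learns to handle unseen words.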
if singletons and token in singletons \
and token in token2id and unk_token \
and numpy.random.uniform() < singletons_prob:
token_id = token2id[unk_token]
elif token in token2id:
token_id = token2id[token]
elif unk_token:
token_id = token2id[unk_token]
else:
raise ValueError("Unable to handle value, no UNK token: %s." % token)
return token_id
def create_input_dictionary_for_batch(self, batch, is_training, learning_rate):
"""
        Creates the dictionary fed to the TF model.
"""
sentence_lengths = numpy.array([len(sentence.tokens) for sentence in batch])
max_sentence_length = sentence_lengths.max()
max_word_length = numpy.array(
[numpy.array([len(token.value) for token in sentence.tokens]).max()
for sentence in batch]).max()
if 0 < self.config["allowed_word_length"] < max_word_length:
max_word_length = min(max_word_length, self.config["allowed_word_length"])
word_ids = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.int32)
char_ids = numpy.zeros(
(len(batch), max_sentence_length, max_word_length), dtype=numpy.int32)
word_lengths = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.int32)
word_labels = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.float32)
sentence_labels = numpy.zeros(
(len(batch)), dtype=numpy.float32)
word_objective_weights = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.float32)
sentence_objective_weights = numpy.zeros((len(batch)), dtype=numpy.float32)
# A proportion of the singletons are assigned to UNK (do this just for training).
singletons = self.singletons if is_training else None
singletons_prob = self.config["singletons_prob"] if is_training else 0.0
for i, sentence in enumerate(batch):
sentence_labels[i] = sentence.label_sent
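            # Sentences with a non-default label can receive a custom weight in the sentence-level objective.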
if sentence_labels[i] != 0:
if self.config["sentence_objective_weights_non_default"] > 0.0:
sentence_objective_weights[i] = self.config[
"sentence_objective_weights_non_default"]
else:
sentence_objective_weights[i] = 1.0
else:
sentence_objective_weights[i] = 1.0
for j, token in enumerate(sentence.tokens):
word_ids[i][j] = self.translate2id(
token=token.value,
token2id=self.word2id,
unk_token=self.UNK,
lowercase=self.config["lowercase"],
replace_digits=self.config["replace_digits"],
singletons=singletons,
singletons_prob=singletons_prob)
word_labels[i][j] = token.label_tok
word_lengths[i][j] = len(token.value)
for k in range(min(len(token.value), max_word_length)):
char_ids[i][j][k] = self.translate2id(
token=token.value[k],
token2id=self.char2id,
unk_token=self.CUNK)
if token.enable_supervision is True:
word_objective_weights[i][j] = 1.0
input_dictionary = {
self.word_ids: word_ids,
self.char_ids: char_ids,
self.sentence_lengths: sentence_lengths,
self.word_lengths: word_lengths,
self.sentence_labels: sentence_labels,
self.word_labels: word_labels,
self.word_objective_weights: word_objective_weights,
self.learning_rate: learning_rate,
self.is_training: is_training}
return input_dictionary
def process_batch(self, batch, is_training, learning_rate):
"""
Processes a batch of sentences.
:param batch: a set of sentences of size "max_batch_size".
:param is_training: whether the current batch is a training instance or not.
:param learning_rate: the pace at which learning should be performed.
:return: the cost, the sentence predictions, the sentence label distribution,
the token predictions and the token label distribution.
"""
feed_dict = self.create_input_dictionary_for_batch(batch, is_training, learning_rate)
cost, sentence_pred, sentence_prob, token_pred, token_prob = self.session.run(
[self.loss, self.sentence_predictions, self.sentence_probabilities,
self.token_predictions, self.token_probabilities] +
([self.train_op] if is_training else []), feed_dict=feed_dict)[:5]
return cost, sentence_pred, sentence_prob, token_pred, token_prob
def initialize_session(self):
"""
Initializes a tensorflow session and sets the random seed.
"""
tf.set_random_seed(self.config["random_seed"])
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = self.config["tf_allow_growth"]
session_config.gpu_options.per_process_gpu_memory_fraction = self.config[
"tf_per_process_gpu_memory_fraction"]
self.session = tf.Session(config=session_config)
self.session.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=1)
@staticmethod
def get_parameter_count():
"""
Counts the total number of parameters.
"""
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
return total_parameters
def get_parameter_count_without_word_embeddings(self):
"""
Counts the number of parameters without those introduced by word embeddings.
"""
shape = self.word_embeddings.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
return self.get_parameter_count() - variable_parameters
def save(self, filename):
"""
Saves a trained model to the path in filename.
"""
dump = dict()
dump["config"] = self.config
dump["label2id_sent"] = self.label2id_sent
dump["label2id_tok"] = self.label2id_tok
dump["UNK"] = self.UNK
dump["CUNK"] = self.CUNK
dump["word2id"] = self.word2id
dump["char2id"] = self.char2id
dump["singletons"] = self.singletons
dump["params"] = {}
for variable in tf.global_variables():
assert (
variable.name not in dump["params"]), \
"Error: variable with this name already exists: %s." % variable.name
dump["params"][variable.name] = self.session.run(variable)
with open(filename, 'wb') as f:
pickle.dump(dump, f, protocol=pickle.HIGHEST_PROTOCOL)
@staticmethod
def load(filename, new_config=None):
"""
Loads a pre-trained MHAL model.
"""
with open(filename, 'rb') as f:
dump = pickle.load(f)
dump["config"]["save"] = None
# Use the saved config, except for values that are present in the new config.
if new_config:
for key in new_config:
dump["config"][key] = new_config[key]
labeler = Model(dump["config"], dump["label2id_sent"], dump["label2id_tok"])
labeler.UNK = dump["UNK"]
labeler.CUNK = dump["CUNK"]
labeler.word2id = dump["word2id"]
labeler.char2id = dump["char2id"]
labeler.singletons = dump["singletons"]
labeler.construct_network()
labeler.initialize_session()
labeler.load_params(filename)
return labeler
def load_params(self, filename):
"""
Loads the parameters of a trained model.
"""
with open(filename, 'rb') as f:
dump = pickle.load(f)
for variable in tf.global_variables():
assert (variable.name in dump["params"]), \
"Variable not in dump: %s." % variable.name
assert (variable.shape == dump["params"][variable.name].shape), \
"Variable shape not as expected: %s, of shape %s. %s" % (
variable.name, str(variable.shape),
str(dump["params"][variable.name].shape))
value = numpy.asarray(dump["params"][variable.name])
self.session.run(variable.assign(value))
| 47,419 | 49.879828 | 110 | py |
multi-head-attention-labeller | multi-head-attention-labeller-master/experiment.py | from collections import Counter
from collections import OrderedDict
from evaluator import Evaluator
from model import Model
# from second_model import Model
# from variants import Model
import gc
import math
import numpy as np
import os
import pandas as pd
import random
import sys
import time
import visualize
import warnings
warnings.filterwarnings("ignore")
if sys.version_info[0] < 3:
import ConfigParser as configparser
else:
import configparser
class Token:
"""
Representation of a single token. Each token has a value, a label,
and a supervision state, which can be enabled or disabled.
"""
unique_labels_tok = set()
labels_tok_dict = {}
def __init__(self, value, label, enable_supervision):
self.value = value
self.label_tok = label
self.enable_supervision = True
if "off" in enable_supervision:
self.enable_supervision = False
self.unique_labels_tok.add(label)
if label not in self.labels_tok_dict.keys():
self.labels_tok_dict[label] = 0
self.labels_tok_dict[label] += 1
class Sentence:
"""
Representation of a sentence as a list of tokens which are of
class Token, each having a value, label and supervision state.
Each sentence is assigned a label which can be either inferred
    from its tokens (binary/majority) or specified by the user
    (in which case the last line is "sent_label" followed by the label).
"""
unique_labels_sent = set()
labels_sent_dict = {}
def __init__(self):
self.tokens = []
self.label_sent = None
def add_token(self, value, label, enable_supervision,
sentence_label_type, default_label):
"""
Adds a token with the specified value, label and state to the list of tokens.
If the token value is "sent_label" then, instead of adding a token, it sets
the sentence label (needing a sentence_label_type and a default_label).
:param value: str, the token value (i.e. what's the actual word, precisely)
:param label: str, the label of the current token
:param enable_supervision: str, whether to allow supervision or not
:param sentence_label_type: str, type of sentence label assignment to expect
(binary, majority, specified). Should be set by "sentence_label" in config.
:param default_label: str, the default label, set by "default_label" in config.
"""
if value == "sent_label":
self.set_label(sentence_label_type, default_label, label)
else:
token = Token(value, label, enable_supervision)
self.tokens.append(token)
def set_label(self, sentence_label_type, default_label, label=None):
"""
Sets the label of the sentence, according to "sentence_label_type"
specified in config, which can be "specified", "majority", or "binary".
The "default_label" is also needed to infer the binary labels.
:param sentence_label_type: str
:param default_label: str
:param label: str
"""
if sentence_label_type == "specified":
assert label is not None or self.label_sent is not None, "Sentence label missing!"
if label is not None:
self.label_sent = label
elif label is None and sentence_label_type == "majority":
majority_label = Counter(
[token.label_tok for token in self.tokens]).most_common()[0][0]
if majority_label is not None:
self.label_sent = majority_label
else:
raise ValueError("Majority label is None! Sentence tokens: ", self.tokens)
elif label is None and sentence_label_type == "binary":
non_default_token_labels = sum(
[0 if token.label_tok == default_label else 1 for token in self.tokens])
if non_default_token_labels > 0:
self.label_sent = "1" # non-default_label
else:
self.label_sent = "0" # default_label
if self.label_sent is not None:
self.unique_labels_sent.add(self.label_sent)
if self.label_sent not in self.labels_sent_dict.keys():
self.labels_sent_dict[self.label_sent] = 0
self.labels_sent_dict[self.label_sent] += 1
def print_sentence(self):
"""
Prints a sentence in this format: "sent_label: tok_i(label_i, is_supervision_enabled_i)".
:rtype: int, representing the number of tokens enabled in this sentence
"""
to_print = []
num_tokens_enabled = 0
for token in self.tokens:
to_print.append("%s (%s, %s)" % (token.value, token.label_tok, token.enable_supervision))
if token.enable_supervision:
num_tokens_enabled += 1
print("sent %s: %s\n" % (self.label_sent, " ".join(to_print)))
if self.tokens[0].enable_supervision:
assert num_tokens_enabled == len(self.tokens), \
"Number of tokens enabled does not equal the number of tokens in the sentence!"
return num_tokens_enabled
class Experiment:
"""
Start an experiment using MHAL.
"""
def __init__(self):
self.config = None
self.label2id_sent = None
self.label2id_tok = None
def read_input_files(self, file_paths, max_sentence_length=-1):
"""
Reads input files in whitespace-separated format.
Splits file_paths on comma, reading from multiple files.
Expects one token per line: first column = value, last column = label.
If the sentence label is already specified in the input file, it needs to have:
first column = "sent_label" and config["sentence_label"] = specified.
If the sentence label is not specified, it will be inferred from the data
depending on the value of config["sentence_label"]. Can be set to majority or binary.
:type file_paths: str
:type max_sentence_length: int
:rtype: list of Sentence objects
"""
sentences = []
line_length = None
sentence = Sentence()
for file_path in file_paths.strip().split(","):
with open(file_path) as f:
for line in f:
line = line.strip()
if len(line) > 0:
line_parts = line.split()
assert len(line_parts) >= 2, \
"Line parts less than 2: %s\n" % line
assert len(line_parts) == line_length or line_length is None, \
"Inconsistent line parts: expected %d, but got %d for line %s." % (
                                line_length, len(line_parts), line)
line_length = len(line_parts)
# The first element on the line is the token value, while the last is the token label.
# If there is a penultimate column whose value is either "on" or "off", it indicates
# whether supervision on this token is enabled or not. If there is no such element,
# we implicitly assume that supervision is possible and turn it on.
sentence.add_token(
value=line_parts[0], label=line_parts[-1],
enable_supervision=line_parts[-2] if len(line_parts) > 2 else "on",
sentence_label_type=self.config["sentence_label"],
default_label=self.config["default_label"])
elif len(line) == 0 and len(sentence.tokens) > 0:
if max_sentence_length <= 0 or len(sentence.tokens) <= max_sentence_length:
sentence.set_label(
sentence_label_type=self.config["sentence_label"],
default_label=self.config["default_label"])
sentences.append(sentence)
sentence = Sentence()
if len(sentence.tokens) > 0:
if max_sentence_length <= 0 or len(sentence.tokens) <= max_sentence_length:
sentence.set_label(
sentence_label_type=self.config["sentence_label"],
default_label=self.config["default_label"])
sentences.append(sentence)
sentence = Sentence()
return sentences
def create_labels_mapping(self, unique_labels):
"""
        Maps a set of U unique labels to indices in [0, U).
The default label (if it exists) will receive index 0.
All other labels get the index corresponding to their natural order.
:type unique_labels: set
:rtype: dict
"""
if self.config["default_label"] and self.config["default_label"] in unique_labels:
            sorted_labels = sorted(list(unique_labels.difference({self.config["default_label"]})))
label2id = {label: index + 1 for index, label in enumerate(sorted_labels)}
label2id[self.config["default_label"]] = 0
else:
sorted_labels = sorted(list(unique_labels))
label2id = {label: index for index, label in enumerate(sorted_labels)}
return label2id
def convert_labels(self, data):
"""
Converts each sentence and token label to its corresponding index.
:type data: list[Sentence]
:rtype: list[Sentence]
"""
for sentence in data:
current_label_sent = sentence.label_sent
try:
sentence.label_sent = self.label2id_sent[current_label_sent]
except KeyError:
print("Key error for ", current_label_sent)
print("Sentence: ", [token.value for token in sentence.tokens])
print("Label to id", self.label2id_sent)
for token in sentence.tokens:
current_label_tok = token.label_tok
token.label_tok = self.label2id_tok[current_label_tok]
return data
def parse_config(self, config_section, config_path):
"""
Reads the configuration file, guessing the correct data type for each value.
:type config_section: str
:type config_path: str
:rtype: dict
"""
config_parser = configparser.ConfigParser(allow_no_value=True)
config_parser.read(config_path)
config = OrderedDict()
for key, value in config_parser.items(config_section):
if value is None or len(value.strip()) == 0:
config[key] = None
elif value.lower() in ["true", "false"]:
config[key] = config_parser.getboolean(config_section, key)
elif value.isdigit():
config[key] = config_parser.getint(config_section, key)
elif self.is_float(value):
config[key] = config_parser.getfloat(config_section, key)
else:
config[key] = config_parser.get(config_section, key)
return config
@staticmethod
def is_float(value):
"""
Checks if value is of type float.
:type value: any type
:rtype: bool
"""
try:
float(value)
return True
except ValueError:
return False
@staticmethod
def create_batches_of_sentence_ids(sentences, batch_equal_size, max_batch_size):
"""
Creates batches of sentence ids. A positive max_batch_size determines
the maximum number of sentences in each batch. A negative max_batch_size
dynamically creates the batches such that each batch contains
abs(max_batch_size) words. Returns a list of lists with sentences ids.
:type sentences: List[Sentence]
:type batch_equal_size: bool
:type max_batch_size: int
:rtype: List[List[int]]
"""
batches_of_sentence_ids = []
if batch_equal_size:
sentence_ids_by_length = OrderedDict()
for i in range(len(sentences)):
length = len(sentences[i].tokens)
if length not in sentence_ids_by_length:
sentence_ids_by_length[length] = []
sentence_ids_by_length[length].append(i)
for sentence_length in sentence_ids_by_length:
if max_batch_size > 0:
batch_size = max_batch_size
else:
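                    # A negative max_batch_size acts as a word budget: fit as many sentences of this length as possible.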
batch_size = int((-1 * max_batch_size) / sentence_length)
for i in range(0, len(sentence_ids_by_length[sentence_length]), batch_size):
batches_of_sentence_ids.append(
sentence_ids_by_length[sentence_length][i:i + batch_size])
else:
current_batch = []
max_sentence_length = 0
for i in range(len(sentences)):
current_batch.append(i)
if len(sentences[i].tokens) > max_sentence_length:
max_sentence_length = len(sentences[i].tokens)
if ((0 < max_batch_size <= len(current_batch))
or (max_batch_size <= 0
and len(current_batch) * max_sentence_length >= (-1 * max_batch_size))):
batches_of_sentence_ids.append(current_batch)
current_batch = []
max_sentence_length = 0
if len(current_batch) > 0:
batches_of_sentence_ids.append(current_batch)
return batches_of_sentence_ids
def process_sentences(self, sentences, model, is_training, learning_rate, name):
"""
Obtains predictions and returns the evaluation metrics.
:type sentences: List[Sentence]
:type model: Model
:type is_training: bool
:type learning_rate: float
:type name: str
        :rtype: dict of evaluation results
"""
evaluator = Evaluator(self.label2id_sent, self.label2id_tok,
self.config["conll03_eval"])
batches_of_sentence_ids = self.create_batches_of_sentence_ids(
sentences, self.config["batch_equal_size"], self.config["max_batch_size"])
if is_training:
random.shuffle(batches_of_sentence_ids)
all_batches, all_sentence_probs, all_token_probs = [], [], []
for batch_of_sentence_ids in batches_of_sentence_ids:
batch = [sentences[i] for i in batch_of_sentence_ids]
cost, sentence_pred, sentence_probs, token_pred, token_probs = \
model.process_batch(batch, is_training, learning_rate)
evaluator.append_data(cost, batch, sentence_pred, token_pred)
if "test" in name and self.config["plot_predictions_html"]:
all_batches.append(batch)
all_sentence_probs.append(sentence_probs)
all_token_probs.append(token_probs)
# Plot the token scores for each sentence in the batch.
if "test" in name and self.config["plot_token_scores"]:
for sentence, token_proba_per_sentence, sent_pred in zip(batch, token_probs, sentence_pred):
if sentence.label_sent != 0 and sentence.label_sent == sent_pred and len(sentence.tokens) > 5:
visualize.plot_token_scores(
token_probs=token_proba_per_sentence,
sentence=sentence,
id2label_tok=evaluator.id2label_tok,
plot_name=self.config["path_plot_token_scores"])
while self.config["garbage_collection"] and gc.collect() > 0:
pass
results = evaluator.get_results(
name=name, token_labels_available=self.config["token_labels_available"])
for key in results:
print("%s_%s: %s" % (name, key, str(results[key])))
evaluator.get_results_nice_print(
name=name, token_labels_available=self.config["token_labels_available"])
# Create html visualizations based on the test set predictions.
if "test" in name and self.config["plot_predictions_html"]:
save_name = (self.config["to_write_filename"].split("/")[-1]).split(".")[0]
visualize.plot_predictions(
all_sentences=all_batches,
all_sentence_probs=all_sentence_probs,
all_token_probs=all_token_probs,
id2label_tok=evaluator.id2label_tok,
html_name=self.config["path_plot_predictions_html"] + "/%s" % save_name,
sent_binary=len(self.label2id_sent) == 2)
return results
def run_baseline(self):
"""
Runs majority and random baselines.
"""
if self.config["path_train"] and len(self.config["path_train"]) > 0:
data_train = []
for path_train in self.config["path_train"].strip().split(":"):
data_train += self.read_input_files(
file_paths=path_train,
max_sentence_length=self.config["max_train_sent_length"])
majority_sentence_label = Counter(Sentence.labels_sent_dict).most_common(1)[0][0]
majority_token_label = Counter(Token.labels_tok_dict).most_common(1)[0][0]
print("Most common sentence label (as in the train set) = ", majority_sentence_label)
print("Most common token label (as in the train set) = ", majority_token_label)
self.label2id_sent = self.create_labels_mapping(Sentence.unique_labels_sent)
self.label2id_tok = self.create_labels_mapping(Token.unique_labels_tok)
print("Sentence labels to id: ", self.label2id_sent)
print("Token labels to id: ", self.label2id_tok)
df_results = None
if self.config["path_test"] is not None:
i = 0
for path_test in self.config["path_test"].strip().split(":"):
data_test = self.read_input_files(path_test)
data_test = self.convert_labels(data_test)
# Majority baseline.
majority_pred_sent = [self.label2id_sent[majority_sentence_label]] * len(data_test)
majority_pred_tok = []
for sentence in data_test:
majority_pred_tok.append(
[self.label2id_tok[majority_token_label]] * len(sentence.tokens))
majority_evaluator = Evaluator(
self.label2id_sent, self.label2id_tok, self.config["conll03_eval"])
majority_evaluator.append_data(
0.0, data_test, majority_pred_sent, majority_pred_tok)
name = "majority_test" + str(i)
results = majority_evaluator.get_results(
name=name, token_labels_available=self.config["token_labels_available"])
for key in results:
print("%s_%s: %s" % (name, key, str(results[key])))
majority_evaluator.get_results_nice_print(
name=name, token_labels_available=self.config["token_labels_available"])
if df_results is None:
df_results = pd.DataFrame(columns=results.keys())
df_results = df_results.append(results, ignore_index=True)
# Random baseline.
random_pred_sent = []
random_pred_tok = []
for sentence in data_test:
random_pred_sent.append(random.randint(0, len(self.label2id_sent) - 1))
random_pred_tok.append(
[random.randint(0, len(self.label2id_tok) - 1)
for _ in range(len(sentence.tokens))])
random_evaluator = Evaluator(
self.label2id_sent, self.label2id_tok, self.config["conll03_eval"])
random_evaluator.append_data(
0.0, data_test, random_pred_sent, random_pred_tok)
name = "rand_test" + str(i)
results = random_evaluator.get_results(
name=name, token_labels_available=self.config["token_labels_available"])
for key in results:
print("%s_%s: %s" % (name, key, str(results[key])))
random_evaluator.get_results_nice_print(
name=name, token_labels_available=self.config["token_labels_available"])
df_results = df_results.append(results, ignore_index=True)
i += 1
# Save data frame with all the training and testing results
df_results.to_csv("".join(self.config["to_write_filename"].split(".")[:-1])
+ "_df_results.txt", index=False, sep="\t", encoding="utf-8")
def run_experiment(self, config_path):
"""
Runs an experiment with MHAL.
:type config_path: str
"""
self.config = self.parse_config("config", config_path)
# If you already have a pre-trained model that you just want to test/visualize, set
# "load_pretrained_model" to True and add the path to the saved model in "save".
if self.config["load_pretrained_model"]:
            model_filename = self.config["save"]
            loaded_model = Model.load(model_filename)
            print("Loaded model from %s" % model_filename)
            self.label2id_sent = loaded_model.label2id_sent
            self.label2id_tok = loaded_model.label2id_tok
            print("Sentence labels to id: ", self.label2id_sent)
            print("Token labels to id: ", self.label2id_tok)
            if self.config["path_test"]:
                for d, path_data_test in enumerate(self.config["path_test"].strip().split(":")):
                    data_test_loaded = self.read_input_files(path_data_test)
                    data_test_loaded = self.convert_labels(data_test_loaded)
                    self.process_sentences(
                        data_test_loaded, loaded_model, is_training=False,
                        learning_rate=0.0, name="test" + str(d))
return
# Train and test a new model.
initialize_writer(self.config["to_write_filename"])
i_rand = random.randint(1, 10000)
print("i_rand = ", i_rand)
temp_model_path = "models/temp_model_%d" % (
int(time.time()) + i_rand) + ".model"
if "random_seed" in self.config:
random.seed(self.config["random_seed"])
np.random.seed(self.config["random_seed"])
for key, val in self.config.items():
print(str(key) + " = " + str(val))
# Run majority and random baselines.
if "baseline" in self.config["model_type"]:
self.run_baseline()
return
data_train, data_dev, data_test = None, None, None
if self.config["path_train"] and len(self.config["path_train"]) > 0:
data_train = []
for path_train in self.config["path_train"].strip().split(":"):
data_train += self.read_input_files(
file_paths=path_train,
max_sentence_length=self.config["max_train_sent_length"])
if self.config["path_dev"] and len(self.config["path_dev"]) > 0:
data_dev = []
for path_dev in self.config["path_dev"].strip().split(":"):
data_dev += self.read_input_files(file_paths=path_dev)
if self.config["path_test"] and len(self.config["path_test"]) > 0:
data_test = []
for path_test in self.config["path_test"].strip().split(":"):
data_test += self.read_input_files(file_paths=path_test)
self.label2id_sent = self.create_labels_mapping(Sentence.unique_labels_sent)
self.label2id_tok = self.create_labels_mapping(Token.unique_labels_tok)
print("Sentence labels to id: ", self.label2id_sent)
print("Token labels to id: ", self.label2id_tok)
data_train = self.convert_labels(data_train) if data_train else None
data_dev = self.convert_labels(data_dev) if data_dev else None
data_test = self.convert_labels(data_test) if data_test else None
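        # Note: the datasets are truncated to their first 50 sentences below.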
data_train = data_train[:50]
data_dev = data_dev[:50]
data_test = data_test[:50]
model = Model(self.config, self.label2id_sent, self.label2id_tok)
model.build_vocabs(data_train, data_dev, data_test,
embedding_path=self.config["preload_vectors"])
model.construct_network()
model.initialize_session()
if self.config["preload_vectors"]:
model.preload_word_embeddings(self.config["preload_vectors"])
print("Parameter count: %d."
% model.get_parameter_count())
print("Parameter count without word embeddings: %d."
% model.get_parameter_count_without_word_embeddings())
if data_train is None:
raise ValueError("No training set provided!")
model_selector_splits = self.config["model_selector"].split(":")
if type(self.config["model_selector_ratio"]) == str:
model_selector_ratios_splits = [
float(val) for val in self.config["model_selector_ratio"].split(":")]
else:
model_selector_ratios_splits = [self.config["model_selector_ratio"]]
model_selector_type = model_selector_splits[-1]
model_selector_values = model_selector_splits[:-1]
assert (len(model_selector_values) == len(model_selector_ratios_splits)
or len(model_selector_ratios_splits) == 1), \
"Model selector values and ratios don't match!"
# Each model_selector_value contributes in proportion to its
# corresponding (normalized) weight value. If just one ratio is specified,
# all model_selector_values receive equal weight.
if len(model_selector_ratios_splits) == 1:
normalized_ratio = model_selector_ratios_splits[0] / sum(
model_selector_ratios_splits * len(model_selector_values))
model_selector_to_ratio = {value: normalized_ratio for value in model_selector_values}
else:
sum_ratios = sum(model_selector_ratios_splits)
normalized_ratios = [ratio / sum_ratios for ratio in model_selector_ratios_splits]
model_selector_to_ratio = {value: ratio for value, ratio in
zip(model_selector_values, normalized_ratios)}
best_selector_value = 0.0
if model_selector_type == "low":
best_selector_value = float("inf")
best_epoch = -1
learning_rate = self.config["learning_rate"]
df_results = None
for epoch in range(self.config["epochs"]):
print("EPOCH: %d" % epoch)
print("Learning rate: %f" % learning_rate)
random.shuffle(data_train)
results_train = self.process_sentences(
data_train, model, is_training=True,
learning_rate=learning_rate, name="train_epoch%d" % epoch)
if df_results is None:
df_results = pd.DataFrame(columns=results_train.keys())
df_results = df_results.append(results_train, ignore_index=True)
if data_dev:
results_dev = self.process_sentences(
data_dev, model, is_training=False,
learning_rate=0.0, name="dev_epoch%d" % epoch)
df_results = df_results.append(results_dev, ignore_index=True)
if math.isnan(results_dev["cost_sum"]) or math.isinf(results_dev["cost_sum"]):
raise ValueError("Cost is NaN or Inf!")
results_dev_for_model_selector = sum([
results_dev[model_selector] * ratio
for model_selector, ratio in model_selector_to_ratio.items()])
if (epoch == 0
or (model_selector_type == "high"
and results_dev_for_model_selector > best_selector_value)
or (model_selector_type == "low"
and results_dev_for_model_selector < best_selector_value)):
best_epoch = epoch
best_selector_value = results_dev_for_model_selector
model.saver.save(sess=model.session, save_path=temp_model_path,
latest_filename=os.path.basename(temp_model_path) + ".checkpoint")
print("Best epoch: %d" % best_epoch)
print("*" * 50 + "\n")
if 0 < self.config["stop_if_no_improvement_for_epochs"] <= epoch - best_epoch:
break
if epoch - best_epoch > 3:
learning_rate *= self.config["learning_rate_decay"]
while self.config["garbage_collection"] and gc.collect() > 0:
pass
if data_dev and best_epoch >= 0:
model.saver.restore(model.session, temp_model_path)
os.remove(temp_model_path + ".checkpoint")
os.remove(temp_model_path + ".data-00000-of-00001")
os.remove(temp_model_path + ".index")
os.remove(temp_model_path + ".meta")
if self.config["save"] is not None and len(self.config["save"]) > 0:
model.save(self.config["save"])
if self.config["path_test"] is not None:
for i, path_test in enumerate(self.config["path_test"].strip().split(":")):
data_test = self.read_input_files(path_test)
data_test = self.convert_labels(data_test)
data_test = data_test[:50]
results_test = self.process_sentences(
data_test, model, is_training=False,
learning_rate=0.0, name="test" + str(i))
df_results = df_results.append(results_test, ignore_index=True)
# Save all the training and testing results as a tab-separated file.
df_results.to_csv(os.path.splitext(self.config["to_write_filename"])[0]
+ "_df_results.txt", index=False, sep="\t", encoding="utf-8")
class Writer:
"""
A class that allows printing to file and to std output at the same time.
"""
def __init__(self, *writers):
self.writers = writers
def write(self, text):
for w in self.writers:
w.write(text)
def flush(self):
pass
def initialize_writer(to_write_filename):
"""
Initializes the Writer class so that everything printed to stdout is also written to a file.
:param to_write_filename: path of the file to write the output to.
"""
file_out = open(to_write_filename, "wt")
sys.stdout = Writer(sys.stdout, file_out)
if __name__ == "__main__":
experiment = Experiment()
experiment.run_experiment(sys.argv[1])
| 31,116 | 43.580229 | 114 | py |
multi-head-attention-labeller | multi-head-attention-labeller-master/modules.py | from math import ceil
import tensorflow as tf
def layer_normalization(layer, epsilon=1e-8):
"""
Implements layer normalization.
:param layer: a tensor whose last dimension is normalized; the first dimension is the batch size
:param epsilon: a small number to avoid numerical issues, such as division by zero.
:return: normalized tensor, of the same shape as the input
"""
with tf.variable_scope("layer_norm"):
params_shape = layer.get_shape()[-1:]
mean, variance = tf.nn.moments(layer, [-1], keep_dims=True)
beta = tf.get_variable(
name="beta", shape=params_shape, initializer=tf.zeros_initializer(), trainable=True)
gamma = tf.get_variable(
name="gamma", shape=params_shape, initializer=tf.ones_initializer(), trainable=True)
normalized = (layer - mean) / ((variance + epsilon) ** 0.5)
outputs = gamma * normalized + beta
return outputs
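# Worked example (illustrative sketch, not part of the original module): for a
# single row x = [1.0, 2.0, 3.0], mean = 2.0 and variance = 2/3, so
# normalized = (x - 2.0) / sqrt(2/3 + 1e-8) ~= [-1.2247, 0.0, 1.2247];
# since gamma is initialized to ones and beta to zeros, the initial output
# equals the normalized values themselves.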
def division_masking(inputs, axis, multiplies):
"""
Masking used when dividing each element by the sum along a certain axis.
Positions whose sum is 0 are set to 0 instead of dividing by zero
(the -infinity variant is left commented out below).
:param inputs: the input to be divided
:param axis: axis on which to perform the reduced sum
:param multiplies: the shape to be used when tiling the division masks.
:return: the normalized inputs, with 0 wherever the sum along the axis is 0.
"""
division_masks = tf.sign(tf.reduce_sum(inputs, axis=axis, keep_dims=True))
division_masks = tf.tile(division_masks, multiples=multiplies)
divided_inputs = tf.where(
tf.equal(division_masks, 0),
tf.zeros_like(inputs),
# tf.ones_like(inputs) * (-2 ** 32 + 1.0),
tf.div(inputs, tf.reduce_sum(inputs, axis=axis, keep_dims=True)))
return divided_inputs
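# Usage sketch (illustrative, not part of the original module): for
# inputs = [[1., 3.], [0., 0.]] and axis=-1, the row sums are [4., 0.], so the
# first row is normalized to [0.25, 0.75] while the all-zero row is kept as
# [0., 0.] instead of producing NaNs through a division by zero.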
def label_smoothing(labels, epsilon=0.1):
"""
Implements label smoothing. This prevents the model from becoming
over-confident about its predictions and thus makes it less prone to
overfitting; label smoothing acts as a regularizer.
:param labels: 3D tensor with the last dimension as the number of labels
:param epsilon: smoothing rate
:return: smoothed labels
"""
num_labels = labels.get_shape().as_list()[-1]
return ((1 - epsilon) * labels) + (epsilon / num_labels)
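# Worked example (illustrative, not part of the original module): with
# epsilon = 0.1 and a one-hot row [0., 1., 0.] (num_labels = 3), the smoothed
# row is 0.9 * [0., 1., 0.] + 0.1 / 3 ~= [0.033, 0.933, 0.033], which still
# sums to 1 but no longer puts all probability mass on a single label.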
def mask(inputs, queries=None, keys=None, mask_type=None):
"""
Generates masks and applies them to 3D inputs.
inputs: 3D tensor. [B, M, M]
queries: 3D tensor. [B, M, E]
keys: 3D tensor. [B, M, E]
"""
padding_num = -2 ** 32 + 1
if "key" in mask_type:
masks = tf.sign(tf.reduce_sum(tf.abs(keys), axis=-1)) # [B, M]
masks = tf.expand_dims(masks, axis=1) # [B, 1, M]
masks = tf.tile(masks, [1, tf.shape(queries)[1], 1]) # [B, M, M]
paddings = tf.ones_like(inputs) * padding_num
outputs = tf.where(tf.equal(masks, 0), paddings, inputs) # [B, M, M]
elif "query" in mask_type:
masks = tf.sign(tf.reduce_sum(tf.abs(queries), axis=-1)) # [B, M]
masks = tf.expand_dims(masks, axis=-1) # [B, M, 1]
masks = tf.tile(masks, [1, 1, tf.shape(keys)[1]]) # [B, M, M]
outputs = inputs * masks
else:
raise ValueError("Unknown mask type: %s. You need to choose "
"between \"keys\" and \"query\"." % mask_type)
return outputs
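# Illustrative sketch (assumption, not part of the original module): with
# B = 1, M = 3 and a sentence of true length 2, the key vectors of the third
# (padded) position are all zero, so mask(..., mask_type="key") fills the
# third column of the [1, 3, 3] attention matrix with a large negative number;
# a subsequent sigmoid/softmax then assigns it a weight close to 0. The
# "query" mask instead zeroes out the rows that correspond to padded queries.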
def mask_2(inputs, queries=None, keys=None, mask_type=None):
"""
Generates masks and applies them to 4D inputs.
inputs: 4D tensor. [H, B, M, M]
queries: 4D tensor. [H, B, M, E]
keys: 4D tensor. [H, B, M, E]
"""
padding_num = -2 ** 32 + 1
if "key" in mask_type:
masks = tf.sign(tf.reduce_sum(tf.abs(keys), axis=-1)) # [H, B, M]
masks = tf.expand_dims(masks, axis=2) # [H, B, 1, M]
masks = tf.tile(masks, [1, 1, tf.shape(queries)[2], 1]) # [H, B, M, M]
paddings = tf.ones_like(inputs) * padding_num
outputs = tf.where(tf.equal(masks, 0), paddings, inputs) # [H, B, M, M]
elif "query" in mask_type:
masks = tf.sign(tf.reduce_sum(tf.abs(queries), axis=-1)) # [H, B, M]
masks = tf.expand_dims(masks, axis=-1) # [H, B, M, 1]
masks = tf.tile(masks, [1, 1, 1, tf.shape(keys)[2]]) # [H, B, M, M]
outputs = inputs * masks
else:
raise ValueError("Unknown mask type: %s. You need to choose "
"between \"keys\" and \"query\"." % mask_type)
return outputs
def cosine_distance_loss(inputs, take_abs=False):
"""
Computes the cosine pairwise distance loss between the input heads.
:param inputs: expects tensor with its last two dimensions [*, H, E],
where H = num heads and E = arbitrary vector dimension.
:param take_abs: take the absolute value of the cosine similarity; this
has the effect of switching from [-1, 1] to [0, 1], with the minimum at 0,
i.e. when the vectors are orthogonal, which is what we want.
However, this might not be differentiable at 0.
:return: the mean (absolute) cosine similarity over all pairs of head vectors,
used as a loss that pushes the heads towards orthogonality.
"""
with tf.variable_scope("cosine_distance_loss"):
# Calculate the cosine similarity and cosine distance.
# The goal is to maximize the cosine distance.
normalized_inputs = tf.nn.l2_normalize(inputs, axis=-1)
permutation = list(range(len(inputs.get_shape().as_list())))
permutation[-1], permutation[-2] = permutation[-2], permutation[-1]
cos_similarity = tf.matmul(
normalized_inputs, tf.transpose(normalized_inputs, permutation))
# Mask the lower diagonal matrix.
ones = tf.ones_like(cos_similarity)
mask_upper = tf.matrix_band_part(ones, 0, -1) # upper triangular part
mask_diagonal = tf.matrix_band_part(ones, 0, 0) # diagonal
mask_matrix = tf.cast(mask_upper - mask_diagonal, dtype=tf.bool)
upper_triangular_flat = tf.boolean_mask(cos_similarity, mask_matrix)
if take_abs:
return tf.reduce_mean(tf.math.abs(upper_triangular_flat))
else:
return tf.reduce_mean(upper_triangular_flat)
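# Worked example (illustrative, not part of the original module): for two
# heads with unit vectors u = [1, 0] and v = [0, 1], the only strictly-upper
# triangular entry of the cosine similarity matrix is u . v = 0, so the
# returned loss is 0 (its minimum when take_abs=True) exactly when the head
# vectors are orthogonal, which is the configuration this loss encourages.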
def single_head_attention_binary_labels(
inputs,
initializer,
attention_size,
sentence_lengths,
hidden_units):
"""
Computes single-head attention (just normal, vanilla, soft attention).
:param inputs: 3D floats of shape [B, M, E]
:param initializer: type of initializer (best if Glorot or Xavier)
:param attention_size: number of units to use for the attention evidence
:param sentence_lengths: 1D ints of shape [B] with the true sentence lengths
:param hidden_units: number of units to use for the processed sent tensor
:return sentence_scores: result of the attention * input; floats of shape [B]
:return sentence_predictions: predicted labels for each sentence in the batch; ints of shape [B]
:return token_scores: result of the un-normalized attention weights; floats of shape [B, M]
:return token_predictions: predicted labels for each token in each sentence; ints of shape [B, M]
"""
with tf.variable_scope("single_head_attention_binary_labels"):
attention_evidence = tf.layers.dense(
inputs=inputs, units=attention_size,
activation=tf.tanh, kernel_initializer=initializer) # [B, M, attention_size]
attention_weights = tf.layers.dense(
inputs=attention_evidence, units=1,
kernel_initializer=initializer) # [B, M, 1]
attention_weights = tf.squeeze(attention_weights, axis=-1) # [B, M]
attention_weights = tf.sigmoid(attention_weights)
token_scores = attention_weights
token_predictions = tf.where(
tf.greater_equal(token_scores, 0.5),
tf.ones_like(token_scores),
tf.zeros_like(token_scores))
token_predictions = tf.cast(tf.where(
tf.sequence_mask(sentence_lengths),
token_predictions,
tf.zeros_like(token_predictions) - 1e6), tf.int32)
attention_weights = tf.where(
tf.sequence_mask(sentence_lengths),
attention_weights, tf.zeros_like(attention_weights))
attention_weights = attention_weights / tf.reduce_sum(
attention_weights, axis=1, keep_dims=True) # [B, M]
product = inputs * tf.expand_dims(attention_weights, axis=-1) # [B, M, E]
processed_tensor = tf.reduce_sum(product, axis=1) # [B, E]
if hidden_units > 0:
processed_tensor = tf.layers.dense(
inputs=processed_tensor, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer) # [B, hidden_units]
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=1,
activation=tf.sigmoid, kernel_initializer=initializer,
name="output_sent_single_head_ff") # [B, 1]
sentence_scores = tf.reshape(
sentence_scores, shape=[tf.shape(processed_tensor)[0]]) # [B]
sentence_predictions = tf.where(
tf.greater_equal(sentence_scores, 0.5),
tf.ones_like(sentence_scores, dtype=tf.int32),
tf.zeros_like(sentence_scores, dtype=tf.int32)) # [B]
return sentence_scores, sentence_predictions, token_scores, token_predictions
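# Usage sketch (illustrative; the tensor names are assumptions, not part of
# the original module): given Bi-LSTM outputs of shape [B, M, E] and the true
# sentence lengths of shape [B], something like
#   sent_scores, sent_preds, tok_scores, tok_preds = \
#       single_head_attention_binary_labels(
#           lstm_outputs, tf.glorot_uniform_initializer(),
#           attention_size=100, sentence_lengths=lengths, hidden_units=50)
# yields per-sentence scores in [0, 1] (shape [B]) and per-token scores in
# [0, 1] (shape [B, M]), thresholded at 0.5 to obtain binary predictions.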
def baseline_lstm_last_contexts(
last_token_contexts,
last_context,
initializer,
scoring_activation,
sentence_lengths,
hidden_units,
num_sentence_labels,
num_token_labels):
"""
Computes token and sentence scores/predictions solely from the last LSTM context
vectors that the Bi-LSTM has produced. Works for a flexible number of labels.
:param last_token_contexts: the (concatenated) Bi-LSTM outputs per-token.
:param last_context: the (concatenated) Bi-LSTM final state.
:param initializer: type of initializer (best if Glorot or Xavier)
:param scoring_activation: used in computing the sentence scores from the token scores (per-head)
:param sentence_lengths: 1D ints of shape [B] with the true sentence lengths
:param hidden_units: number of units to use for the processed sentence tensor
:param num_sentence_labels: number of unique sentence labels
:param num_token_labels: number of unique token labels
:return sentence_scores: 2D floats of shape [B, num_sentence_labels]
:return sentence_predictions: predicted labels for each sentence in the batch; ints of shape [B]
:return token_scores: 3D floats of shape [B, M, num_token_labels]
:return token_predictions: predicted labels for each token in each sentence; ints of shape [B, M]
:return: attention weights will be a tensor of zeros of shape [B, M, num_token_labels].
"""
with tf.variable_scope("baseline_lstm_last_contexts"):
if hidden_units > 0:
processed_tensor = tf.layers.dense(
last_context, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer)
token_scores = tf.layers.dense(
last_token_contexts, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer)
else:
processed_tensor = last_context
token_scores = last_token_contexts
sentence_scores = tf.layers.dense(
processed_tensor, units=num_sentence_labels,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_scores_lstm_ff") # [B, num_sentence_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores, axis=-1)
sentence_predictions = tf.argmax(sentence_probabilities, axis=-1) # [B]
token_scores = tf.layers.dense(
token_scores, units=num_token_labels,
activation=scoring_activation, kernel_initializer=initializer,
name="token_scores_lstm_ff") # [B, M, num_token_labels]
masked_sentence_lengths = tf.tile(
input=tf.expand_dims(
tf.sequence_mask(sentence_lengths), axis=-1),
multiples=[1, 1, num_token_labels])
token_scores = tf.where(
masked_sentence_lengths,
token_scores,
tf.zeros_like(token_scores)) # [B, M, num_token_labels]
token_probabilities = tf.nn.softmax(token_scores, axis=-1)
token_predictions = tf.argmax(token_probabilities, axis=-1)
attention_weights = tf.zeros_like(token_scores)
return sentence_scores, sentence_predictions, token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
def single_head_attention_multiple_labels(
inputs,
initializer,
attention_activation,
attention_size,
sentence_lengths,
hidden_units,
num_sentence_labels,
num_token_labels):
"""
Computes single-head attention, but adapts it (naively) to make it work for multiple labels.
:param inputs: 3D floats of shape [B, M, E]
:param initializer: type of initializer (best if Glorot or Xavier)
:param attention_activation: type of attention activation (soft, sharp, linear, etc)
:param attention_size: number of units to use for the attention evidence
:param sentence_lengths: 1D ints of shape [B] with the true sentence lengths
:param hidden_units: number of units to use for the processed sent tensor
:param num_sentence_labels: number of unique sentence labels
:param num_token_labels: number of unique token labels
:return sentence_scores: 2D floats of shape [B, num_sentence_labels]
:return sentence_predictions: predicted labels for each sentence in the batch; ints of shape [B]
:return token_scores: 3D floats of shape [B, M, num_token_labels]
:return token_predictions: predicted labels for each token in each sentence; ints of shape [B, M]
"""
with tf.variable_scope("SHA_multiple_labels"):
attention_evidence = tf.layers.dense(
inputs=inputs, units=attention_size,
activation=tf.tanh, kernel_initializer=initializer) # [B, M, attention_size]
attention_evidence = tf.layers.dense(
inputs=attention_evidence, units=1,
kernel_initializer=initializer) # [B, M, 1]
attention_evidence = tf.squeeze(attention_evidence, axis=-1) # [B, M]
# Apply a non-linear layer to obtain (un-normalized) attention weights.
if attention_activation == "soft":
attention_weights = tf.nn.sigmoid(attention_evidence)
elif attention_activation == "sharp":
attention_weights = tf.math.exp(attention_evidence)
elif attention_activation == "linear":
attention_weights = attention_evidence
elif attention_activation == "softmax":
attention_weights = tf.nn.softmax(attention_evidence)
else:
raise ValueError("Unknown/unsupported activation for attention activation: %s."
% attention_activation)
# Mask attention weights.
attention_weights = tf.where(
tf.sequence_mask(sentence_lengths),
attention_weights, tf.zeros_like(attention_weights))
attention_weights_unnormalized = attention_weights
# Normalize attention weights.
if attention_activation != "softmax":
attention_weights = attention_weights / tf.reduce_sum(
attention_weights, axis=-1, keep_dims=True) # [B, M]
token_scores = tf.layers.dense(
inputs=tf.expand_dims(attention_weights_unnormalized, -1),
units=num_token_labels,
kernel_initializer=initializer,
name="output_single_head_token_scores_ff") # [B, M, num_token_labels]
token_probabilities = tf.nn.softmax(token_scores)
token_predictions = tf.argmax(token_probabilities,
axis=2, output_type=tf.int32) # [B, M]
product = inputs * tf.expand_dims(attention_weights, axis=-1) # [B, M, E]
processed_tensor = tf.reduce_sum(product, axis=1) # [B, E]
if hidden_units > 0:
processed_tensor = tf.layers.dense(
inputs=processed_tensor, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer) # [B, hidden_units]
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=num_sentence_labels,
kernel_initializer=initializer,
name="output_multi_sent_specified_scores_ff") # [B, num_unique_sent_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores, axis=-1)
sentence_predictions = tf.argmax(sentence_probabilities, axis=-1) # [B]
return sentence_scores, sentence_predictions, token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
def multi_head_attention_with_scores_from_shared_heads(
inputs,
initializer,
attention_activation,
hidden_units,
num_sentence_labels,
num_heads,
is_training,
dropout,
sentence_lengths,
use_residual_connection,
token_scoring_method):
"""
Computes multi-head attention (mainly inspired by the transformer architecture).
This method masks the queries, keys and values (as well as the attention matrix)
so that only real token positions contribute; the masking needed before computing
a primary/secondary loss is still performed outside this function.
:param inputs: 3D floats of shape [B, M, E]
:param initializer: type of initializer (best if Glorot or Xavier)
:param attention_activation: type of attention activation (soft, sharp, linear or softmax)
:param hidden_units: number of units to use for the processed sent tensor
:param num_sentence_labels: number of unique sentence labels
:param num_heads: number of attention heads (set to the number of unique token labels)
:param is_training: if set to True, the current phase is a training one (rather than testing)
:param dropout: the keep_probs value for the dropout
:param sentence_lengths: the true sentence lengths, used for masking
:param use_residual_connection: if set to True, a residual connection is added to the inputs
:param token_scoring_method: can be either max, sum or avg
:return sentence_scores: 2D floats of shape [B, num_sentence_labels]
:return sentence_predictions: predicted labels for each sentence in the batch; ints of shape [B]
:return token_scores: 3D floats of shape [B, M, num_heads]
:return token_predictions: predicted labels for each token in each sentence; ints of shape [B, M]
:return token_probabilities: the token scores normalized across the axis
"""
with tf.variable_scope("MHA_sentence_scores_from_shared_heads"):
num_units = inputs.get_shape().as_list()[-1]
if num_units % num_heads != 0:
num_units = ceil(num_units / num_heads) * num_heads
inputs = tf.layers.dense(inputs, num_units) # [B, M, num_units]
# Project to get the queries, keys, and values.
queries = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
keys = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
values = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
# Mask out the keys, queries and values: replace with 0 all the token
# positions between the true and the maximum sentence length.
multiplication_mask = tf.tile(
input=tf.expand_dims(tf.sequence_mask(sentence_lengths), axis=-1),
multiples=[1, 1, num_units]) # [B, M, num_units]
queries = tf.where(multiplication_mask, queries, tf.zeros_like(queries))
keys = tf.where(multiplication_mask, keys, tf.zeros_like(keys))
values = tf.where(multiplication_mask, values, tf.zeros_like(values))
# Split and concat as many projections as the number of heads.
queries = tf.concat(
tf.split(queries, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
keys = tf.concat(
tf.split(keys, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
values = tf.concat(
tf.split(values, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
# Transpose multiplication and scale
attention_evidence = tf.matmul(
queries, tf.transpose(keys, [0, 2, 1])) # [B*num_heads, M, M]
attention_evidence = tf.math.divide(
attention_evidence, tf.constant(num_units ** 0.5))
# Mask columns (with values of -infinity), based on rows that have 0 sum.
attention_evidence_masked = mask(
attention_evidence, queries, keys, mask_type="key")
# Apply a non-linear layer to obtain (un-normalized) attention weights.
if attention_activation == "soft":
attention_weights = tf.nn.sigmoid(attention_evidence_masked)
elif attention_activation == "sharp":
attention_weights = tf.math.exp(attention_evidence_masked)
elif attention_activation == "linear":
attention_weights = attention_evidence_masked
elif attention_activation == "softmax":
attention_weights = tf.nn.softmax(attention_evidence_masked)
else:
raise ValueError("Unknown/unsupported attention activation: %s."
% attention_activation)
attention_weights_unnormalized = attention_weights
# Normalize attention weights.
if attention_activation != "softmax":
attention_weights /= tf.reduce_sum(
attention_weights, axis=-1, keep_dims=True)
# Mask rows (with values of 0), based on columns that have 0 sum.
attention_weights = mask(
attention_weights, queries, keys, mask_type="query")
attention_weights_unnormalized = mask(
attention_weights_unnormalized, queries, keys, mask_type="query")
# Apply a dropout layer on the attention weights.
if dropout > 0.0:
dropout_attention = (dropout * tf.cast(is_training, tf.float32)
+ (1.0 - tf.cast(is_training, tf.float32)))
attention_weights = tf.nn.dropout(
attention_weights, dropout_attention,
name="dropout_attention_weights") # [B*num_heads, M, M]
# [B*num_heads, M, num_units/num_heads]
product = tf.matmul(attention_weights, values)
product = tf.concat(
tf.split(product, num_heads), axis=2) # [B, M, num_units]
# Add a residual connection, followed by layer normalization.
if use_residual_connection:
product += inputs
product = layer_normalization(product) # [B, M, num_units]
processed_tensor = tf.reduce_sum(product, axis=1) # [B, num_units]
if hidden_units > 0:
processed_tensor = tf.layers.dense(
inputs=processed_tensor, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer) # [B, hidden_units]
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=num_sentence_labels,
kernel_initializer=initializer,
name="output_sent_specified_scores_ff") # [B, num_unique_sent_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores)
sentence_predictions = tf.argmax(sentence_probabilities, axis=1) # [B]
# Obtain token scores from the attention weights.
# The token scores will have shape [B*num_heads, M, 1].
if token_scoring_method == "sum":
token_scores = tf.expand_dims(tf.reduce_sum(
attention_weights_unnormalized, axis=1), axis=2)
elif token_scoring_method == "max":
token_scores = tf.expand_dims(tf.reduce_max(
attention_weights_unnormalized, axis=1), axis=2)
elif token_scoring_method == "avg":
token_scores = tf.expand_dims(tf.reduce_mean(
attention_weights_unnormalized, axis=1), axis=2)
elif token_scoring_method == "logsumexp":
token_scores = tf.expand_dims(tf.reduce_logsumexp(
attention_weights_unnormalized, axis=1), axis=2)
else:
raise ValueError("Unknown/unsupported token scoring method: %s"
% token_scoring_method)
token_scores = tf.concat(
tf.split(token_scores, num_heads), axis=2) # [B, M, num_heads]
token_probabilities = tf.nn.softmax(token_scores)
token_predictions = tf.argmax(
token_probabilities, axis=2, output_type=tf.int32) # [B, M]
attention_weights = tf.concat(
tf.split(tf.expand_dims(attention_weights, axis=-1), num_heads),
axis=-1) # [B, M, M, num_heads]
return sentence_scores, sentence_predictions, \
token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
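# Shape sketch (illustrative, not part of the original module): with B = 2
# sentences, M = 5 tokens, num_units = 300 and num_heads = 3, the
# split-and-concat turns the [2, 5, 300] queries/keys/values into [6, 5, 100]
# tensors; the attention matrix is then [6, 5, 5], the weighted values are
# [6, 5, 100], and the final concat restores the [2, 5, 300] representation,
# from which a reduce_sum over tokens gives a [2, 300] sentence vector.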
def multi_head_attention_with_scores_from_separate_heads(
inputs,
initializer,
attention_activation,
num_sentence_labels,
num_heads,
is_training,
dropout,
sentence_lengths,
normalize_sentence,
token_scoring_method,
scoring_activation=None,
separate_heads=True):
"""
Computes multi-head attention (mainly inspired by the transformer architecture).
This version of the implementation applies masking at several levels:
* first, the keys, queries and values so that the matrix multiplications
are performed only between meaningful positions
* second, the attention evidence values of 0 should be replaced with -infinity
so that when applying a non-linear layer, the resulted value is very close to 0.
* third, when obtaining the token probabilities (by normalizing across the scores),
division masking is performed (a value of 0 should be attributed to all 0 sums).
The masking performed before computing a primary/secondary loss is preserved.
:param inputs: 3D floats of shape [B, M, E]
:param initializer: type of initializer (best if Glorot or Xavier)
:param attention_activation: type of attention activation (soft, sharp, linear or softmax)
:param num_sentence_labels: number of unique sentence labels
:param num_heads: number of attention heads (set to the number of unique token labels)
:param is_training: if set to True, the current phase is a training one (rather than testing)
:param dropout: the keep_probs value for the dropout
:param sentence_lengths: the true sentence lengths, used for masking
:param normalize_sentence: if set to True, the last weighted sentence layer is normalized
:param token_scoring_method: can be either max, sum or avg
:param scoring_activation: used in computing the sentence scores from the token scores (per-head)
:param separate_heads: boolean value; when set to False, all heads
are used to obtain the sentence scores; when set to True, the default and non-default heads
from the token scores are used to obtain the sentence scores.
:return sentence_scores: 2D floats of shape [B, num_sentence_labels]
:return sentence_predictions: predicted labels for each sentence in the batch; ints of shape [B]
:return token_scores: 3D floats of shape [B, M, num_heads]
:return token_predictions: predicted labels for each token in each sentence; ints of shape [B, M]
"""
with tf.variable_scope("MHA_sentence_scores_from_separate_heads"):
num_units = inputs.get_shape().as_list()[-1]
if num_units % num_heads != 0:
num_units = ceil(num_units / num_heads) * num_heads
inputs = tf.layers.dense(inputs, num_units) # [B, M, num_units]
# Project to get the queries, keys, and values.
queries = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
keys = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
values = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
# Mask out the keys, queries and values: replace with 0 all the token
# positions between the true and the maximum sentence length.
multiplication_mask = tf.tile(
input=tf.expand_dims(tf.sequence_mask(sentence_lengths), axis=-1),
multiples=[1, 1, num_units]) # [B, M, num_units]
queries = tf.where(multiplication_mask, queries, tf.zeros_like(queries))
keys = tf.where(multiplication_mask, keys, tf.zeros_like(keys))
values = tf.where(multiplication_mask, values, tf.zeros_like(values))
# Split and concat as many projections as the number of heads.
queries = tf.concat(
tf.split(queries, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
keys = tf.concat(
tf.split(keys, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
# Transpose multiplication and scale
attention_evidence = tf.matmul(
queries, tf.transpose(keys, [0, 2, 1])) # [B*num_heads, M, M]
attention_evidence = tf.math.divide(
attention_evidence, tf.constant(num_units ** 0.5))
# Mask columns (with values of -infinity), based on rows that have 0 sum.
attention_evidence_masked = mask(
attention_evidence, queries, keys, mask_type="key")
# Apply a non-linear layer to obtain (un-normalized) attention weights.
if attention_activation == "soft":
attention_weights = tf.nn.sigmoid(attention_evidence_masked)
elif attention_activation == "sharp":
attention_weights = tf.math.exp(attention_evidence_masked)
elif attention_activation == "linear":
attention_weights = attention_evidence_masked
elif attention_activation == "softmax":
attention_weights = tf.nn.softmax(attention_evidence_masked)
else:
raise ValueError("Unknown/unsupported attention activation: %s."
% attention_activation)
# Normalize attention weights.
if attention_activation != "softmax":
attention_weights /= tf.reduce_sum(
attention_weights, axis=-1, keep_dims=True)
# Mask rows (with values of 0), based on columns that have 0 sum.
attention_weights = mask(
attention_weights, queries, keys, mask_type="query")
# Apply a dropout layer on the attention weights.
if dropout > 0.0:
dropout_attention = (dropout * tf.cast(is_training, tf.float32)
+ (1.0 - tf.cast(is_training, tf.float32)))
attention_weights = tf.nn.dropout(
attention_weights, dropout_attention,
name="dropout_attention_weights") # [B*num_heads, M, M]
# Obtain the token scores from the attention weights.
# The token_scores below will have shape [B*num_heads, 1, M].
if token_scoring_method == "sum":
token_scores = tf.reduce_sum(
attention_weights, axis=1, keep_dims=True)
elif token_scoring_method == "max":
token_scores = tf.reduce_max(
attention_weights, axis=1, keep_dims=True)
elif token_scoring_method == "avg":
token_scores = tf.reduce_mean(
attention_weights, axis=1, keep_dims=True)
elif token_scoring_method == "logsumexp":
token_scores = tf.reduce_logsumexp(
attention_weights, axis=1, keep_dims=True)
else:
raise ValueError("Unknown/unsupported token scoring method: %s"
% token_scoring_method)
token_scores = tf.concat(
tf.split(token_scores, num_heads),
axis=1) # [B, num_heads, M]
token_scores_normalized = division_masking(
inputs=token_scores, axis=-1,
multiplies=[1, 1, tf.shape(token_scores)[-1]]) # [B, num_heads, M]
token_probabilities = tf.nn.softmax(token_scores, axis=1)
token_predictions = tf.argmax(
token_probabilities, axis=1, output_type=tf.int32) # [B, M]
# Obtain a weighted sum between the inputs and the attention weights.
# [B, num_heads, num_units]
weighted_sum_representation = tf.matmul(token_scores_normalized, values)
if normalize_sentence:
weighted_sum_representation = layer_normalization(weighted_sum_representation)
if separate_heads:
# Get the sentence representations corresponding to the default head.
default_head = tf.gather(
weighted_sum_representation,
indices=[0], axis=1) # [B, 1, num_units]
# Get the sentence representations corresponding to the non-default heads.
non_default_heads = tf.gather(
weighted_sum_representation,
indices=list(range(1, num_heads)), axis=1) # [B, num_heads-1, num_units]
# Project onto one unit, corresponding to
# the default sentence label score.
sentence_default_scores = tf.layers.dense(
default_head, units=1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_default_scores_ff") # [B, 1, 1]
sentence_default_scores = tf.squeeze(
sentence_default_scores, axis=-1) # [B, 1]
# Project onto (num_sentence_labels-1) units, corresponding to
# the non-default sentence label scores.
sentence_non_default_scores = tf.layers.dense(
non_default_heads, units=num_sentence_labels-1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_non_default_scores_ff") # [B, num_heads-1, num_sentence_labels-1]
sentence_non_default_scores = tf.reduce_mean(
sentence_non_default_scores, axis=1) # [B, num_sent_labels-1]
sentence_scores = tf.concat(
[sentence_default_scores, sentence_non_default_scores],
axis=-1, name="sentence_scores_concatenation") # [B, num_sent_labels]
else:
processed_tensor = tf.layers.dense(
inputs=weighted_sum_representation, units=num_sentence_labels,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_scores_ff") # [B, num_heads, num_unique_sent_labels]
sentence_scores = tf.reduce_sum(
processed_tensor, axis=1) # [B, num_sent_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores)
sentence_predictions = tf.argmax(sentence_probabilities, axis=1) # [B]
# Get token scores and probabilities of shape # [B, M, num_heads].
token_scores = tf.transpose(token_scores, [0, 2, 1])
token_probabilities = tf.transpose(token_probabilities, [0, 2, 1])
attention_weights = tf.concat(
tf.split(tf.expand_dims(attention_weights, axis=-1), num_heads),
axis=-1) # [B, M, M, num_heads]
return sentence_scores, sentence_predictions, \
token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
def compute_scores_from_additive_attention(
inputs,
initializer,
attention_activation,
sentence_lengths,
attention_size=50,
hidden_units=50):
"""
Computes token and sentence scores from a single-head additive attention mechanism.
:param inputs: 3D floats of shape [B, M, E]
:param initializer: type of initializer (best if Glorot or Xavier)
:param attention_activation: type of attention activation (soft, sharp or linear)
:param sentence_lengths: 1D ints of shape [B] with the true sentence lengths
:param attention_size: number of units to use for the attention evidence
:param hidden_units: number of units to use for the processed sent tensor
:return sentence_scores: result of the attention * input; floats of shape [B]
:return token_scores: result of the un-normalized attention weights; floats of shape [B, M]
:return attention_weights: 2D floats of shape [B, M] of normalized token_scores
"""
with tf.variable_scope("compute_classic_single_head_attention"):
attention_evidence = tf.layers.dense(
inputs=inputs, units=attention_size,
activation=tf.tanh, kernel_initializer=initializer) # [B, M, attention_size]
attention_weights = tf.layers.dense(
inputs=attention_evidence, units=1,
kernel_initializer=initializer) # [B, M, 1]
attention_weights = tf.squeeze(attention_weights, axis=-1) # [B, M]
# Obtain the un-normalized attention weights.
if attention_activation == "soft":
attention_weights = tf.nn.sigmoid(attention_weights)
elif attention_activation == "sharp":
attention_weights = tf.exp(attention_weights)
elif attention_activation == "linear":
attention_weights = attention_weights
else:
raise ValueError("Unknown/unsupported attention activation: %s"
% attention_activation)
attention_weights = tf.where(
tf.sequence_mask(sentence_lengths),
attention_weights, tf.zeros_like(attention_weights))
token_scores = attention_weights # [B, M]
# Obtain the normalized attention weights (they will also be sentence weights).
attention_weights = attention_weights / tf.reduce_sum(
attention_weights, axis=1, keep_dims=True) # [B, M]
product = inputs * tf.expand_dims(attention_weights, axis=-1) # [B, M, num_units]
processed_tensor = tf.reduce_sum(product, axis=1) # [B, E]
if hidden_units > 0:
processed_tensor = tf.layers.dense(
inputs=processed_tensor, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer) # [B, hidden_units]
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=1,
activation=tf.sigmoid, kernel_initializer=initializer,
name="output_sent_single_head_ff") # [B, 1]
sentence_scores = tf.squeeze(sentence_scores, axis=-1)
return sentence_scores, token_scores, attention_weights
def compute_scores_from_scaled_dot_product_attention(
inputs,
initializer,
attention_activation,
sentence_lengths,
token_scoring_method):
"""
Computes token and sentence scores from a single-head scaled dot product attention mechanism.
:param inputs: 3D floats of shape [B, M, E]
:param initializer: type of initializer (best with Glorot or Xavier)
:param attention_activation: type of attention activation: sharp (exp) or soft (sigmoid)
:param sentence_lengths: 1D ints of shape [B] with the true sentence lengths
:param token_scoring_method: can be either max, sum or avg
:return sentence_scores: 2D floats of shape [B, num_sentence_labels]
:return token_scores: 2D floats of shape [B, M]
:return attention_weights: 3D floats of shape [B, M, M] with the normalized attention weights
"""
with tf.variable_scope("compute_transformer_single_head_attention"):
num_units = inputs.get_shape().as_list()[-1]
# Project to get the queries, keys, and values, all of them of shape [B, M, num_units].
queries = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer)
keys = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer)
# Mask out the keys, queries and values: replace with 0 all the token
# positions between the true and the maximum sentence length.
multiplication_mask = tf.tile(
input=tf.expand_dims(tf.sequence_mask(sentence_lengths), axis=-1),
multiples=[1, 1, num_units]) # [B, M, num_units]
queries = tf.where(multiplication_mask, queries, tf.zeros_like(queries))
keys = tf.where(multiplication_mask, keys, tf.zeros_like(keys))
# Scaled dot-product attention.
attention_evidence = tf.matmul(
queries, tf.transpose(keys, [0, 2, 1])) # [B, M, M]
attention_evidence = tf.math.divide(
attention_evidence, tf.constant(num_units ** 0.5))
# Mask columns (with values of -infinity), based on rows that have 0 sum.
attention_evidence_masked = mask(
attention_evidence, queries, keys, mask_type="key")
# Obtain the un-normalized attention weights.
if attention_activation == "soft":
attention_weights = tf.nn.sigmoid(attention_evidence_masked)
elif attention_activation == "sharp":
attention_weights = tf.exp(attention_evidence_masked)
else:
raise ValueError("Unknown/unsupported activation for attention: %s"
% attention_activation)
attention_weights_unnormalized = attention_weights
# Normalize attention weights.
attention_weights /= tf.reduce_sum(
attention_weights, axis=-1, keep_dims=True) # [B, M, M]
# Mask rows (with values of 0), based on columns that have 0 sum.
attention_weights = mask(
attention_weights, queries, keys, mask_type="query")
attention_weights_unnormalized = mask(
attention_weights_unnormalized, queries, keys, mask_type="query")
# Obtain the token scores from the attention weights.
# The token_scores below will have shape [B, M].
if token_scoring_method == "sum":
token_scores = tf.reduce_sum(
attention_weights_unnormalized, axis=1)
elif token_scoring_method == "max":
token_scores = tf.reduce_max(
attention_weights_unnormalized, axis=1)
elif token_scoring_method == "avg":
token_scores = tf.reduce_mean(
attention_weights_unnormalized, axis=1)
elif token_scoring_method == "logsumexp":
token_scores = tf.reduce_logsumexp(
attention_weights_unnormalized, axis=1)
else:
raise ValueError("Unknown/unsupported token scoring method: %s"
% token_scoring_method)
token_scores_normalized = division_masking(
inputs=token_scores, axis=-1,
multiplies=[1, tf.shape(token_scores)[1]]) # [B, M]
# Sentence scores as a weighted sum between the inputs and the attention weights.
# weighted_sum_representation = tf.matmul(attention_weights, inputs)
weighted_sum_representation = inputs * tf.expand_dims(
token_scores_normalized, axis=-1) # [B, M, num_units]
processed_tensor = tf.reduce_sum(
weighted_sum_representation, axis=1) # [B, num_units]
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=1,
activation=tf.sigmoid, kernel_initializer=initializer,
name="sentence_scores_from_scaled_dot_product_ff") # [B, 1]
sentence_scores = tf.squeeze(sentence_scores, axis=-1) # [B]
return sentence_scores, token_scores, attention_weights
def single_head_attention_multiple_transformations(
inputs,
initializer,
attention_activation,
num_sentence_labels,
num_heads,
sentence_lengths,
token_scoring_method,
scoring_activation=None,
how_to_compute_attention="dot",
separate_heads=True):
"""
Computes token and sentence scores using a single-head attention mechanism,
which can either be additive (mainly inspired by the single-head binary-label
method above, as in Rei and Sogaard paper https://arxiv.org/pdf/1811.05949.pdf)
or a scaled-dot product version (inspired by the transformer, but with just one head).
Then, use these scores to obtain predictions at both granularities.
:param inputs: 3D floats of shape [B, M, E]
:param initializer: type of initializer (best if Glorot or Xavier)
:param attention_activation: type of attention activation (soft, sharp or linear)
:param num_sentence_labels: number of unique sentence labels
:param num_heads: number of attention heads (set to the number of unique token labels)
:param sentence_lengths: the true sentence lengths, used for masking
:param token_scoring_method: can be either max, sum, avg or logsumexp (used by the scaled dot-product version)
:param scoring_activation: activation used for scoring, default is None.
:param how_to_compute_attention: "additive" (classic additive attention) or "dot" (scaled dot-product, as in the transformer)
:param separate_heads: boolean value; when set to False, all heads
are used to obtain the sentence scores; when set to True, the default and non-default heads
from the token scores are used to obtain the sentence scores.
:return sentence_scores: 2D floats of shape [B, num_sentence_labels]
:return sentence_predictions: predicted labels for each sentence in the batch; ints of shape [B]
:return token_scores: 3D floats of shape [B, M, num_heads]
:return token_predictions: predicted labels for each token in each sentence; ints of shape [B, M]
"""
with tf.variable_scope("transformer_single_heads_multi_attention"):
token_scores_per_head = []
sentence_scores_per_head = []
attention_weights_per_head = []
for i in range(num_heads):
with tf.variable_scope("num_head_{}".format(i), reuse=tf.AUTO_REUSE):
if how_to_compute_attention == "additive":
sentence_scores_head_i, token_scores_head_i, attention_weights_head_i = \
compute_scores_from_additive_attention(
inputs=inputs, initializer=initializer,
attention_activation=attention_activation,
sentence_lengths=sentence_lengths)
elif how_to_compute_attention == "dot":
sentence_scores_head_i, token_scores_head_i, attention_weights_head_i = \
compute_scores_from_scaled_dot_product_attention(
inputs=inputs, initializer=initializer,
attention_activation=attention_activation,
sentence_lengths=sentence_lengths,
token_scoring_method=token_scoring_method)
else:
raise ValueError("Unknown/unsupported way of computing the attention: %s"
% how_to_compute_attention)
sentence_scores_per_head.append(sentence_scores_head_i)
token_scores_per_head.append(token_scores_head_i)
attention_weights_per_head.append(attention_weights_head_i)
sentence_scores = tf.stack(sentence_scores_per_head, axis=-1) # [B, num_heads]
if separate_heads:
sentence_default_score = tf.layers.dense(
inputs=tf.expand_dims(sentence_scores[:, 0], axis=-1), units=1,
activation=scoring_activation, kernel_initializer=initializer,
name="ff_default_sentence_scores")
sentence_non_default_scores = tf.layers.dense(
inputs=sentence_scores[:, 1:], units=num_sentence_labels-1,
activation=scoring_activation, kernel_initializer=initializer,
name="ff_non_default_sentence_scores")
sentence_scores = tf.concat(
[sentence_default_score, sentence_non_default_scores],
axis=-1, name="sentence_scores_concatenation")
else:
sentence_scores = tf.layers.dense(
inputs=sentence_scores, units=num_sentence_labels,
activation=scoring_activation, kernel_initializer=initializer,
name="ff_sentence_scores") # [B, num_sentence_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores)
sentence_predictions = tf.argmax(sentence_probabilities, axis=1) # [B]
token_scores = tf.stack(token_scores_per_head, axis=-1) # [B, M, num_heads]
token_probabilities = tf.nn.softmax(token_scores, axis=-1) # [B, M, num_heads]
token_predictions = tf.argmax(token_probabilities, axis=-1) # [B, M]
# Will be of shape [B, M, H] if an additive attention was used, or
# of shape [B, M, M, H] if a scaled-dot product attention was used.
attention_weights = tf.stack(attention_weights_per_head, axis=-1)
return sentence_scores, sentence_predictions, token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
def variant_1(
inputs,
initializer,
attention_activation,
num_sentence_labels,
num_heads,
hidden_units,
sentence_lengths,
scoring_activation=None,
token_scoring_method="max",
use_inputs_instead_values=False,
separate_heads=True):
"""
Variant 1 of the multi-head attention to obtain sentence and token scores and predictions.
"""
with tf.variable_scope("variant_1"):
num_units = inputs.get_shape().as_list()[-1]
if num_units % num_heads != 0:
num_units = ceil(num_units / num_heads) * num_heads
inputs = tf.layers.dense(inputs, num_units) # [B, M, num_units]
# Project to get the queries, keys, and values.
queries = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
keys = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
values = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
# Mask out the keys, queries and values: replace with 0 all the token
# positions between the true and the maximum sentence length.
multiplication_mask = tf.tile(
input=tf.expand_dims(tf.sequence_mask(sentence_lengths), axis=-1),
multiples=[1, 1, num_units]) # [B, M, num_units]
queries = tf.where(multiplication_mask, queries, tf.zeros_like(queries))
keys = tf.where(multiplication_mask, keys, tf.zeros_like(keys))
# Split and concat as many projections as the number of heads.
queries = tf.concat(
tf.split(queries, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
keys = tf.concat(
tf.split(keys, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
values = tf.concat(
tf.split(values, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
inputs = tf.concat(
tf.split(inputs, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
# Transpose multiplication and scale
attention_evidence = tf.matmul(
queries, tf.transpose(keys, [0, 2, 1])) # [B*num_heads, M, M]
attention_evidence = tf.math.divide(
attention_evidence, tf.constant(num_units ** 0.5))
# Mask columns (with values of -infinity), based on rows that have 0 sum.
attention_evidence_masked = mask(
attention_evidence, queries, keys, mask_type="key")
# Apply a non-linear layer to obtain (un-normalized) attention weights.
if attention_activation == "soft":
attention_weights = tf.nn.sigmoid(attention_evidence_masked)
elif attention_activation == "sharp":
attention_weights = tf.math.exp(attention_evidence_masked)
elif attention_activation == "linear":
attention_weights = attention_evidence_masked
else:
raise ValueError("Unknown/unsupported attention activation: %s."
% attention_activation)
attention_weights_unnormalized = attention_weights
# Normalize attention weights.
attention_weights /= tf.reduce_sum(
attention_weights, axis=-1, keep_dims=True)
# Mask rows (with values of 0), based on columns that have 0 sum.
attention_weights = mask(
attention_weights, queries, keys, mask_type="query")
attention_weights_unnormalized = mask(
attention_weights_unnormalized, queries, keys, mask_type="query")
# [B*num_heads, M, num_units/num_heads]
if use_inputs_instead_values:
product = tf.matmul(attention_weights, inputs)
else:
product = tf.matmul(attention_weights, values)
product = tf.reduce_sum(product, axis=1) # [B*num_heads, num_units/num_heads]
product = tf.layers.dense(
inputs=product, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer) # [B*num_heads, hidden_units]
processed_tensor = tf.layers.dense(
inputs=product, units=1,
kernel_initializer=initializer) # [B*num_heads, 1]
processed_tensor = tf.concat(
tf.split(processed_tensor, num_heads), axis=1) # [B, num_heads]
if separate_heads:
if num_sentence_labels == num_heads:
sentence_scores = processed_tensor
else:
# Get the sentence representations corresponding to the default head.
default_head = tf.gather(
processed_tensor,
indices=[0], axis=-1) # [B, 1]
# Get the sentence representations corresponding to the non-default head.
non_default_heads = tf.gather(
processed_tensor,
indices=list(range(1, num_heads)), axis=-1) # [B, num_heads-1]
# Project onto one unit, corresponding to the default sentence label score.
sentence_default_scores = tf.layers.dense(
default_head, units=1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_default_scores_ff") # [B, 1]
# Project onto (num_sentence_labels-1) units, corresponding to
# the non-default sentence label scores.
sentence_non_default_scores = tf.layers.dense(
non_default_heads, units=num_sentence_labels - 1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_non_default_scores_ff") # [B, num_sentence_labels-1]
sentence_scores = tf.concat(
[sentence_default_scores, sentence_non_default_scores],
axis=-1, name="sentence_scores_concatenation") # [B, num_sent_labels]
else:
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=num_sentence_labels,
activation=scoring_activation, kernel_initializer=initializer,
name="output_sent_specified_scores_ff") # [B, num_sent_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores)
sentence_predictions = tf.argmax(sentence_probabilities, axis=1) # [B]
# Obtain token scores from attention weights. Shape is [B*num_heads, M].
if token_scoring_method == "sum":
token_scores = tf.reduce_sum(attention_weights_unnormalized, axis=1)
elif token_scoring_method == "max":
token_scores = tf.reduce_max(attention_weights_unnormalized, axis=1)
elif token_scoring_method == "avg":
token_scores = tf.reduce_mean(attention_weights_unnormalized, axis=1)
elif token_scoring_method == "logsumexp":
token_scores = tf.reduce_logsumexp(attention_weights_unnormalized, axis=1)
else:
raise ValueError("Unknown/unsupported token scoring method: %s"
% token_scoring_method)
token_scores = tf.expand_dims(token_scores, axis=2) # [B*num_heads, M, 1]
token_scores = tf.concat(
tf.split(token_scores, num_heads), axis=2) # [B, M, num_heads]
token_probabilities = tf.nn.softmax(token_scores)
token_predictions = tf.argmax(
token_probabilities, axis=2, output_type=tf.int32) # [B, M]
attention_weights = tf.concat(
tf.split(tf.expand_dims(attention_weights, axis=-1), num_heads),
axis=-1) # [B, M, M, num_heads]
return sentence_scores, sentence_predictions, \
token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
def variant_2(
inputs,
initializer,
attention_activation,
num_sentence_labels,
num_heads,
hidden_units,
sentence_lengths,
scoring_activation=None,
use_inputs_instead_values=False,
separate_heads=True):
"""
Variant 2 of the multi-head attention to obtain sentence and token scores and predictions.
"""
with tf.variable_scope("variant_2"):
num_units = inputs.get_shape().as_list()[-1]
if num_units % num_heads != 0:
num_units = ceil(num_units / num_heads) * num_heads
inputs = tf.layers.dense(inputs, num_units) # [B, M, num_units]
# Project to get the queries, keys, and values.
queries = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
keys = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
values = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
# Mask out the keys, queries and values: replace with 0 all the token
# positions between the true and the maximum sentence length.
multiplication_mask = tf.tile(
input=tf.expand_dims(tf.sequence_mask(sentence_lengths), axis=-1),
multiples=[1, 1, num_units]) # [B, M, num_units]
keys = tf.where(multiplication_mask, keys, tf.zeros_like(keys))
# Split and concat as many projections as the number of heads.
queries = tf.concat(
tf.split(queries, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
# [B*num_heads, 1, num_units/num_heads]
queries = tf.reduce_sum(queries, axis=1, keep_dims=True)
keys = tf.concat(
tf.split(keys, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
values = tf.concat(
tf.split(values, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
inputs = tf.concat(
tf.split(inputs, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
# Transpose multiplication and scale
attention_evidence = tf.matmul(
queries, tf.transpose(keys, [0, 2, 1])) # [B*num_heads, 1, M]
attention_evidence = tf.math.divide(
attention_evidence, tf.constant(num_units ** 0.5))
# Mask columns (with values of -infinity), based on rows that have 0 sum.
attention_evidence_masked = mask(
attention_evidence, queries, keys, mask_type="key")
# Apply a non-linear layer to obtain (un-normalized) attention weights.
if attention_activation == "soft":
attention_weights = tf.nn.sigmoid(attention_evidence_masked)
elif attention_activation == "sharp":
attention_weights = tf.math.exp(attention_evidence_masked)
elif attention_activation == "linear":
attention_weights = attention_evidence_masked
else:
raise ValueError("Unknown/unsupported attention activation: %s."
% attention_activation)
attention_weights_unnormalized = attention_weights
# Normalize attention weights.
attention_weights /= tf.reduce_sum(
attention_weights, axis=-1, keep_dims=True)
# Mask rows (with values of 0), based on columns that have 0 sum.
attention_weights = mask(
attention_weights, queries, keys, mask_type="query")
attention_weights_unnormalized = mask(
attention_weights_unnormalized, queries, keys, mask_type="query")
# Transpose attention weights.
attention_weights = tf.transpose(
attention_weights, [0, 2, 1]) # [B*num_heads, M, 1]
# [B*num_heads, M, num_units/num_heads]
if use_inputs_instead_values:
product = inputs * attention_weights
else:
product = values * attention_weights
product = tf.reduce_sum(product, axis=1) # [B*num_heads, num_units/num_heads]
product = tf.layers.dense(
inputs=product, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer) # [B*num_heads, hidden_units]
processed_tensor = tf.layers.dense(
inputs=product, units=1,
kernel_initializer=initializer) # [B*num_heads, 1]
processed_tensor = tf.concat(
tf.split(processed_tensor, num_heads), axis=1) # [B, num_heads]
if separate_heads:
if num_sentence_labels == num_heads:
sentence_scores = processed_tensor
else:
# Get the sentence representations corresponding to the default head.
default_head = tf.gather(
processed_tensor,
indices=[0], axis=-1) # [B, 1]
# Get the sentence representations corresponding to the non-default head.
non_default_heads = tf.gather(
processed_tensor,
indices=list(range(1, num_heads)), axis=-1) # [B, num_heads-1]
# Project onto one unit, corresponding to the default sentence label score.
sentence_default_scores = tf.layers.dense(
default_head, units=1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_default_scores_ff") # [B, 1]
# Project onto (num_sentence_labels-1) units, corresponding to
# the non-default sentence label scores.
sentence_non_default_scores = tf.layers.dense(
non_default_heads, units=num_sentence_labels - 1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_non_default_scores_ff") # [B, num_sentence_labels-1]
sentence_scores = tf.concat(
[sentence_default_scores, sentence_non_default_scores],
axis=-1, name="sentence_scores_concatenation") # [B, num_sent_labels]
else:
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=num_sentence_labels,
activation=scoring_activation, kernel_initializer=initializer,
name="output_sent_specified_scores_ff") # [B, num_sent_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores)
sentence_predictions = tf.argmax(sentence_probabilities, axis=1) # [B]
# Obtain token scores from attention weights.
token_scores = tf.transpose(
attention_weights_unnormalized, [0, 2, 1]) # [num_heads*B, M, 1]
token_scores = tf.concat(
tf.split(token_scores, num_heads), axis=2) # [B, M, num_heads]
token_probabilities = tf.nn.softmax(token_scores)
token_predictions = tf.argmax(
token_probabilities, axis=2, output_type=tf.int32) # [B, M]
attention_weights = tf.concat(
tf.split(tf.transpose(attention_weights, [0, 2, 1]), num_heads),
axis=-1) # [B, M, num_heads]
return sentence_scores, sentence_predictions, \
token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
def variant_3(
inputs,
initializer,
attention_activation,
num_sentence_labels,
num_heads,
attention_size,
sentence_lengths,
scoring_activation=None,
separate_heads=True):
"""
Variant 3 of the multi-head attention to obtain sentence and token scores and predictions.
"""
with tf.variable_scope("variant_3"):
num_units = inputs.get_shape().as_list()[-1]
if num_units % num_heads != 0:
num_units = ceil(num_units / num_heads) * num_heads
inputs = tf.layers.dense(inputs, num_units) # [B, M, num_units]
# Trainable parameters
w_omega = tf.Variable(
tf.random_normal([num_heads, num_units, attention_size],
stddev=0.1)) # [num_heads, num_units, A]
b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
# Computing the attention score, of shape [B, M, H, A].
attention_evidence = tf.tanh(tf.tensordot(inputs, w_omega, axes=[[2], [1]]) + b_omega)
attention_evidence = tf.tensordot(
attention_evidence, u_omega, axes=[[-1], [0]],
name='attention_evidence_score') # [B, M, H]
# Apply a non-linear layer to obtain (un-normalized) attention weights.
if attention_activation == "soft":
attention_weights_unnormalized = tf.nn.sigmoid(attention_evidence)
elif attention_activation == "sharp":
attention_weights_unnormalized = tf.math.exp(attention_evidence)
elif attention_activation == "linear":
attention_weights_unnormalized = attention_evidence
else:
raise ValueError("Unknown/unsupported attention activation: %s."
% attention_activation)
tiled_sentence_lengths = tf.tile(
input=tf.expand_dims(
tf.sequence_mask(sentence_lengths), axis=-1),
multiples=[1, 1, num_heads])
attention_weights_unnormalized = tf.where(
tiled_sentence_lengths,
attention_weights_unnormalized,
tf.zeros_like(attention_weights_unnormalized))
attention_weights = attention_weights_unnormalized / tf.reduce_sum(
attention_weights_unnormalized, axis=1, keep_dims=True) # [B, M, H]
# Prepare alphas and input.
attention_weights = tf.transpose(attention_weights, [0, 2, 1]) # [B, H, M]
inputs = tf.tile(
input=tf.expand_dims(inputs, axis=1),
multiples=[1, num_heads, 1, 1]) # [B, H, M, E]
product = inputs * tf.expand_dims(attention_weights, axis=-1) # [B, H, M, E]
output = tf.reduce_sum(product, axis=2) # [B, H, E]
processed_tensor = tf.squeeze(tf.layers.dense(
inputs=output, units=1,
kernel_initializer=initializer), axis=-1) # [B, num_heads]
if separate_heads:
if num_sentence_labels == num_heads:
sentence_scores = processed_tensor
else:
# Get the sentence representations corresponding to the default head.
default_head = tf.gather(
processed_tensor,
indices=[0], axis=-1) # [B, 1]
# Get the sentence representations corresponding to the non-default head.
non_default_heads = tf.gather(
processed_tensor,
indices=list(range(1, num_heads)), axis=-1) # [B, num_heads-1]
# Project onto one unit, corresponding to the default sentence label score.
sentence_default_scores = tf.layers.dense(
default_head, units=1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_default_scores_ff") # [B, 1]
# Project onto (num_sentence_labels-1) units, corresponding to
# the non-default sentence label scores.
sentence_non_default_scores = tf.layers.dense(
non_default_heads, units=num_sentence_labels - 1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_non_default_scores_ff") # [B, num_sentence_labels-1]
sentence_scores = tf.concat(
[sentence_default_scores, sentence_non_default_scores],
axis=-1, name="sentence_scores_concatenation") # [B, num_sent_labels]
else:
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=num_sentence_labels,
activation=scoring_activation, kernel_initializer=initializer,
name="output_sent_specified_scores_ff") # [B, num_sent_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores)
sentence_predictions = tf.argmax(sentence_probabilities, axis=1) # [B]
token_scores = attention_weights_unnormalized # [B, M, num_heads]
token_probabilities = tf.nn.softmax(token_scores)
token_predictions = tf.argmax(
token_probabilities, axis=2, output_type=tf.int32) # [B, M]
return sentence_scores, sentence_predictions, \
token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
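# A minimal sketch (illustrative only) of the masked normalisation used in
# variant_3 for a single head of a single sentence: positions beyond the true
# sentence length are zeroed before the weights are re-scaled to sum to one.
# The function name and the NumPy usage are assumptions, not part of the model.
def _sketch_masked_attention_normalisation(scores, sentence_length):
    import numpy as np
    scores = np.asarray(scores, dtype=float)             # [M]
    valid = np.arange(scores.shape[0]) < sentence_length  # True for real tokens
    unnormalized = np.where(valid, scores, 0.0)
    return unnormalized / unnormalized.sum()              # sums to 1 over real tokens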
def variant_4(
inputs,
initializer,
attention_activation,
num_sentence_labels,
num_heads,
hidden_units,
sentence_lengths,
scoring_activation=None,
token_scoring_method="max",
use_inputs_instead_values=False,
separate_heads=True):
"""
Variant 4 of the multi-head attention to obtain sentence and token scores and predictions.
"""
with tf.variable_scope("variant_4"):
num_units = inputs.get_shape().as_list()[-1]
if num_units % num_heads != 0:
num_units = ceil(num_units / num_heads) * num_heads
inputs = tf.layers.dense(inputs, num_units) # [B, M, num_units]
# Project to get the queries, keys, and values.
queries = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
keys = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
values = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
# Mask out the keys, queries and values: replace with 0 all the token
# positions between the true and the maximum sentence length.
multiplication_mask = tf.tile(
input=tf.expand_dims(tf.sequence_mask(sentence_lengths), axis=-1),
multiples=[1, 1, num_units]) # [B, M, num_units]
queries = tf.where(multiplication_mask, queries, tf.zeros_like(queries))
keys = tf.where(multiplication_mask, keys, tf.zeros_like(keys))
values = tf.where(multiplication_mask, values, tf.zeros_like(values))
# Split and concat as many projections as the number of heads.
queries = tf.concat(
tf.split(queries, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
keys = tf.concat(
tf.split(keys, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
values = tf.concat(
tf.split(values, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
inputs = tf.concat(
tf.split(inputs, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
# Transpose multiplication and scale
attention_evidence = tf.matmul(
queries, tf.transpose(keys, [0, 2, 1])) # [B*num_heads, M, M]
attention_evidence = tf.math.divide(
attention_evidence, tf.constant(num_units ** 0.5))
# Mask columns (with values of -infinity), based on rows that have 0 sum.
attention_evidence_masked = mask(
attention_evidence, queries, keys, mask_type="key")
# Apply a non-linear layer to obtain (un-normalized) attention weights.
if attention_activation == "soft":
attention_weights_unnormalized = tf.nn.sigmoid(attention_evidence_masked)
elif attention_activation == "sharp":
attention_weights_unnormalized = tf.math.exp(attention_evidence_masked)
elif attention_activation == "linear":
attention_weights_unnormalized = attention_evidence_masked
else:
raise ValueError("Unknown/unsupported attention activation: %s."
% attention_activation)
attention_weights_unnormalized = mask( # [B*num_heads, M, M]
attention_weights_unnormalized, queries, keys, mask_type="query")
# Obtain token scores from attention weights. Shape is [B*num_heads, M].
if token_scoring_method == "sum":
attention_weights_unnormalized = tf.reduce_sum(
attention_weights_unnormalized, axis=1)
elif token_scoring_method == "max":
attention_weights_unnormalized = tf.reduce_max(
attention_weights_unnormalized, axis=1)
elif token_scoring_method == "avg":
attention_weights_unnormalized = tf.reduce_mean(
attention_weights_unnormalized, axis=1)
elif token_scoring_method == "logsumexp":
attention_weights_unnormalized = tf.reduce_logsumexp(
attention_weights_unnormalized, axis=1)
else:
raise ValueError("Unknown/unsupported token scoring method: %s"
% token_scoring_method)
# Normalize to obtain attention weights.
attention_weights = attention_weights_unnormalized / tf.reduce_sum(
attention_weights_unnormalized, axis=1, keep_dims=True)
token_scores = tf.concat(
tf.split(tf.expand_dims(attention_weights_unnormalized, axis=2), num_heads),
axis=2) # [B, M, num_heads]
token_probabilities = tf.nn.softmax(token_scores)
token_predictions = tf.argmax(
token_probabilities, axis=2, output_type=tf.int32) # [B, M]
if use_inputs_instead_values:
product = tf.reduce_sum(inputs * tf.expand_dims(attention_weights, axis=-1),
axis=1) # [B*num_heads, num_units/num_heads]
else:
product = tf.reduce_sum(values * tf.expand_dims(attention_weights, axis=-1),
axis=1) # [B*num_heads, num_units/num_heads]
product = tf.layers.dense(
inputs=product, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer) # [B*num_heads, hidden_units]
processed_tensor = tf.layers.dense(
inputs=product, units=1,
kernel_initializer=initializer) # [B*num_heads, 1]
processed_tensor = tf.concat(
tf.split(processed_tensor, num_heads), axis=1) # [B, num_heads]
if separate_heads:
if num_sentence_labels == num_heads:
sentence_scores = processed_tensor
else:
# Get the sentence representations corresponding to the default head.
default_head = tf.gather(
processed_tensor,
indices=[0], axis=-1) # [B, 1]
# Get the sentence representations corresponding to the non-default head.
non_default_heads = tf.gather(
processed_tensor,
indices=list(range(1, num_heads)), axis=-1) # [B, num_heads-1]
# Project onto one unit, corresponding to the default sentence label score.
sentence_default_scores = tf.layers.dense(
default_head, units=1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_default_scores_ff") # [B, 1]
# Project onto (num_sentence_labels-1) units, corresponding to
# the non-default sentence label scores.
sentence_non_default_scores = tf.layers.dense(
non_default_heads, units=num_sentence_labels - 1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_non_default_scores_ff") # [B, num_sentence_labels-1]
sentence_scores = tf.concat(
[sentence_default_scores, sentence_non_default_scores],
axis=-1, name="sentence_scores_concatenation") # [B, num_sent_labels]
else:
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=num_sentence_labels,
activation=scoring_activation, kernel_initializer=initializer,
name="output_sent_specified_scores_ff") # [B, num_sent_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores)
sentence_predictions = tf.argmax(sentence_probabilities, axis=1) # [B]
attention_weights = tf.concat(
tf.split(tf.expand_dims(attention_weights, axis=-1), num_heads),
axis=-1) # [B, M, num_heads]
return sentence_scores, sentence_predictions, \
token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
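# A minimal sketch (illustrative only, hypothetical names) of the two steps
# behind variant_4 for one head of one sentence: scaled dot-product evidence
# between queries and keys, followed by a column-wise reduction ("sum", "max",
# "avg" or "logsumexp") that turns the [M, M] evidence into one score per token.
# The 1/sqrt(d) scaling here uses the per-head dimension purely for illustration.
def _sketch_scaled_dot_product_token_scores(queries, keys, method="max"):
    import numpy as np
    queries, keys = np.asarray(queries), np.asarray(keys)   # both [M, d]
    evidence = queries @ keys.T / np.sqrt(keys.shape[-1])   # [M, M]
    if method == "sum":
        return evidence.sum(axis=0)
    if method == "max":
        return evidence.max(axis=0)
    if method == "avg":
        return evidence.mean(axis=0)
    if method == "logsumexp":
        return np.log(np.exp(evidence).sum(axis=0))
    raise ValueError("Unknown token scoring method: %s" % method)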
def variant_5(
inputs,
initializer,
attention_activation,
num_sentence_labels,
num_heads,
hidden_units,
sentence_lengths,
scoring_activation=None,
token_scoring_method="max",
use_inputs_instead_values=False,
separate_heads=True):
"""
Variant 5 of the multi-head attention to obtain sentence and token scores and predictions.
"""
with tf.variable_scope("variant_5"):
num_units = inputs.get_shape().as_list()[-1]
if num_units % num_heads != 0:
num_units = ceil(num_units / num_heads) * num_heads
inputs = tf.layers.dense(inputs, num_units) # [B, M, num_units]
# Project to get the queries, keys, and values.
queries = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
keys = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
values = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
# Mask out the keys, queries and values: replace with 0 all the token
# positions between the true and the maximum sentence length.
multiplication_mask = tf.tile(
input=tf.expand_dims(tf.sequence_mask(sentence_lengths), axis=-1),
multiples=[1, 1, num_units]) # [B, M, num_units]
queries = tf.where(multiplication_mask, queries, tf.zeros_like(queries))
keys = tf.where(multiplication_mask, keys, tf.zeros_like(keys))
values = tf.where(multiplication_mask, values, tf.zeros_like(values))
# Split and concat as many projections as the number of heads.
queries = tf.concat(
tf.split(queries, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
keys = tf.concat(
tf.split(keys, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
values = tf.concat(
tf.split(values, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
inputs = tf.concat(
tf.split(inputs, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
# Transpose multiplication and scale
attention_evidence = tf.matmul(
queries, tf.transpose(keys, [0, 2, 1])) # [B*num_heads, M, M]
attention_evidence = tf.math.divide(
attention_evidence, tf.constant(num_units ** 0.5))
# Obtain token scores from attention weights. Shape is [B*num_heads, M].
if token_scoring_method == "sum":
attention_evidence = tf.reduce_sum(
attention_evidence, axis=1)
elif token_scoring_method == "max":
attention_evidence = tf.reduce_max(
attention_evidence, axis=1)
elif token_scoring_method == "avg":
attention_evidence = tf.reduce_mean(
attention_evidence, axis=1)
elif token_scoring_method == "logsumexp":
attention_evidence = tf.reduce_logsumexp(
attention_evidence, axis=1)
else:
raise ValueError("Unknown/unsupported token scoring method: %s"
% token_scoring_method)
# Apply a non-linear layer to obtain un-normalized attention weights.
if attention_activation == "soft":
attention_weights_unnormalized = tf.nn.sigmoid(attention_evidence)
elif attention_activation == "sharp":
attention_weights_unnormalized = tf.math.exp(attention_evidence)
elif attention_activation == "linear":
attention_weights_unnormalized = attention_evidence
else:
raise ValueError("Unknown/unsupported attention activation: %s."
% attention_activation)
tiled_sentence_lengths = tf.tile(
input=tf.sequence_mask(sentence_lengths), multiples=[num_heads, 1])
attention_weights_unnormalized = tf.where(
tiled_sentence_lengths,
attention_weights_unnormalized,
tf.zeros_like(attention_weights_unnormalized))
# Normalize to obtain attention weights of shape [B*num_heads, M].
attention_weights = attention_weights_unnormalized / tf.reduce_sum(
attention_weights_unnormalized, axis=1, keep_dims=True)
token_scores = tf.concat(
tf.split(tf.expand_dims(attention_weights_unnormalized, axis=2), num_heads),
axis=2) # [B, M, num_heads]
token_probabilities = tf.nn.softmax(token_scores)
token_predictions = tf.argmax(
token_probabilities, axis=2, output_type=tf.int32) # [B, M]
if use_inputs_instead_values:
product = tf.reduce_sum(inputs * tf.expand_dims(attention_weights, axis=-1),
axis=1) # [B*num_heads, num_units/num_heads]
else:
product = tf.reduce_sum(values * tf.expand_dims(attention_weights, axis=-1),
axis=1) # [B*num_heads, num_units/num_heads]
product = tf.layers.dense(
inputs=product, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer) # [B*num_heads, hidden_units]
processed_tensor = tf.layers.dense(
inputs=product, units=1,
kernel_initializer=initializer) # [B*num_heads, 1]
processed_tensor = tf.concat(
tf.split(processed_tensor, num_heads), axis=1) # [B, num_heads]
if separate_heads:
if num_sentence_labels == num_heads:
sentence_scores = processed_tensor
else:
# Get the sentence representations corresponding to the default head.
default_head = tf.gather(
processed_tensor,
indices=[0], axis=-1) # [B, 1]
# Get the sentence representations corresponding to the non-default head.
non_default_heads = tf.gather(
processed_tensor,
indices=list(range(1, num_heads)), axis=-1) # [B, num_heads-1]
# Project onto one unit, corresponding to the default sentence label score.
sentence_default_scores = tf.layers.dense(
default_head, units=1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_default_scores_ff") # [B, 1]
# Project onto (num_sentence_labels-1) units, corresponding to
# the non-default sentence label scores.
sentence_non_default_scores = tf.layers.dense(
non_default_heads, units=num_sentence_labels - 1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_non_default_scores_ff") # [B, num_sentence_labels-1]
sentence_scores = tf.concat(
[sentence_default_scores, sentence_non_default_scores],
axis=-1, name="sentence_scores_concatenation") # [B, num_sent_labels]
else:
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=num_sentence_labels,
activation=scoring_activation, kernel_initializer=initializer,
name="output_sent_specified_scores_ff") # [B, num_sent_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores)
sentence_predictions = tf.argmax(sentence_probabilities, axis=1) # [B]
attention_weights = tf.concat(
tf.split(tf.expand_dims(attention_weights, axis=-1), num_heads),
axis=-1) # [B, M, num_heads]
return sentence_scores, sentence_predictions, \
token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
def variant_6(
inputs,
initializer,
attention_activation,
num_sentence_labels,
num_heads,
hidden_units,
scoring_activation=None,
token_scoring_method="max",
separate_heads=True):
"""
Variant 6 of the multi-head attention to obtain sentence and token scores and predictions.
"""
with tf.variable_scope("variant_6"):
num_units = inputs.get_shape().as_list()[-1]
keys_list = []
queries_list = []
values_list = []
for i in range(num_heads):
with tf.variable_scope("num_head_{}".format(i), reuse=tf.AUTO_REUSE):
keys_this_head = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
queries_this_head = tf.layers.dense(
inputs, num_units, activation=tf.nn.relu,
kernel_regularizer=tf.contrib.layers.l2_regularizer(scale=0.7),
kernel_initializer=initializer) # [B, M, num_units]
values_this_head = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=initializer) # [B, M, num_units]
keys_list.append(keys_this_head)
queries_list.append(queries_this_head)
values_list.append(values_this_head)
keys = tf.stack(keys_list) # [num_heads, B, M, num_units]
queries = tf.stack(queries_list) # [num_heads, B, M, num_units]
values = tf.stack(values_list) # [num_heads, B, M, num_units]
# Transpose multiplication and scale
attention_evidence = tf.matmul(
queries, tf.transpose(keys, [0, 1, 3, 2])) # [num_heads, B, M, M]
attention_evidence = tf.math.divide(
attention_evidence, tf.constant(num_units ** 0.5))
# Mask columns (with values of -infinity), based on rows that have 0 sum.
attention_evidence_masked = mask_2(
attention_evidence, queries, keys, mask_type="key")
# Apply a non-linear layer to obtain (un-normalized) attention weights.
if attention_activation == "soft":
attention_weights = tf.nn.sigmoid(attention_evidence_masked)
elif attention_activation == "sharp":
attention_weights = tf.math.exp(attention_evidence_masked)
elif attention_activation == "linear":
attention_weights = attention_evidence_masked
else:
raise ValueError("Unknown/unsupported attention activation: %s."
% attention_activation)
attention_weights_unnormalized = attention_weights
# Normalize attention weights.
attention_weights /= tf.reduce_sum(
attention_weights, axis=-1, keep_dims=True)
# Mask rows (with values of 0), based on columns that have 0 sum.
attention_weights = mask_2(
attention_weights, queries, keys, mask_type="query")
attention_weights_unnormalized = mask_2(
attention_weights_unnormalized, queries, keys, mask_type="query")
# [num_heads, B, M, num_units]
product = tf.matmul(attention_weights, values)
product = tf.reduce_sum(product, axis=2) # [num_heads, B, num_units]
product = tf.layers.dense(
inputs=product, units=hidden_units,
activation=tf.tanh, kernel_initializer=initializer) # [num_heads, B, hidden_units]
processed_tensor = tf.layers.dense(
inputs=product, units=1,
kernel_initializer=initializer) # [num_heads, B, 1]
processed_tensor = tf.transpose(
tf.squeeze(processed_tensor, axis=-1), [1, 0]) # [B, num_heads]
if separate_heads:
if num_sentence_labels == num_heads:
sentence_scores = processed_tensor
else:
# Get the sentence representations corresponding to the default head.
default_head = tf.gather(
processed_tensor,
indices=[0], axis=-1) # [B, 1]
# Get the sentence representations corresponding to the non-default head.
non_default_heads = tf.gather(
processed_tensor,
indices=list(range(1, num_heads)), axis=-1) # [B, num_heads-1]
# Project onto one unit, corresponding to the default sentence label score.
sentence_default_scores = tf.layers.dense(
default_head, units=1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_default_scores_ff") # [B, 1]
# Project onto (num_sentence_labels-1) units, corresponding to
# the non-default sentence label scores.
sentence_non_default_scores = tf.layers.dense(
non_default_heads, units=num_sentence_labels - 1,
activation=scoring_activation, kernel_initializer=initializer,
name="sentence_non_default_scores_ff") # [B, num_sentence_labels-1]
sentence_scores = tf.concat(
[sentence_default_scores, sentence_non_default_scores],
axis=-1, name="sentence_scores_concatenation") # [B, num_sent_labels]
else:
sentence_scores = tf.layers.dense(
inputs=processed_tensor, units=num_sentence_labels,
activation=scoring_activation, kernel_initializer=initializer,
name="output_sent_specified_scores_ff") # [B, num_sent_labels]
sentence_probabilities = tf.nn.softmax(sentence_scores)
sentence_predictions = tf.argmax(sentence_probabilities, axis=1) # [B]
# Obtain token scores from attention weights. Shape is [num_heads, B, M].
if token_scoring_method == "sum":
token_scores = tf.reduce_sum(attention_weights_unnormalized, axis=2)
elif token_scoring_method == "max":
token_scores = tf.reduce_max(attention_weights_unnormalized, axis=2)
elif token_scoring_method == "avg":
token_scores = tf.reduce_mean(attention_weights_unnormalized, axis=2)
elif token_scoring_method == "logsumexp":
token_scores = tf.reduce_logsumexp(attention_weights_unnormalized, axis=2)
else:
raise ValueError("Unknown/unsupported token scoring method: %s"
% token_scoring_method)
token_scores = tf.transpose(token_scores, [1, 2, 0]) # [B, M, num_heads]
token_probabilities = tf.nn.softmax(token_scores)
token_predictions = tf.argmax(
token_probabilities, axis=2, output_type=tf.int32) # [B, M]
attention_weights = tf.transpose(attention_weights, [1, 2, 3, 0]) # [B, M, M, num_heads]
return sentence_scores, sentence_predictions, \
token_scores, token_predictions, \
token_probabilities, sentence_probabilities, attention_weights
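# A minimal sketch (illustrative only) of the per-head projection pattern that
# distinguishes variant_6: instead of splitting one projection across heads,
# each head applies its own dense transform to the full input and the results
# are stacked on a new leading axis. Names and the NumPy stand-in are assumptions.
def _sketch_stack_per_head_projections(inputs, head_weight_matrices):
    import numpy as np
    inputs = np.asarray(inputs)                            # [B, M, num_units]
    projections = [np.tanh(inputs @ w) for w in head_weight_matrices]
    return np.stack(projections)                           # [num_heads, B, M, num_units]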
def get_token_representative_values(token_probabilities, approach):
"""
Obtains, for each head, a representative token probability across the sentence.
:param token_probabilities: the softmaxed token scores.
:param approach: how to get the representations (max, avg, log).
:return: token_representative_values of shape [batch_size, num_heads].
"""
if "max" in approach:
token_representative_values = tf.reduce_max(
token_probabilities, axis=1)
elif "avg" in approach:
token_representative_values = tf.reduce_mean(
token_probabilities, axis=1)
elif "log" in approach:
token_representative_values = tf.reduce_logsumexp(
token_probabilities, axis=1)
else:
raise ValueError("Unknown approach for getting "
"token representative values: %s." % approach)
return token_representative_values # [B, num_heads]
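# Hedged worked example (hypothetical numbers) for the "max" approach above:
# with token_probabilities of shape [B=1, M=2, num_heads=2] equal to
# [[[0.9, 0.1], [0.2, 0.8]]], tf.reduce_max over axis=1 gives [[0.9, 0.8]],
# so a head scores high only if at least one token in the sentence activates it.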
def get_one_hot_of_token_labels_length(
sentence_labels, num_sent_labels, num_tok_labels):
"""
Obtains one-hot sentence representations.
:param sentence_labels: ground truth sentence labels.
:param num_sent_labels: total number of unique sentence labels.
:param num_tok_labels: total number of unique token labels.
:return: one hot sentence labels, corresponding to the token labels.
"""
one_hot_sentence_labels = tf.one_hot(
tf.cast(sentence_labels, tf.int64),
depth=num_sent_labels)
if num_sent_labels == 2 and num_sent_labels != num_tok_labels:
# Get the default and non-default sentence labels.
default_sentence_labels = tf.gather(
one_hot_sentence_labels, indices=[0], axis=-1) # [B x 1]
non_default_sentence_labels = tf.gather(
one_hot_sentence_labels, indices=[1], axis=-1) # [B x 1]
# Tile the non-default one (num_tok_labels - 1) times.
tiled_non_default_sentence_labels = tf.tile(
input=non_default_sentence_labels,
multiples=[1, num_tok_labels - 1])
# Get one-hot sentence labels of shape [B, num_tok_labels].
one_hot_sentence_labels = tf.concat(
[default_sentence_labels, tiled_non_default_sentence_labels],
axis=-1, name="one_hot_sentence_labels_concatenation")
return one_hot_sentence_labels # [B, num_tok_labels]
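# Hedged worked example (hypothetical numbers) of the expansion above: with
# num_sent_labels=2 and num_tok_labels=4, a non-default gold sentence (label 1)
# becomes one-hot [0, 1]; its non-default column is tiled num_tok_labels-1
# times, giving [0, 1, 1, 1], while a default sentence (label 0) gives [1, 0, 0, 0].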
def compute_attention_loss(
token_probabilities, sentence_labels,
num_sent_labels, num_tok_labels,
approach, compute_pairwise=False):
"""
Attention-level loss -- currently implemented only in two cases:
1. The number of sentence labels is equal to the number of token labels.
In this case, the attention loss is computed element-wise (for each label).
2. The number of sentence labels is 2, while the number of token labels is arbitrary.
In this case, two scores are computed from the token scores:
* one corresponding to the default label
* one corresponding to the rest of labels (non-default labels)
:param token_probabilities: 3D tensor, shape [B, M, num_tok_labels]
that are normalized across heads (last axis).
:param sentence_labels: 1D tensor of gold sentence labels, shape [B].
:param num_sent_labels: number of unique sentence labels.
:param num_tok_labels: number of unique token labels.
:param approach: method to extract token representation values.
:param compute_pairwise: whether to compute the loss pairwise or not.
:return: a number representing the sum over attention losses computed.
"""
if num_sent_labels == num_tok_labels or num_sent_labels == 2:
# Compute the token representations based on the approach selected.
token_representative_values = get_token_representative_values(
token_probabilities, approach) # [B, num_heads]
one_hot_sentence_labels = get_one_hot_of_token_labels_length(
sentence_labels, num_sent_labels, num_tok_labels)
if compute_pairwise:
attention_loss = tf.losses.mean_pairwise_squared_error(
labels=label_smoothing(one_hot_sentence_labels, epsilon=0.15),
predictions=token_representative_values, weights=1.15)
else:
attention_loss = tf.square(
token_representative_values -
label_smoothing(one_hot_sentence_labels, epsilon=0.15))
else:
raise ValueError(
"You have different number of token labels (%d) and "
"sentence labels (%d, which is non-binary). "
"We don't support attention loss for such a case!"
% (num_tok_labels, num_sent_labels))
return attention_loss
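# A minimal sketch (illustrative only) of the non-pairwise branch above,
# assuming the usual label smoothing (1 - epsilon) * y + epsilon / K: the
# per-head token maxima are pushed towards a smoothed one-hot encoding of
# the gold sentence label with an element-wise squared error.
def _sketch_attention_loss(token_representatives, one_hot_sentence_labels, epsilon=0.15):
    import numpy as np
    one_hot = np.asarray(one_hot_sentence_labels, dtype=float)       # [B, K]
    smoothed = (1.0 - epsilon) * one_hot + epsilon / one_hot.shape[-1]
    return np.square(np.asarray(token_representatives) - smoothed)   # [B, K]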
def compute_gap_distance_loss(
token_probabilities, sentence_labels,
num_sent_labels, num_tok_labels,
minimum_gap_distance, approach,
type_distance):
"""
Gap-distance loss: the intuition is that the gap between the default
and non-default scores should be wider than a certain threshold.
:param token_probabilities: 3D tensor, shape [B, M, num_tok_labels]
that are normalized across heads (last axis).
:param sentence_labels: 1D tensor of gold sentence labels, shape [B].
:param num_sent_labels: number of unique sentence labels.
:param num_tok_labels: number of unique token labels.
:param minimum_gap_distance: the minimum distance gap imposed between
scores corresponding to the default or non-default gold sentence label.
:param approach: method to extract token representation values.
:param type_distance: type of gap distance loss that you want.
:return: a number representing the sum over gap-distance losses.
"""
if num_sent_labels == num_tok_labels or num_sent_labels == 2:
# Compute the token representations based on the approach selected.
token_representative_values = get_token_representative_values(
token_probabilities, approach) # [B, num_heads]
one_hot_sentence_labels = get_one_hot_of_token_labels_length(
sentence_labels, num_sent_labels, num_tok_labels)
valid_tokens = tf.multiply(
tf.cast(one_hot_sentence_labels, tf.float32),
token_representative_values) # [B, num_tok_labels]
tokens_default_head_correct = tf.squeeze(tf.gather(
valid_tokens, indices=[0], axis=-1), axis=-1) # [B]
tokens_default_head_incorrect = tf.squeeze(tf.gather(
token_representative_values, indices=[0], axis=-1), axis=-1) # [B]
tokens_non_default_head_correct = tf.squeeze(
tf.reduce_max(tf.gather(
valid_tokens,
indices=[[i] for i in range(1, num_tok_labels)],
axis=-1), axis=1), axis=-1)
tokens_non_default_head_incorrect = tf.squeeze(
tf.reduce_max(tf.gather(
token_representative_values,
indices=[[i] for i in range(1, num_tok_labels)],
axis=-1), axis=1), axis=-1)
heads_correct = tf.stack(
[tokens_default_head_correct, tokens_non_default_head_correct],
axis=-1) # [B, 2]
heads_incorrect = tf.stack(
[tokens_default_head_incorrect, tokens_non_default_head_incorrect],
axis=-1) # [B, 2]
y_heads = tf.where(
tf.equal(tf.cast(tokens_non_default_head_correct, tf.int32), 0),
one_hot_sentence_labels,
tf.ones_like(one_hot_sentence_labels) - one_hot_sentence_labels)
"""
heads_correct = tf.where(
tf.equal(tf.cast(tokens_non_default_head, tf.int32), 0),
tokens_default_head,
tokens_non_default_head)
heads_incorrect = tf.where(
tf.equal(tf.cast(tokens_default_head, tf.int32), 0),
tokens_default_head,
tokens_non_default_head)
"""
if type_distance == "distance_only":
# loss = max(0.0, threshold - |correct - incorrect|).
gap_loss = tf.math.maximum(
0.0,
tf.math.subtract(
minimum_gap_distance,
tf.math.abs(tf.subtract(
tokens_default_head_incorrect,
tokens_non_default_head_incorrect))))
elif type_distance == "contrastive":
squared_euclidean_distance = tf.reduce_sum(
tf.square(heads_correct - heads_incorrect))
# loss = y * dist + (1 - y) * max(0.0, threshold - d).
gap_loss = tf.add(
tf.multiply(tf.ones_like(y_heads) - y_heads,
squared_euclidean_distance),
tf.multiply(y_heads,
tf.maximum(0.0,
minimum_gap_distance - squared_euclidean_distance)))
else:
# loss =
# [exp(max(0.0, threshold - |correct - incorrect|))
# * (1.0 + max(correct, incorrect) - x_correct)
# * (1.0 + incorrect - min(correct, incorrect))] - 1.0
gap_loss = tf.subtract(
tf.math.exp(tf.math.maximum(
0.0, minimum_gap_distance - tf.math.abs(heads_correct - heads_incorrect)))
* tf.add(1.0, tf.math.maximum(heads_correct, heads_incorrect) - heads_correct)
* tf.add(1.0, heads_incorrect - tf.math.minimum(heads_correct, heads_incorrect)),
1.0)
else:
raise ValueError(
"You have different number of token labels (%d) and "
"sentence labels (%d, which is non-binary). "
"We don't support attention loss for such a case!"
% (num_tok_labels, num_sent_labels))
return gap_loss
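# A minimal sketch (illustrative only) of the "distance_only" branch above:
# a hinge that is zero once the default and non-default scores are at least
# minimum_gap_distance apart and grows linearly as the gap shrinks.
def _sketch_gap_distance_only(default_score, non_default_score, minimum_gap_distance):
    return max(0.0, minimum_gap_distance - abs(default_score - non_default_score))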
| 103,822 | 47.021739 | 102 | py |
multi-head-attention-labeller | multi-head-attention-labeller-master/disable_tokens.py | import random
random.seed(100)
def add_another_column(dataset, extension):
"""
The original dataset file has multiple columns, the first one being
the token and the last one the label. This method builds another file
containing these as well as an additional middle column, representing
the supervision status of a certain token, which can be "on" or "off".
To start, all tokens are disabled. Later, we gradually increase the
proportion of token-annotated sentences for which supervision is on.
"""
path2 = dataset + ".column_added" + extension
num_sent = 0
with open(dataset + extension) as read_file, open(path2, 'w') as write_file:
for line_tok_orig in read_file:
line_tok = line_tok_orig.strip()
if len(line_tok) == 0:
num_sent += 1
write_file.write(line_tok_orig)
continue
line_tok = line_tok.split()
write_file.write(line_tok[0] + "\t" + "off" + "\t" + line_tok[-1] + "\n")
return path2, num_sent
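# Hedged example of the intermediate format produced above (tokens and labels
# are hypothetical): each line is "token<TAB>off<TAB>label", e.g.
#   I     off   c
#   has   off   i
#   a     off   c
#   dog   off   c
# Sentences stay separated by blank lines; supervision is later flipped to
# "on" for a growing fraction of sentences by convert_labels below.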
def convert_labels(read_dataset, write_dataset, percent, no_sentences_to_enable):
"""
Takes a file and randomly enables no_sentences_to_enable of its sentences.
:param read_dataset: file to read from
:param write_dataset: file to write to
:param percent: percent to enable (used to calculate the probability of enabling)
:param no_sentences_to_enable: how many sentences should be enabled
:return: sentences enabled
"""
sentences_enabled = 0
write_file_str = ""
with open(read_dataset) as read_file:
lines = read_file.read().split("\n")
line_index = 0
while line_index < len(lines):
line = lines[line_index].strip()
if len(line) == 0:
write_file_str += "\n"
line_index += 1
continue
line_tok = line.split()
assert len(line_tok) > 2, "Each non-empty line should have at least 3 columns!"
prob = random.random()
conf = (percent + 3) / 100.0
if conf < 1 and (prob > conf or line_tok[1] == "on"
or sentences_enabled >= no_sentences_to_enable):
while len(line) != 0:
write_file_str += line + "\n"
line_index += 1
line = lines[line_index].strip()
else:
sentences_enabled += 1
while len(line) != 0:
line_tok = line.split()
write_file_str += (line_tok[0] + "\t" + "on" + "\t"
+ line_tok[-1] + "\t" + "\n")
line_index += 1
line = lines[line_index].strip()
write_file_str += "\n"
line_index += 1
with open(write_dataset, 'w') as write_file:
write_file.write(write_file_str)
return sentences_enabled
filename = "../data/fce_semi_supervised/fce.train"
ext = ".tsv"
curr_filename, no_sentences = add_another_column(filename, ext)
print("No. sentences = ", no_sentences)
pace = 10
for i in range(10, 110, pace):
wanted_enabled = int((pace / 100) * no_sentences)
start = i
prev_filename = curr_filename
curr_filename = filename + "_%d_percent" % i + ext
actually_enabled = convert_labels(
prev_filename, curr_filename, i, wanted_enabled)
print("Current filename is %s. Percent = %.1f. We want to enable %d."
% (curr_filename, i, wanted_enabled))
while actually_enabled < wanted_enabled:
wanted_enabled -= actually_enabled
print("Only got ", actually_enabled, "need ", wanted_enabled, " more...")
actually_enabled = convert_labels(
curr_filename, curr_filename, i, wanted_enabled)
print(" so we added ", actually_enabled, " more!")
| 3,741 | 36.79798 | 85 | py |
multi-head-attention-labeller | multi-head-attention-labeller-master/model.py | from math import ceil
from modules import cosine_distance_loss, label_smoothing
import collections
import numpy
import pickle
import re
import tensorflow as tf
class Model(object):
"""
Implements the multi-head attention labeller (MHAL).
"""
def __init__(self, config, label2id_sent, label2id_tok):
self.config = config
self.label2id_sent = label2id_sent
self.label2id_tok = label2id_tok
self.UNK = "<unk>"
self.CUNK = "<cunk>"
self.word2id = None
self.char2id = None
self.singletons = None
self.num_heads = None
self.word_ids = None
self.char_ids = None
self.sentence_lengths = None
self.word_lengths = None
self.sentence_labels = None
self.word_labels = None
self.word_embeddings = None
self.char_embeddings = None
self.word_objective_weights = None
self.sentence_objective_weights = None
self.learning_rate = None
self.loss = None
self.initializer = None
self.is_training = None
self.session = None
self.saver = None
self.train_op = None
self.sentence_predictions = None
self.sentence_probabilities = None
self.token_predictions = None
self.token_probabilities = None
def build_vocabs(self, data_train, data_dev, data_test, embedding_path=None):
"""
Builds the vocabulary based on the data and the embeddings info.
"""
data_source = list(data_train)
if self.config["vocab_include_devtest"]:
if data_dev is not None:
data_source += data_dev
if data_test is not None:
data_source += data_test
char_counter = collections.Counter()
word_counter = collections.Counter()
for sentence in data_source:
for token in sentence.tokens:
char_counter.update(token.value)
w = token.value
if self.config["lowercase"]:
w = w.lower()
if self.config["replace_digits"]:
w = re.sub(r'\d', '0', w)
word_counter[w] += 1
self.char2id = collections.OrderedDict([(self.CUNK, 0)])
for char, count in char_counter.most_common():
if char not in self.char2id:
self.char2id[char] = len(self.char2id)
self.word2id = collections.OrderedDict([(self.UNK, 0)])
for word, count in word_counter.most_common():
if self.config["min_word_freq"] <= 0 or count >= self.config["min_word_freq"]:
if word not in self.word2id:
self.word2id[word] = len(self.word2id)
self.singletons = set([word for word in word_counter if word_counter[word] == 1])
if embedding_path and self.config["vocab_only_embedded"]:
embedding_vocab = {self.UNK}
with open(embedding_path) as f:
for line in f:
line_parts = line.strip().split()
if len(line_parts) <= 2:
continue
w = line_parts[0]
if self.config["lowercase"]:
w = w.lower()
if self.config["replace_digits"]:
w = re.sub(r'\d', '0', w)
embedding_vocab.add(w)
word2id_revised = collections.OrderedDict()
for word in self.word2id:
if word in embedding_vocab and word not in word2id_revised:
word2id_revised[word] = len(word2id_revised)
self.word2id = word2id_revised
print("Total number of words: %d." % len(self.word2id))
print("Total number of chars: %d." % len(self.char2id))
print("Total number of singletons: %d." % len(self.singletons))
def construct_network(self):
"""
Constructs the multi-head attention labeller (MHAL) as described
in our paper/MPhil study. It uses keys, queries and values to apply
dot-product attention, and allows for query regularisation.
"""
self.word_ids = tf.placeholder(tf.int32, [None, None], name="word_ids")
self.char_ids = tf.placeholder(tf.int32, [None, None, None], name="char_ids")
self.sentence_lengths = tf.placeholder(tf.int32, [None], name="sentence_lengths")
self.word_lengths = tf.placeholder(tf.int32, [None, None], name="word_lengths")
self.sentence_labels = tf.placeholder(tf.float32, [None], name="sentence_labels")
self.word_labels = tf.placeholder(tf.float32, [None, None], name="word_labels")
self.word_objective_weights = tf.placeholder(
tf.float32, [None, None], name="word_objective_weights")
self.sentence_objective_weights = tf.placeholder(
tf.float32, [None], name="sentence_objective_weights")
self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
self.is_training = tf.placeholder(tf.int32, name="is_training")
self.loss = 0.0
if self.config["initializer"] == "normal":
self.initializer = tf.random_normal_initializer(stddev=0.1)
elif self.config["initializer"] == "glorot":
self.initializer = tf.glorot_uniform_initializer()
elif self.config["initializer"] == "xavier":
self.initializer = tf.glorot_normal_initializer()
zeros_initializer = tf.zeros_initializer()
self.word_embeddings = tf.get_variable(
name="word_embeddings",
shape=[len(self.word2id), self.config["word_embedding_size"]],
initializer=(zeros_initializer if self.config["emb_initial_zero"] else self.initializer),
trainable=(True if self.config["train_embeddings"] else False))
word_input_tensor = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)
if self.config["char_embedding_size"] > 0 and self.config["char_recurrent_size"] > 0:
with tf.variable_scope("chars"), tf.control_dependencies(
[tf.assert_equal(tf.shape(self.char_ids)[2],
tf.reduce_max(self.word_lengths),
message="Char dimensions don't match")]):
self.char_embeddings = tf.get_variable(
name="char_embeddings",
shape=[len(self.char2id), self.config["char_embedding_size"]],
initializer=self.initializer,
trainable=True)
char_input_tensor = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)
char_input_tensor_shape = tf.shape(char_input_tensor)
char_input_tensor = tf.reshape(
char_input_tensor,
shape=[char_input_tensor_shape[0]
* char_input_tensor_shape[1],
char_input_tensor_shape[2],
self.config["char_embedding_size"]])
_word_lengths = tf.reshape(
self.word_lengths, shape=[char_input_tensor_shape[0]
* char_input_tensor_shape[1]])
char_lstm_cell_fw = tf.nn.rnn_cell.LSTMCell(
self.config["char_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
char_lstm_cell_bw = tf.nn.rnn_cell.LSTMCell(
self.config["char_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
# Concatenate the final forward and the backward character contexts
# to obtain a compact character representation for each word.
_, ((_, char_output_fw), (_, char_output_bw)) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=char_lstm_cell_fw, cell_bw=char_lstm_cell_bw, inputs=char_input_tensor,
sequence_length=_word_lengths, dtype=tf.float32, time_major=False)
char_output_tensor = tf.concat([char_output_fw, char_output_bw], axis=-1)
char_output_tensor = tf.reshape(
char_output_tensor,
shape=[char_input_tensor_shape[0], char_input_tensor_shape[1],
2 * self.config["char_recurrent_size"]])
# Include a char-based language modelling loss, LM-c.
if self.config["lm_cost_char_gamma"] > 0.0:
self.loss += self.config["lm_cost_char_gamma"] * \
self.construct_lm_cost(
input_tensor_fw=char_output_tensor,
input_tensor_bw=char_output_tensor,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="separate",
name="lm_cost_char_separate")
if self.config["lm_cost_joint_char_gamma"] > 0.0:
self.loss += self.config["lm_cost_joint_char_gamma"] * \
self.construct_lm_cost(
input_tensor_fw=char_output_tensor,
input_tensor_bw=char_output_tensor,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="joint",
name="lm_cost_char_joint")
if self.config["char_hidden_layer_size"] > 0:
char_output_tensor = tf.layers.dense(
inputs=char_output_tensor, units=self.config["char_hidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
if self.config["char_integration_method"] == "concat":
word_input_tensor = tf.concat([word_input_tensor, char_output_tensor], axis=-1)
elif self.config["char_integration_method"] == "none":
word_input_tensor = word_input_tensor
else:
raise ValueError("Unknown char integration method")
if self.config["dropout_input"] > 0.0:
dropout_input = (self.config["dropout_input"] * tf.cast(self.is_training, tf.float32)
+ (1.0 - tf.cast(self.is_training, tf.float32)))
word_input_tensor = tf.nn.dropout(
word_input_tensor, dropout_input, name="dropout_word")
word_lstm_cell_fw = tf.nn.rnn_cell.LSTMCell(
self.config["word_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
word_lstm_cell_bw = tf.nn.rnn_cell.LSTMCell(
self.config["word_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
with tf.control_dependencies(
[tf.assert_equal(
tf.shape(self.word_ids)[1],
tf.reduce_max(self.sentence_lengths),
message="Sentence dimensions don't match")]):
(lstm_outputs_fw, lstm_outputs_bw), ((_, lstm_output_fw), (_, lstm_output_bw)) = \
tf.nn.bidirectional_dynamic_rnn(
cell_fw=word_lstm_cell_fw, cell_bw=word_lstm_cell_bw, inputs=word_input_tensor,
sequence_length=self.sentence_lengths, dtype=tf.float32, time_major=False)
lstm_output_states = tf.concat([lstm_output_fw, lstm_output_bw], axis=-1)
if self.config["dropout_word_lstm"] > 0.0:
dropout_word_lstm = (self.config["dropout_word_lstm"] * tf.cast(self.is_training, tf.float32)
+ (1.0 - tf.cast(self.is_training, tf.float32)))
lstm_outputs_fw = tf.nn.dropout(
lstm_outputs_fw, dropout_word_lstm,
noise_shape=tf.convert_to_tensor(
[tf.shape(self.word_ids)[0], 1, self.config["word_recurrent_size"]], dtype=tf.int32))
lstm_outputs_bw = tf.nn.dropout(
lstm_outputs_bw, dropout_word_lstm,
noise_shape=tf.convert_to_tensor(
[tf.shape(self.word_ids)[0], 1, self.config["word_recurrent_size"]], dtype=tf.int32))
lstm_output_states = tf.nn.dropout(lstm_output_states, dropout_word_lstm)
# The forward and backward states are concatenated at every token position.
lstm_outputs_states = tf.concat([lstm_outputs_fw, lstm_outputs_bw], axis=-1)
if self.config["whidden_layer_size"] > 0:
lstm_outputs_states = tf.layers.dense(
lstm_outputs_states, self.config["whidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
if "last" in self.config["model_type"]:
processed_tensor = lstm_output_states
token_scores = tf.layers.dense(
lstm_outputs_states, units=len(self.label2id_tok),
kernel_initializer=self.initializer,
name="token_scores_last_lstm_outputs_ff")
if self.config["hidden_layer_size"] > 0:
processed_tensor = tf.layers.dense(
processed_tensor, units=self.config["hidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
sentence_scores = tf.layers.dense(
processed_tensor, units=len(self.label2id_sent),
kernel_initializer=self.initializer,
name="sentence_scores_last_lstm_outputs_ff")
elif "attention" in self.config["model_type"]:
with tf.variable_scope("attention"):
num_heads = len(self.label2id_tok)
num_sentence_labels = len(self.label2id_sent)
num_units = lstm_outputs_states.get_shape().as_list()[-1]
if num_units % num_heads != 0:
num_units = ceil(num_units / num_heads) * num_heads
inputs = tf.layers.dense(lstm_outputs_states, num_units) # [B, M, num_units]
else:
inputs = lstm_outputs_states
# Project the inputs to get the keys, queries and values.
queries = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=self.initializer) # [B, M, num_units]
queries = tf.math.reduce_mean(queries, axis=1) # [B, num_units]
queries = tf.expand_dims(queries, axis=-1) # [B, num_units, 1]
keys = tf.layers.dense(
inputs, num_units, activation=tf.nn.tanh,
kernel_initializer=self.initializer) # [B, M, num_units]
values = tf.layers.dense(
inputs, num_units, activation=tf.tanh,
kernel_initializer=self.initializer) # [B, M, num_units]
# Split and concat to get as many projections as num_heads.
queries = tf.concat(
tf.split(queries, num_heads, axis=1),
axis=0) # [B*num_heads, num_units/num_heads, 1]
keys = tf.concat(
tf.split(keys, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
values = tf.concat(
tf.split(values, num_heads, axis=2),
axis=0) # [B*num_heads, M, num_units/num_heads]
if self.config["regularize_queries"] > 0:
self.loss += self.config["regularize_queries"] * cosine_distance_loss(
tf.concat(tf.split(tf.transpose(queries, [0, 2, 1]), num_heads), axis=1),
take_abs=self.config["take_abs"] if "take_abs" in self.config else False)
if self.config["regularize_keys"] > 0:
self.loss += self.config["regularize_keys"] * cosine_distance_loss(
tf.concat(tf.split(tf.expand_dims(keys, axis=2), num_heads), axis=2),
take_abs=self.config["take_abs"] if "take_abs" in self.config else False)
if self.config["regularize_values"] > 0:
self.loss += self.config["regularize_values"] * cosine_distance_loss(
tf.concat(tf.split(tf.expand_dims(values, axis=2), num_heads), axis=2),
take_abs=self.config["take_abs"] if "take_abs" in self.config else False)
# Multiply each key by its query to get the attention evidence scores.
attention_evidence = tf.matmul(keys, queries) # [B*num_heads, M, 1]
attention_evidence = tf.squeeze(attention_evidence, axis=-1) # [B*num_heads, M]
# Obtain token scores from the attention evidence scores.
token_scores = tf.concat(tf.split(
tf.expand_dims(attention_evidence, axis=-1),
num_heads), axis=2) # [B, M, num_heads]
# Apply a non-linear layer to obtain (un-normalized) attention weights.
if self.config["attention_activation"] == "sharp":
attention_weights = tf.exp(attention_evidence)
elif self.config["attention_activation"] == "soft":
attention_weights = tf.sigmoid(attention_evidence)
elif self.config["attention_activation"] == "linear":
attention_weights = attention_evidence
else:
raise ValueError("Unknown/unsupported token scoring method: %s"
% self.config["attention_activation"])
# Mask positions that are not valid.
tiled_sentence_lengths = tf.tile(
input=tf.sequence_mask(self.sentence_lengths),
multiples=[num_heads, 1]) # [B*num_heads, M]
attention_weights = tf.where(
tiled_sentence_lengths,
attention_weights,
tf.zeros_like(attention_weights))
# Normalize attention weights.
attention_weights /= tf.reduce_sum(
attention_weights, axis=-1, keep_dims=True) # [B*num_heads, M]
product = values * tf.expand_dims(attention_weights, axis=-1) # [B*num_heads, M, num_units/num_heads]
product = tf.reduce_sum(product, axis=1) # [B*num_heads, num_units/num_heads]
if self.config["regularize_sentence_repr"] > 0:
self.loss += self.config["regularize_sentence_repr"] * cosine_distance_loss(
tf.concat(tf.split(tf.expand_dims(product, axis=1), num_heads), axis=1),
take_abs=self.config["take_abs"] if "take_abs" in self.config else False)
if self.config["hidden_layer_size"] > 0:
product = tf.layers.dense(
inputs=product, units=self.config["hidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
processed_tensor = tf.layers.dense(
inputs=product, units=1,
kernel_initializer=self.initializer) # [B*num_heads, 1]
processed_tensor = tf.concat(
tf.split(processed_tensor, num_heads), axis=1) # [B, num_heads]
sentence_scores = processed_tensor
if num_heads != num_sentence_labels:
if num_sentence_labels == 2:
default_sentence_score = tf.gather(
processed_tensor, indices=[0], axis=1) # [B, 1]
maximum_non_default_sentence_score = tf.gather(
processed_tensor, indices=list(
range(1, num_heads)), axis=1) # [B, num_heads-1]
maximum_non_default_sentence_score = tf.reduce_max(
maximum_non_default_sentence_score, axis=1, keep_dims=True) # [B, 1]
sentence_scores = tf.concat(
[default_sentence_score, maximum_non_default_sentence_score],
axis=-1, name="sentence_scores_concatenation") # [B, 2]
else:
sentence_scores = tf.layers.dense(
processed_tensor, units=num_sentence_labels,
kernel_initializer=self.initializer) # [B, num_sent_labels]
else:
raise ValueError("Unknown/unsupported model_type: %s." % self.config["model_type"])
# Mask the token scores that do not fall in the range of the true sentence length.
# Do this for each head (change shape from [B, M] to [B, M, num_heads]).
tiled_sentence_lengths = tf.tile(
input=tf.expand_dims(
tf.sequence_mask(self.sentence_lengths), axis=-1),
multiples=[1, 1, len(self.label2id_tok)])
self.token_probabilities = tf.nn.softmax(token_scores, axis=-1)
self.token_probabilities = tf.where(
tiled_sentence_lengths,
self.token_probabilities,
tf.zeros_like(self.token_probabilities))
self.token_predictions = tf.argmax(self.token_probabilities, axis=2)
self.sentence_probabilities = tf.nn.softmax(sentence_scores)
self.sentence_predictions = tf.argmax(self.sentence_probabilities, axis=1)
if self.config["word_objective_weight"] > 0:
word_objective_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=token_scores, labels=tf.cast(self.word_labels, tf.int32))
word_objective_loss = tf.where(
tf.sequence_mask(self.sentence_lengths),
word_objective_loss,
tf.zeros_like(word_objective_loss))
self.loss += self.config["word_objective_weight"] * tf.reduce_sum(
self.word_objective_weights * word_objective_loss)
if self.config["sentence_objective_weight"] > 0:
self.loss += self.config["sentence_objective_weight"] * tf.reduce_sum(
self.sentence_objective_weights *
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=sentence_scores, labels=tf.cast(self.sentence_labels, tf.int32)))
max_over_token_heads = tf.reduce_max(self.token_probabilities, axis=1) # [B, H]
one_hot_sentence_labels = tf.one_hot(
tf.cast(self.sentence_labels, tf.int32),
depth=len(self.label2id_sent))
if self.config["enable_label_smoothing"]:
one_hot_sentence_labels_smoothed = label_smoothing(
one_hot_sentence_labels, epsilon=self.config["smoothing_epsilon"])
else:
one_hot_sentence_labels_smoothed = one_hot_sentence_labels
# At least one token has a label corresponding to the true sentence label.
# This also pushes the other maximum heads towards (a smoothed) 0.
if self.config["type1_attention_objective_weight"] > 0:
this_max_over_token_heads = max_over_token_heads
if len(self.label2id_tok) != len(self.label2id_sent):
if len(self.label2id_sent) == 2:
max_default_head = tf.gather(
max_over_token_heads, indices=[0], axis=-1) # [B, 1]
max_non_default_head = tf.reduce_max(tf.gather(
max_over_token_heads, indices=list(
range(1, len(self.label2id_tok))), axis=-1),
axis=1, keep_dims=True) # [B, 1]
this_max_over_token_heads = tf.concat(
[max_default_head, max_non_default_head], axis=-1) # [B, 2]
else:
raise ValueError(
"Unsupported attention loss for num_heads != num_sent_lables "
"and num_sentence_labels != 2.")
self.loss += self.config["type1_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.reduce_sum(tf.square(
this_max_over_token_heads - one_hot_sentence_labels_smoothed), axis=-1)))
# The predicted distribution over the token labels (heads) should be similar
# to the predicted distribution over the sentence representations.
if self.config["type2_attention_objective_weight"] > 0:
all_sentence_scores_probabilities = tf.nn.softmax(processed_tensor) # [B, H]
self.loss += self.config["type2_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.reduce_sum(tf.square(
max_over_token_heads - all_sentence_scores_probabilities), axis=-1)))
# At least one token has a label corresponding to the true sentence label.
if self.config["type3_attention_objective_weight"] > 0:
this_max_over_token_heads = max_over_token_heads
if len(self.label2id_tok) != len(self.label2id_sent):
if len(self.label2id_sent) == 2:
max_default_head = tf.gather(
max_over_token_heads, indices=[0], axis=-1) # [B, 1]
max_non_default_head = tf.reduce_max(tf.gather(
max_over_token_heads, indices=list(
range(1, len(self.label2id_tok))), axis=-1),
axis=1, keep_dims=True) # [B, 1]
this_max_over_token_heads = tf.concat(
[max_default_head, max_non_default_head], axis=-1) # [B, 2]
else:
raise ValueError(
"Unsupported attention loss for num_heads != num_sent_lables "
"and num_sentence_labels != 2.")
self.loss += self.config["type3_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.reduce_sum(tf.square(
(this_max_over_token_heads * one_hot_sentence_labels)
- one_hot_sentence_labels_smoothed), axis=-1)))
# A sentence that has a default label, should only contain tokens labeled as default.
if self.config["type4_attention_objective_weight"] > 0:
default_head = tf.gather(self.token_probabilities, indices=[0], axis=-1) # [B, M, 1]
default_head = tf.squeeze(default_head, axis=-1) # [B, M]
self.loss += self.config["type4_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.cast(
tf.equal(self.sentence_labels, 0.0), tf.float32) * tf.reduce_sum(
tf.square(default_head - tf.ones_like(default_head)), axis=-1)))
# Every sentence has at least one default label.
if self.config["type5_attention_objective_weight"] > 0:
default_head = tf.gather(self.token_probabilities, indices=[0], axis=-1) # [B, M, 1]
max_default_head = tf.reduce_max(tf.squeeze(default_head, axis=-1), axis=-1) # [B]
self.loss += self.config["type5_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.square(
max_default_head - tf.ones_like(max_default_head))))
# Pairwise attention objective function.
if self.config["type6_attention_objective_weight"] > 0:
this_max_over_token_heads = max_over_token_heads
if len(self.label2id_tok) != len(self.label2id_sent):
if len(self.label2id_sent) == 2:
max_default_head = tf.gather(
max_over_token_heads, indices=[0], axis=-1) # [B, 1]
max_non_default_head = tf.reduce_max(tf.gather(
max_over_token_heads, indices=list(
range(1, len(self.label2id_tok))), axis=-1),
axis=1, keep_dims=True) # [B, 1]
this_max_over_token_heads = tf.concat(
[max_default_head, max_non_default_head], axis=-1) # [B, 2]
else:
raise ValueError(
"Unsupported attention loss for num_heads != num_sent_lables "
"and num_sentence_labels != 2.")
self.loss += self.config["type6_attention_objective_weight"] * (
tf.losses.mean_pairwise_squared_error(
labels=one_hot_sentence_labels_smoothed,
predictions=this_max_over_token_heads))
# The distribution over tokens should be similar to the distribution over sentences.
if self.config["type7_attention_objective_weight"] > 0:
op_over_token_heads = tf.reduce_mean(self.token_probabilities, axis=1) # [B, H]
distribution_over_tokens = tf.nn.softmax(op_over_token_heads)
distribution_over_sentences = tf.nn.softmax(processed_tensor) # [B, H]
self.loss += self.config["type7_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.distributions.kl_divergence(
distribution_a=tf.distributions.Categorical(distribution_over_sentences),
distribution_b=tf.distributions.Categorical(distribution_over_tokens))))
# Include a word-based language modelling loss, LMw.
if self.config["lm_cost_lstm_gamma"] > 0.0:
self.loss += self.config["lm_cost_lstm_gamma"] * self.construct_lm_cost(
input_tensor_fw=lstm_outputs_fw,
input_tensor_bw=lstm_outputs_bw,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="separate",
name="lm_cost_lstm_separate")
if self.config["lm_cost_joint_lstm_gamma"] > 0.0:
self.loss += self.config["lm_cost_joint_lstm_gamma"] * self.construct_lm_cost(
input_tensor_fw=lstm_outputs_fw,
input_tensor_bw=lstm_outputs_bw,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="joint",
name="lm_cost_lstm_joint")
self.train_op = self.construct_optimizer(
opt_strategy=self.config["opt_strategy"],
loss=self.loss,
learning_rate=self.learning_rate,
clip=self.config["clip"])
print("Notwork built.")
def construct_lm_cost(
self, input_tensor_fw, input_tensor_bw,
sentence_lengths, target_ids, lm_cost_type, name):
"""
Constructs the char/word-based language modelling objective.
"""
with tf.variable_scope(name):
lm_cost_max_vocab_size = min(
len(self.word2id), self.config["lm_cost_max_vocab_size"])
target_ids = tf.where(
tf.greater_equal(target_ids, lm_cost_max_vocab_size - 1),
x=(lm_cost_max_vocab_size - 1) + tf.zeros_like(target_ids),
y=target_ids)
cost = 0.0
if lm_cost_type == "separate":
lm_cost_fw_mask = tf.sequence_mask(
sentence_lengths, maxlen=tf.shape(target_ids)[1])[:, 1:]
lm_cost_bw_mask = tf.sequence_mask(
sentence_lengths, maxlen=tf.shape(target_ids)[1])[:, :-1]
lm_cost_fw = self._construct_lm_cost(
input_tensor_fw[:, :-1, :],
lm_cost_max_vocab_size,
lm_cost_fw_mask,
target_ids[:, 1:],
name=name + "_fw")
lm_cost_bw = self._construct_lm_cost(
input_tensor_bw[:, 1:, :],
lm_cost_max_vocab_size,
lm_cost_bw_mask,
target_ids[:, :-1],
name=name + "_bw")
cost += lm_cost_fw + lm_cost_bw
elif lm_cost_type == "joint":
joint_input_tensor = tf.concat(
[input_tensor_fw[:, :-2, :], input_tensor_bw[:, 2:, :]], axis=-1)
lm_cost_mask = tf.sequence_mask(
sentence_lengths, maxlen=tf.shape(target_ids)[1])[:, 1:-1]
cost += self._construct_lm_cost(
joint_input_tensor,
lm_cost_max_vocab_size,
lm_cost_mask,
target_ids[:, 1:-1],
name=name + "_joint")
else:
raise ValueError("Unknown lm_cost_type: %s." % lm_cost_type)
return cost
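    # In the "separate" setting, the forward states predict the next word and the
    # backward states predict the previous word independently; in the "joint" setting,
    # the forward state at position t-1 is concatenated with the backward state at
    # position t+1 to predict the word at position t (hence the [:, :-2, :] / [:, 2:, :]
    # slices and the [:, 1:-1] target window above).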
def _construct_lm_cost(
self, input_tensor, lm_cost_max_vocab_size,
lm_cost_mask, target_ids, name):
with tf.variable_scope(name):
lm_cost_hidden_layer = tf.layers.dense(
inputs=input_tensor, units=self.config["lm_cost_hidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
lm_cost_output = tf.layers.dense(
inputs=lm_cost_hidden_layer, units=lm_cost_max_vocab_size,
kernel_initializer=self.initializer)
lm_cost_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=lm_cost_output, labels=target_ids)
lm_cost_loss = tf.where(lm_cost_mask, lm_cost_loss, tf.zeros_like(lm_cost_loss))
return tf.reduce_sum(lm_cost_loss)
@staticmethod
def construct_optimizer(opt_strategy, loss, learning_rate, clip):
"""
Applies an optimization strategy to minimize the loss.
"""
if opt_strategy == "adadelta":
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
elif opt_strategy == "adam":
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
elif opt_strategy == "sgd":
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
else:
raise ValueError("Unknown optimisation strategy: %s." % opt_strategy)
if clip > 0.0:
grads, vs = zip(*optimizer.compute_gradients(loss))
grads, gnorm = tf.clip_by_global_norm(grads, clip)
train_op = optimizer.apply_gradients(zip(grads, vs))
else:
train_op = optimizer.minimize(loss)
return train_op
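    # tf.clip_by_global_norm rescales every gradient by clip / max(global_norm, clip),
    # so the update direction is preserved and only its overall magnitude is capped.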
def preload_word_embeddings(self, embedding_path):
"""
        Loads the pre-trained word embeddings from embedding_path
        into the word embedding matrix before training starts.
"""
loaded_embeddings = set()
embedding_matrix = self.session.run(self.word_embeddings)
with open(embedding_path) as f:
for line in f:
line_parts = line.strip().split()
if len(line_parts) <= 2:
continue
w = line_parts[0]
if self.config["lowercase"]:
w = w.lower()
if self.config["replace_digits"]:
w = re.sub(r'\d', '0', w)
if w in self.word2id and w not in loaded_embeddings:
word_id = self.word2id[w]
embedding = numpy.array(line_parts[1:])
embedding_matrix[word_id] = embedding
loaded_embeddings.add(w)
self.session.run(self.word_embeddings.assign(embedding_matrix))
print("No. of pre-loaded embeddings: %d." % len(loaded_embeddings))
@staticmethod
def translate2id(
token, token2id, unk_token=None, lowercase=False,
replace_digits=False, singletons=None, singletons_prob=0.0):
"""
Maps each token/character to its index.
"""
if lowercase:
token = token.lower()
if replace_digits:
token = re.sub(r'\d', '0', token)
if singletons and token in singletons \
and token in token2id and unk_token \
and numpy.random.uniform() < singletons_prob:
token_id = token2id[unk_token]
elif token in token2id:
token_id = token2id[token]
elif unk_token:
token_id = token2id[unk_token]
else:
raise ValueError("Unable to handle value, no UNK token: %s." % token)
return token_id
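    # During training, words seen only once (singletons) are mapped to the UNK id with
    # probability singletons_prob, so the model also learns a useful UNK representation
    # for out-of-vocabulary words encountered at test time.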
def create_input_dictionary_for_batch(self, batch, is_training, learning_rate):
"""
        Creates the dictionary fed to the TF model.
"""
sentence_lengths = numpy.array([len(sentence.tokens) for sentence in batch])
max_sentence_length = sentence_lengths.max()
max_word_length = numpy.array(
[numpy.array([len(token.value) for token in sentence.tokens]).max()
for sentence in batch]).max()
if 0 < self.config["allowed_word_length"] < max_word_length:
max_word_length = min(max_word_length, self.config["allowed_word_length"])
word_ids = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.int32)
char_ids = numpy.zeros(
(len(batch), max_sentence_length, max_word_length), dtype=numpy.int32)
word_lengths = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.int32)
word_labels = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.float32)
sentence_labels = numpy.zeros(
(len(batch)), dtype=numpy.float32)
word_objective_weights = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.float32)
sentence_objective_weights = numpy.zeros((len(batch)), dtype=numpy.float32)
# A proportion of the singletons are assigned to UNK (do this just for training).
singletons = self.singletons if is_training else None
singletons_prob = self.config["singletons_prob"] if is_training else 0.0
for i, sentence in enumerate(batch):
sentence_labels[i] = sentence.label_sent
if sentence_labels[i] != 0:
if self.config["sentence_objective_weights_non_default"] > 0.0:
sentence_objective_weights[i] = self.config[
"sentence_objective_weights_non_default"]
else:
sentence_objective_weights[i] = 1.0
else:
sentence_objective_weights[i] = 1.0
for j, token in enumerate(sentence.tokens):
word_ids[i][j] = self.translate2id(
token=token.value,
token2id=self.word2id,
unk_token=self.UNK,
lowercase=self.config["lowercase"],
replace_digits=self.config["replace_digits"],
singletons=singletons,
singletons_prob=singletons_prob)
word_labels[i][j] = token.label_tok
word_lengths[i][j] = len(token.value)
for k in range(min(len(token.value), max_word_length)):
char_ids[i][j][k] = self.translate2id(
token=token.value[k],
token2id=self.char2id,
unk_token=self.CUNK)
if token.enable_supervision is True:
word_objective_weights[i][j] = 1.0
input_dictionary = {
self.word_ids: word_ids,
self.char_ids: char_ids,
self.sentence_lengths: sentence_lengths,
self.word_lengths: word_lengths,
self.sentence_labels: sentence_labels,
self.word_labels: word_labels,
self.word_objective_weights: word_objective_weights,
self.sentence_objective_weights: sentence_objective_weights,
self.learning_rate: learning_rate,
self.is_training: is_training}
return input_dictionary
def process_batch(self, batch, is_training, learning_rate):
"""
Processes a batch of sentences.
:param batch: a set of sentences of size "max_batch_size".
:param is_training: whether the current batch is a training instance or not.
:param learning_rate: the pace at which learning should be performed.
:return: the cost, the sentence predictions, the sentence label distribution,
the token predictions and the token label distribution.
"""
feed_dict = self.create_input_dictionary_for_batch(batch, is_training, learning_rate)
cost, sentence_pred, sentence_prob, token_pred, token_prob = self.session.run(
[self.loss, self.sentence_predictions, self.sentence_probabilities,
self.token_predictions, self.token_probabilities] +
([self.train_op] if is_training else []), feed_dict=feed_dict)[:5]
return cost, sentence_pred, sentence_prob, token_pred, token_prob
def initialize_session(self):
"""
Initializes a tensorflow session and sets the random seed.
"""
tf.set_random_seed(self.config["random_seed"])
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = self.config["tf_allow_growth"]
session_config.gpu_options.per_process_gpu_memory_fraction = self.config[
"tf_per_process_gpu_memory_fraction"]
self.session = tf.Session(config=session_config)
self.session.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=1)
@staticmethod
def get_parameter_count():
"""
Counts the total number of parameters.
"""
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
return total_parameters
def get_parameter_count_without_word_embeddings(self):
"""
Counts the number of parameters without those introduced by word embeddings.
"""
shape = self.word_embeddings.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
return self.get_parameter_count() - variable_parameters
def save(self, filename):
"""
Saves a trained model to the path in filename.
"""
dump = dict()
dump["config"] = self.config
dump["label2id_sent"] = self.label2id_sent
dump["label2id_tok"] = self.label2id_tok
dump["UNK"] = self.UNK
dump["CUNK"] = self.CUNK
dump["word2id"] = self.word2id
dump["char2id"] = self.char2id
dump["singletons"] = self.singletons
dump["params"] = {}
for variable in tf.global_variables():
assert (
variable.name not in dump["params"]), \
"Error: variable with this name already exists: %s." % variable.name
dump["params"][variable.name] = self.session.run(variable)
with open(filename, 'wb') as f:
pickle.dump(dump, f, protocol=pickle.HIGHEST_PROTOCOL)
@staticmethod
def load(filename, new_config=None):
"""
Loads a pre-trained MHAL model.
"""
with open(filename, 'rb') as f:
dump = pickle.load(f)
dump["config"]["save"] = None
# Use the saved config, except for values that are present in the new config.
if new_config:
for key in new_config:
dump["config"][key] = new_config[key]
labeler = Model(dump["config"], dump["label2id_sent"], dump["label2id_tok"])
labeler.UNK = dump["UNK"]
labeler.CUNK = dump["CUNK"]
labeler.word2id = dump["word2id"]
labeler.char2id = dump["char2id"]
labeler.singletons = dump["singletons"]
labeler.construct_network()
labeler.initialize_session()
labeler.load_params(filename)
return labeler
def load_params(self, filename):
"""
Loads the parameters of a trained model.
"""
with open(filename, 'rb') as f:
dump = pickle.load(f)
for variable in tf.global_variables():
assert (variable.name in dump["params"]), \
"Variable not in dump: %s." % variable.name
assert (variable.shape == dump["params"][variable.name].shape), \
"Variable shape not as expected: %s, of shape %s. %s" % (
variable.name, str(variable.shape),
str(dump["params"][variable.name].shape))
value = numpy.asarray(dump["params"][variable.name])
self.session.run(variable.assign(value))
| 45,808 | 48.846572 | 118 | py |
multi-head-attention-labeller | multi-head-attention-labeller-master/conlleval.py | #!/usr/bin/env python
# Python version of the evaluation script from CoNLL'00-
# Originates from: https://github.com/spyysalo/conlleval.py
# Intentional differences:
# - accept any space as delimiter by default
# - optional file argument (default STDIN)
# - option to set boundary (-b argument)
# - LaTeX output (-l argument) not supported
# - raw tags (-r argument) not supported
from collections import defaultdict, namedtuple
import re
import sys
ANY_SPACE = '<SPACE>'
class FormatError(Exception):
pass
Metrics = namedtuple('Metrics', 'tp fp fn prec rec fscore')
class EvalCounts(object):
def __init__(self):
self.correct_chunk = 0 # number of correctly identified chunks
self.correct_tags = 0 # number of correct chunk tags
self.found_correct = 0 # number of chunks in corpus
self.found_guessed = 0 # number of identified chunks
self.token_counter = 0 # token counter (ignores sentence breaks)
# counts by type
self.t_correct_chunk = defaultdict(int)
self.t_found_correct = defaultdict(int)
self.t_found_guessed = defaultdict(int)
def parse_args(argv):
import argparse
parser = argparse.ArgumentParser(
description='evaluate tagging results using CoNLL criteria',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
arg = parser.add_argument
arg('-b', '--boundary', metavar='STR', default='-X-',
help='sentence boundary')
arg('-d', '--delimiter', metavar='CHAR', default=ANY_SPACE,
help='character delimiting items in input')
arg('-o', '--otag', metavar='CHAR', default='O',
help='alternative outside tag')
arg('file', nargs='?', default=None)
return parser.parse_args(argv)
def parse_tag(t):
m = re.match(r'^([^-]*)-(.*)$', t)
return m.groups() if m else (t, '')
def evaluate(iterable, options=None):
if options is None:
options = parse_args([]) # use defaults
counts = EvalCounts()
num_features = None # number of features per line
    in_correct = False # currently processed chunk is correct until now
    last_correct = 'O' # previous chunk tag in corpus
    last_correct_type = '' # type of previous chunk tag in corpus
    last_guessed = 'O' # previously identified chunk tag
    last_guessed_type = '' # type of previously identified chunk tag
for line in iterable:
line = line.rstrip('\r\n')
if options.delimiter == ANY_SPACE:
features = line.split()
else:
features = line.split(options.delimiter)
if num_features is None:
num_features = len(features)
elif num_features != len(features) and len(features) != 0:
raise FormatError('unexpected number of features: %d (%d)' %
(len(features), num_features))
if len(features) == 0 or features[0] == options.boundary:
features = [options.boundary, 'O', 'O']
if len(features) < 3:
raise FormatError('unexpected number of features in line %s' % line)
guessed, guessed_type = parse_tag(features.pop())
correct, correct_type = parse_tag(features.pop())
first_item = features.pop(0)
if first_item == options.boundary:
guessed = 'O'
end_correct = end_of_chunk(last_correct, correct,
last_correct_type, correct_type)
end_guessed = end_of_chunk(last_guessed, guessed,
last_guessed_type, guessed_type)
start_correct = start_of_chunk(last_correct, correct,
last_correct_type, correct_type)
start_guessed = start_of_chunk(last_guessed, guessed,
last_guessed_type, guessed_type)
if in_correct:
if end_correct and end_guessed and last_guessed_type == last_correct_type:
in_correct = False
counts.correct_chunk += 1
counts.t_correct_chunk[last_correct_type] += 1
elif end_correct != end_guessed or guessed_type != correct_type:
in_correct = False
if start_correct and start_guessed and guessed_type == correct_type:
in_correct = True
if start_correct:
counts.found_correct += 1
counts.t_found_correct[correct_type] += 1
if start_guessed:
counts.found_guessed += 1
counts.t_found_guessed[guessed_type] += 1
if first_item != options.boundary:
if correct == guessed and guessed_type == correct_type:
counts.correct_tags += 1
counts.token_counter += 1
last_guessed = guessed
last_correct = correct
last_guessed_type = guessed_type
last_correct_type = correct_type
if in_correct:
counts.correct_chunk += 1
counts.t_correct_chunk[last_correct_type] += 1
return counts
def uniq(iterable):
seen = set()
return [i for i in iterable if not (i in seen or seen.add(i))]
def calculate_metrics(correct, guessed, total):
tp, fp, fn = correct, guessed-correct, total-correct
p = 0 if tp + fp == 0 else 1.*tp / (tp + fp)
r = 0 if tp + fn == 0 else 1.*tp / (tp + fn)
f = 0 if p + r == 0 else 2 * p * r / (p + r)
return Metrics(tp, fp, fn, p, r, f)
def metrics(counts):
c = counts
overall = calculate_metrics(
c.correct_chunk, c.found_guessed, c.found_correct
)
by_type = {}
for t in uniq(list(c.t_found_correct.keys()) + list(c.t_found_guessed.keys())):
by_type[t] = calculate_metrics(
c.t_correct_chunk[t], c.t_found_guessed[t], c.t_found_correct[t]
)
return overall, by_type
def report(counts, out=None):
if out is None:
out = sys.stdout
overall, by_type = metrics(counts)
c = counts
out.write('processed %d tokens with %d phrases; ' %
(c.token_counter, c.found_correct))
out.write('found: %d phrases; correct: %d.\n' %
(c.found_guessed, c.correct_chunk))
if c.token_counter > 0:
out.write('accuracy: %6.2f%%; ' %
(100.*c.correct_tags/c.token_counter))
out.write('precision: %6.2f%%; ' % (100.*overall.prec))
out.write('recall: %6.2f%%; ' % (100.*overall.rec))
out.write('FB1: %6.2f\n' % (100.*overall.fscore))
for i, m in sorted(by_type.items()):
out.write('%17s: ' % i)
out.write('precision: %6.2f%%; ' % (100.*m.prec))
out.write('recall: %6.2f%%; ' % (100.*m.rec))
out.write('FB1: %6.2f %d\n' % (100.*m.fscore, c.t_found_guessed[i]))
def end_of_chunk(prev_tag, tag, prev_type, type_):
# check if a chunk ended between the previous and current word
# arguments: previous and current chunk tags, previous and current types
chunk_end = False
if prev_tag == 'E':
chunk_end = True
if prev_tag == 'S':
chunk_end = True
if prev_tag == 'B' and tag == 'B':
chunk_end = True
if prev_tag == 'B' and tag == 'S':
chunk_end = True
if prev_tag == 'B' and tag == 'O':
chunk_end = True
if prev_tag == 'I' and tag == 'B':
chunk_end = True
if prev_tag == 'I' and tag == 'S':
chunk_end = True
if prev_tag == 'I' and tag == 'O':
chunk_end = True
if prev_tag != 'O' and prev_tag != '.' and prev_type != type_:
chunk_end = True
# these chunks are assumed to have length 1
if prev_tag == ']':
chunk_end = True
if prev_tag == '[':
chunk_end = True
return chunk_end
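# Example with BIO tags: in the sequence "B-PER I-PER B-LOC" a chunk ends between
# I-PER and B-LOC (prev_tag='I', tag='B') and a new one starts at B-LOC; a type change
# such as I-PER followed by I-LOC likewise closes the current chunk and opens a new one.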
def start_of_chunk(prev_tag, tag, prev_type, type_):
# check if a chunk started between the previous and current word
# arguments: previous and current chunk tags, previous and current types
chunk_start = False
if tag == 'B':
chunk_start = True
if tag == 'S':
chunk_start = True
if prev_tag == 'E' and tag == 'E':
chunk_start = True
if prev_tag == 'E' and tag == 'I':
chunk_start = True
if prev_tag == 'S' and tag == 'E':
chunk_start = True
if prev_tag == 'S' and tag == 'I':
chunk_start = True
if prev_tag == 'O' and tag == 'E':
chunk_start = True
if prev_tag == 'O' and tag == 'I':
chunk_start = True
if tag != 'O' and tag != '.' and prev_type != type_:
chunk_start = True
# these chunks are assumed to have length 1
if tag == '[':
chunk_start = True
if tag == ']':
chunk_start = True
return chunk_start
def main(argv):
args = parse_args(argv[1:])
if args.file is None:
counts = evaluate(sys.stdin, args)
else:
with open(args.file) as f:
counts = evaluate(f, args)
report(counts)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 8,967 | 30.914591 | 86 | py |
multi-head-attention-labeller | multi-head-attention-labeller-master/evaluator.py | from collections import OrderedDict
from sklearn.metrics import classification_report
import conlleval
import numpy as np
import time
class Evaluator:
"""
Evaluates the results of a joint text classifier.
"""
def __init__(self, label2id_sent, label2id_tok, conll03_eval):
self.id2label_sent = {v: k for k, v in label2id_sent.items()}
self.id2label_tok = {v: k for k, v in label2id_tok.items()}
self.conll03_eval = conll03_eval
self.conll_format = []
self.true_sent = []
self.pred_sent = []
self.true_tok = []
self.pred_tok = []
self.cost_sum = 0.0
self.count_sent = 0.0
self.correct_binary_sent = 0.0
self.count_tok = 0.0
self.correct_binary_tok = 0.0
self.sentence_predicted = {k: 0.0 for k in self.id2label_sent.keys()}
self.sentence_correct = {k: 0.0 for k in self.id2label_sent.keys()}
self.sentence_total = {k: 0.0 for k in self.id2label_sent.keys()}
self.token_predicted = {k: 0.0 for k in self.id2label_tok.keys()}
self.token_correct = {k: 0.0 for k in self.id2label_tok.keys()}
self.token_total = {k: 0.0 for k in self.id2label_tok.keys()}
self.start_time = time.time()
def append_token_data_for_sentence(self, tokens, true_labels_tok, pred_labels_tok):
"""
Gets statistical results for the tokens in a sentence.
"""
self.count_tok += len(true_labels_tok)
# For each token, calculate the same metrics as for the sentence scores.
for token, true_label, pred_label in zip(tokens, true_labels_tok, pred_labels_tok):
self.true_tok.append(true_label)
self.pred_tok.append(pred_label)
if true_label == pred_label:
self.correct_binary_tok += 1.0 # accuracy
self.token_predicted[pred_label] += 1.0 # TP + FP
self.token_total[true_label] += 1.0 # TP + FN
if true_label == pred_label:
self.token_correct[true_label] += 1.0 # TP
if self.conll03_eval is True:
gold_token_label = self.id2label_tok[true_label]
gold_token_label = "B-" + gold_token_label if true_label != 0 else gold_token_label
pred_token_label = self.id2label_tok[pred_label]
pred_token_label = "B-" + pred_token_label if true_label != 0 else pred_token_label
self.conll_format.append(
token + "\t" + gold_token_label + "\t" + pred_token_label)
if self.conll03_eval is True:
self.conll_format.append("")
def append_data(self, cost, batch, sentence_predictions, token_predictions):
"""
Gets statistical results for the sentence and token scores in a batch.
"""
self.cost_sum += cost
self.count_sent += len(batch)
for i, sentence in enumerate(batch):
true_labels_tok = [token.label_tok for token in sentence.tokens]
true_labels_sent = sentence.label_sent
self.true_sent.append(true_labels_sent)
self.pred_sent.append(sentence_predictions[i])
# Calculate accuracy.
if true_labels_sent == sentence_predictions[i]:
self.correct_binary_sent += 1.0
# Calculate TP + FP.
self.sentence_predicted[sentence_predictions[i]] += 1.0
# Calculate TP + FN.
self.sentence_total[true_labels_sent] += 1.0
# Calculate TP.
if true_labels_sent == sentence_predictions[i]:
self.sentence_correct[true_labels_sent] += 1.0
# Get the scores for the tokens in this sentence
self.append_token_data_for_sentence(
[token.value for token in sentence.tokens],
true_labels_tok, list(token_predictions[i])[:len(true_labels_tok)])
@staticmethod
def calculate_metrics(correct, predicted, total):
"""
Calculates the basic metrics.
:param correct: the number of examples predicted as correct that are actually correct.
:param predicted: the number of examples predicted as correct.
:param total: the number of examples that are correct by the gold standard.
:return: the precision, recall, F1 and F05 scores
"""
p = correct / predicted if predicted else 0.0
r = correct / total if total else 0.0
f = 2.0 * p * r / (p + r) if p + r else 0.0
f05 = (1 + 0.5 * 0.5) * p * r / (0.5 * 0.5 * p + r) if 0.5 * 0.5 * p + r else 0.0
return p, r, f, f05
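    # f05 is the F-beta score with beta = 0.5, i.e. (1 + b^2) * p * r / (b^2 * p + r),
    # which weights precision more heavily than recall. For example, with correct=8,
    # predicted=10 and total=12: p = 0.80, r ~= 0.67, F1 ~= 0.73 and F0.5 ~= 0.77.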
def get_results(self, name, token_labels_available=True):
"""
Gets the statistical results both at the sentence and at the token level.
:param name: train, dev or test (+ epoch number).
:param token_labels_available: whether there are token annotations.
:return: an ordered dictionary containing the collection of results.
"""
results = OrderedDict()
results["name"] = name
results["cost_sum"] = self.cost_sum
results["cost_avg"] = (self.cost_sum / float(self.count_sent)
if self.count_sent else 0.0)
results["count_sent"] = self.count_sent
results["total_correct_sent"] = self.correct_binary_sent
results["accuracy_sent"] = (self.correct_binary_sent / float(self.count_sent)
if self.count_sent else 0.0)
# Calculate the micro and macro averages for the sentence predictions
f_macro_sent, p_macro_sent, r_macro_sent, f05_macro_sent = 0.0, 0.0, 0.0, 0.0
f_non_default_macro_sent, p_non_default_macro_sent, \
r_non_default_macro_sent, f05_non_default_macro_sent = 0.0, 0.0, 0.0, 0.0
for key in self.id2label_sent.keys():
p, r, f, f05 = self.calculate_metrics(
self.sentence_correct[key],
self.sentence_predicted[key],
self.sentence_total[key])
label = "label=%s" % self.id2label_sent[key]
results[label + "_predicted_sent"] = self.sentence_predicted[key]
results[label + "_correct_sent"] = self.sentence_correct[key]
results[label + "_total_sent"] = self.sentence_total[key]
results[label + "_precision_sent"] = p
results[label + "_recall_sent"] = r
results[label + "_f-score_sent"] = f
results[label + "_f05-score_sent"] = f05
p_macro_sent += p
r_macro_sent += r
f_macro_sent += f
f05_macro_sent += f05
if key != 0:
p_non_default_macro_sent += p
r_non_default_macro_sent += r
f_non_default_macro_sent += f
f05_non_default_macro_sent += f05
p_macro_sent /= len(self.id2label_sent.keys())
r_macro_sent /= len(self.id2label_sent.keys())
f_macro_sent /= len(self.id2label_sent.keys())
f05_macro_sent /= len(self.id2label_sent.keys())
p_non_default_macro_sent /= (len(self.id2label_sent.keys()) - 1)
r_non_default_macro_sent /= (len(self.id2label_sent.keys()) - 1)
f_non_default_macro_sent /= (len(self.id2label_sent.keys()) - 1)
f05_non_default_macro_sent /= (len(self.id2label_sent.keys()) - 1)
p_micro_sent, r_micro_sent, f_micro_sent, f05_micro_sent = self.calculate_metrics(
sum(self.sentence_correct.values()),
sum(self.sentence_predicted.values()),
sum(self.sentence_total.values()))
p_non_default_micro_sent, r_non_default_micro_sent, \
f_non_default_micro_sent, f05_non_default_micro_sent = self.calculate_metrics(
sum([value for key, value in self.sentence_correct.items() if key != 0]),
sum([value for key, value in self.sentence_predicted.items() if key != 0]),
sum([value for key, value in self.sentence_total.items() if key != 0]))
results["precision_macro_sent"] = p_macro_sent
results["recall_macro_sent"] = r_macro_sent
results["f-score_macro_sent"] = f_macro_sent
results["f05-score_macro_sent"] = f05_macro_sent
results["precision_micro_sent"] = p_micro_sent
results["recall_micro_sent"] = r_micro_sent
results["f-score_micro_sent"] = f_micro_sent
results["f05-score_micro_sent"] = f05_micro_sent
results["precision_non_default_macro_sent"] = p_non_default_macro_sent
results["recall_non_default_macro_sent"] = r_non_default_macro_sent
results["f-score_non_default_macro_sent"] = f_non_default_macro_sent
results["f05-score_non_default_macro_sent"] = f05_non_default_macro_sent
results["precision_non_default_micro_sent"] = p_non_default_micro_sent
results["recall_non_default_micro_sent"] = r_non_default_micro_sent
results["f-score_non_default_micro_sent"] = f_non_default_micro_sent
results["f05-score_non_default_micro_sent"] = f05_non_default_micro_sent
if token_labels_available or "test" in name:
results["count_tok"] = self.count_tok
results["total_correct_tok"] = self.correct_binary_tok
results["accuracy_tok"] = (self.correct_binary_tok / float(self.count_tok)
if self.count_tok else 0.0)
# Calculate the micro and macro averages for the token predictions.
f_tok_macro, p_tok_macro, r_tok_macro, f05_tok_macro = 0.0, 0.0, 0.0, 0.0
f_non_default_macro_tok, p_non_default_macro_tok, \
r_non_default_macro_tok, f05_non_default_macro_tok = 0.0, 0.0, 0.0, 0.0
for key in self.id2label_tok.keys():
p, r, f, f05 = self.calculate_metrics(
self.token_correct[key], self.token_predicted[key], self.token_total[key])
label = "label=%s" % self.id2label_tok[key]
results[label + "_predicted_tok"] = self.token_predicted[key]
results[label + "_correct_tok"] = self.token_correct[key]
results[label + "_total_tok"] = self.token_total[key]
results[label + "_precision_tok"] = p
results[label + "_recall_tok"] = r
results[label + "_f-score_tok"] = f
results[label + "_tok_f05"] = f05
p_tok_macro += p
r_tok_macro += r
f_tok_macro += f
f05_tok_macro += f05
if key != 0:
p_non_default_macro_tok += p
r_non_default_macro_tok += r
f_non_default_macro_tok += f
f05_non_default_macro_tok += f05
p_tok_macro /= len(self.id2label_tok.keys())
r_tok_macro /= len(self.id2label_tok.keys())
f_tok_macro /= len(self.id2label_tok.keys())
f05_tok_macro /= len(self.id2label_tok.keys())
p_non_default_macro_tok /= (len(self.id2label_tok.keys()) - 1)
r_non_default_macro_tok /= (len(self.id2label_tok.keys()) - 1)
f_non_default_macro_tok /= (len(self.id2label_tok.keys()) - 1)
f05_non_default_macro_tok /= (len(self.id2label_tok.keys()) - 1)
p_tok_micro, r_tok_micro, f_tok_micro, f05_tok_micro = self.calculate_metrics(
sum(self.token_correct.values()),
sum(self.token_predicted.values()),
sum(self.token_total.values()))
p_non_default_micro_tok, r_non_default_micro_tok, \
f_non_default_micro_tok, f05_non_default_micro_tok = self.calculate_metrics(
sum([value for key, value in self.token_correct.items() if key != 0]),
sum([value for key, value in self.token_predicted.items() if key != 0]),
sum([value for key, value in self.token_total.items() if key != 0]))
results["precision_macro_tok"] = p_tok_macro
results["recall_macro_tok"] = r_tok_macro
results["f-score_macro_tok"] = f_tok_macro
results["f05-score_macro_tok"] = f05_tok_macro
results["precision_micro_tok"] = p_tok_micro
results["recall_micro_tok"] = r_tok_micro
results["f-score_micro_tok"] = f_tok_micro
results["f05-score_micro_tok"] = f05_tok_micro
results["precision_non_default_macro_tok"] = p_non_default_macro_tok
results["recall_non_default_macro_tok"] = r_non_default_macro_tok
results["f-score_non_default_macro_tok"] = f_non_default_macro_tok
results["f05-score_non_default_macro_tok"] = f05_non_default_macro_tok
results["precision_non_default_micro_tok"] = p_non_default_micro_tok
results["recall_non_default_micro_tok"] = r_non_default_micro_tok
results["f-score_non_default_micro_tok"] = f_non_default_micro_tok
results["f05-score_non_default_micro_tok"] = f05_non_default_micro_tok
if self.id2label_tok is not None and self.conll03_eval is True:
conll_counts = conlleval.evaluate(self.conll_format)
conll_metrics_overall, conll_metrics_by_type = conlleval.metrics(conll_counts)
results["conll_accuracy"] = (float(conll_counts.correct_tags)
/ float(conll_counts.token_counter))
results["conll_p"] = conll_metrics_overall.prec
results["conll_r"] = conll_metrics_overall.rec
results["conll_f"] = conll_metrics_overall.fscore
results["time"] = float(time.time()) - float(self.start_time)
return results
def get_results_nice_print(self, name, token_labels_available=True):
"""
        Pretty-prints the statistical results already computed.
        Can also be used to sanity-check the basic metrics.
"""
if self.true_sent and self.pred_sent:
print("*" * 50)
print("Sentence predictions: ")
print(classification_report(
self.true_sent, self.pred_sent, digits=4,
labels=np.array(range(len(self.id2label_sent))),
target_names=[self.id2label_sent[i] for i in range(len(self.id2label_sent))]))
if token_labels_available or "test" in name:
if self.true_tok and self.pred_tok:
print("*" * 50)
print("Token predictions: ")
print(classification_report(
self.true_tok, self.pred_tok, digits=4,
labels=np.array(range(len(self.id2label_tok))),
target_names=[self.id2label_tok[i] for i in range(len(self.id2label_tok))]))
| 14,918 | 46.512739 | 99 | py |
multi-head-attention-labeller | multi-head-attention-labeller-master/second_model.py | from modules import label_smoothing
import collections
import numpy
import pickle
import re
import tensorflow as tf
class Model(object):
"""
Implements the multi-head attention labeller (MHAL)
without keys, queries, and values.
It only uses a simple, soft attention.
"""
def __init__(self, config, label2id_sent, label2id_tok):
self.config = config
self.label2id_sent = label2id_sent
self.label2id_tok = label2id_tok
self.UNK = "<unk>"
self.CUNK = "<cunk>"
self.word2id = None
self.char2id = None
self.singletons = None
self.num_heads = None
self.word_ids = None
self.char_ids = None
self.sentence_lengths = None
self.word_lengths = None
self.sentence_labels = None
self.word_labels = None
self.word_embeddings = None
self.char_embeddings = None
self.word_objective_weights = None
self.sentence_objective_weights = None
self.learning_rate = None
self.loss = None
self.initializer = None
self.is_training = None
self.session = None
self.saver = None
self.train_op = None
self.sentence_predictions = None
self.sentence_probabilities = None
self.token_predictions = None
self.token_probabilities = None
def build_vocabs(self, data_train, data_dev, data_test, embedding_path=None):
"""
        Builds the vocabulary based on the data and the embedding information.
"""
data_source = list(data_train)
if self.config["vocab_include_devtest"]:
if data_dev is not None:
data_source += data_dev
if data_test is not None:
data_source += data_test
char_counter = collections.Counter()
word_counter = collections.Counter()
for sentence in data_source:
for token in sentence.tokens:
char_counter.update(token.value)
w = token.value
if self.config["lowercase"]:
w = w.lower()
if self.config["replace_digits"]:
w = re.sub(r'\d', '0', w)
word_counter[w] += 1
self.char2id = collections.OrderedDict([(self.CUNK, 0)])
for char, count in char_counter.most_common():
if char not in self.char2id:
self.char2id[char] = len(self.char2id)
self.word2id = collections.OrderedDict([(self.UNK, 0)])
for word, count in word_counter.most_common():
if self.config["min_word_freq"] <= 0 or count >= self.config["min_word_freq"]:
if word not in self.word2id:
self.word2id[word] = len(self.word2id)
self.singletons = set([word for word in word_counter if word_counter[word] == 1])
if embedding_path and self.config["vocab_only_embedded"]:
embedding_vocab = {self.UNK}
with open(embedding_path) as f:
for line in f:
line_parts = line.strip().split()
if len(line_parts) <= 2:
continue
w = line_parts[0]
if self.config["lowercase"]:
w = w.lower()
if self.config["replace_digits"]:
w = re.sub(r'\d', '0', w)
embedding_vocab.add(w)
word2id_revised = collections.OrderedDict()
for word in self.word2id:
if word in embedding_vocab and word not in word2id_revised:
word2id_revised[word] = len(word2id_revised)
self.word2id = word2id_revised
print("Total number of words: " + str(len(self.word2id)))
print("Total number of chars: " + str(len(self.char2id)))
print("Total number of singletons: " + str(len(self.singletons)))
def construct_network(self):
"""
Constructs a variant of the multi-head attention labeller (MHAL)
that does not use keys, queries and values, but only a simple form
of additive attention, as proposed by Yang et al. (2016).
"""
self.word_ids = tf.placeholder(tf.int32, [None, None], name="word_ids")
self.char_ids = tf.placeholder(tf.int32, [None, None, None], name="char_ids")
self.sentence_lengths = tf.placeholder(tf.int32, [None], name="sentence_lengths")
self.word_lengths = tf.placeholder(tf.int32, [None, None], name="word_lengths")
self.sentence_labels = tf.placeholder(tf.float32, [None], name="sentence_labels")
self.word_labels = tf.placeholder(tf.float32, [None, None], name="word_labels")
self.word_objective_weights = tf.placeholder(
tf.float32, [None, None], name="word_objective_weights")
self.sentence_objective_weights = tf.placeholder(
tf.float32, [None], name="sentence_objective_weights")
self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
self.is_training = tf.placeholder(tf.int32, name="is_training")
self.loss = 0.0
if self.config["initializer"] == "normal":
self.initializer = tf.random_normal_initializer(stddev=0.1)
elif self.config["initializer"] == "glorot":
self.initializer = tf.glorot_uniform_initializer()
elif self.config["initializer"] == "xavier":
self.initializer = tf.glorot_normal_initializer()
zeros_initializer = tf.zeros_initializer()
self.word_embeddings = tf.get_variable(
name="word_embeddings",
shape=[len(self.word2id), self.config["word_embedding_size"]],
initializer=(zeros_initializer if self.config["emb_initial_zero"] else self.initializer),
trainable=(True if self.config["train_embeddings"] else False))
word_input_tensor = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)
if self.config["char_embedding_size"] > 0 and self.config["char_recurrent_size"] > 0:
with tf.variable_scope("chars"), tf.control_dependencies(
[tf.assert_equal(tf.shape(self.char_ids)[2],
tf.reduce_max(self.word_lengths),
message="Char dimensions don't match")]):
self.char_embeddings = tf.get_variable(
name="char_embeddings",
shape=[len(self.char2id), self.config["char_embedding_size"]],
initializer=self.initializer,
trainable=True)
char_input_tensor = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)
char_input_tensor_shape = tf.shape(char_input_tensor)
char_input_tensor = tf.reshape(
char_input_tensor,
shape=[char_input_tensor_shape[0]
* char_input_tensor_shape[1],
char_input_tensor_shape[2],
self.config["char_embedding_size"]])
_word_lengths = tf.reshape(
self.word_lengths, shape=[char_input_tensor_shape[0]
* char_input_tensor_shape[1]])
char_lstm_cell_fw = tf.nn.rnn_cell.LSTMCell(
self.config["char_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
char_lstm_cell_bw = tf.nn.rnn_cell.LSTMCell(
self.config["char_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
# Concatenate the final forward and the backward character contexts
# to obtain a compact character representation for each word.
_, ((_, char_output_fw), (_, char_output_bw)) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=char_lstm_cell_fw, cell_bw=char_lstm_cell_bw, inputs=char_input_tensor,
sequence_length=_word_lengths, dtype=tf.float32, time_major=False)
char_output_tensor = tf.concat([char_output_fw, char_output_bw], axis=-1)
char_output_tensor = tf.reshape(
char_output_tensor,
shape=[char_input_tensor_shape[0], char_input_tensor_shape[1],
2 * self.config["char_recurrent_size"]])
# Include a char-based language modelling loss, LMc.
if self.config["lm_cost_char_gamma"] > 0.0:
self.loss += self.config["lm_cost_char_gamma"] * \
self.construct_lm_cost(
input_tensor_fw=char_output_tensor,
input_tensor_bw=char_output_tensor,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="separate",
name="lm_cost_char_separate")
if self.config["lm_cost_joint_char_gamma"] > 0.0:
self.loss += self.config["lm_cost_joint_char_gamma"] * \
self.construct_lm_cost(
input_tensor_fw=char_output_tensor,
input_tensor_bw=char_output_tensor,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="joint",
name="lm_cost_char_joint")
if self.config["char_hidden_layer_size"] > 0:
char_output_tensor = tf.layers.dense(
inputs=char_output_tensor, units=self.config["char_hidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
if self.config["char_integration_method"] == "concat":
word_input_tensor = tf.concat([word_input_tensor, char_output_tensor], axis=-1)
elif self.config["char_integration_method"] == "none":
word_input_tensor = word_input_tensor
else:
raise ValueError("Unknown char integration method")
if self.config["dropout_input"] > 0.0:
dropout_input = (self.config["dropout_input"] * tf.cast(self.is_training, tf.float32)
+ (1.0 - tf.cast(self.is_training, tf.float32)))
word_input_tensor = tf.nn.dropout(
word_input_tensor, dropout_input, name="dropout_word")
word_lstm_cell_fw = tf.nn.rnn_cell.LSTMCell(
self.config["word_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
word_lstm_cell_bw = tf.nn.rnn_cell.LSTMCell(
self.config["word_recurrent_size"],
use_peepholes=self.config["lstm_use_peepholes"],
state_is_tuple=True,
initializer=self.initializer,
reuse=False)
with tf.control_dependencies(
[tf.assert_equal(
tf.shape(self.word_ids)[1],
tf.reduce_max(self.sentence_lengths),
message="Sentence dimensions don't match")]):
(lstm_outputs_fw, lstm_outputs_bw), ((_, lstm_output_fw), (_, lstm_output_bw)) = \
tf.nn.bidirectional_dynamic_rnn(
cell_fw=word_lstm_cell_fw, cell_bw=word_lstm_cell_bw, inputs=word_input_tensor,
sequence_length=self.sentence_lengths, dtype=tf.float32, time_major=False)
lstm_output_states = tf.concat([lstm_output_fw, lstm_output_bw], axis=-1)
if self.config["dropout_word_lstm"] > 0.0:
dropout_word_lstm = (self.config["dropout_word_lstm"] * tf.cast(self.is_training, tf.float32)
+ (1.0 - tf.cast(self.is_training, tf.float32)))
lstm_outputs_fw = tf.nn.dropout(
lstm_outputs_fw, dropout_word_lstm,
noise_shape=tf.convert_to_tensor(
[tf.shape(self.word_ids)[0], 1, self.config["word_recurrent_size"]], dtype=tf.int32))
lstm_outputs_bw = tf.nn.dropout(
lstm_outputs_bw, dropout_word_lstm,
noise_shape=tf.convert_to_tensor(
[tf.shape(self.word_ids)[0], 1, self.config["word_recurrent_size"]], dtype=tf.int32))
lstm_output_states = tf.nn.dropout(lstm_output_states, dropout_word_lstm)
# The forward and backward states are concatenated at every token position.
lstm_outputs_states = tf.concat([lstm_outputs_fw, lstm_outputs_bw], axis=-1)
if self.config["whidden_layer_size"] > 0:
lstm_outputs_states = tf.layers.dense(
lstm_outputs_states, self.config["whidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
if self.config["model_type"] == "last":
processed_tensor = lstm_output_states
token_scores = tf.layers.dense(
lstm_outputs_states, units=len(self.label2id_tok),
kernel_initializer=self.initializer,
name="token_scores_last_lstm_outputs_ff")
if self.config["hidden_layer_size"] > 0:
processed_tensor = tf.layers.dense(
processed_tensor, units=self.config["hidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
sentence_scores = tf.layers.dense(
processed_tensor, units=len(self.label2id_sent),
kernel_initializer=self.initializer,
name="sentence_scores_last_lstm_outputs_ff")
else:
with tf.variable_scope("attention"):
token_scores_list = []
sentence_scores_list = []
for i in range(len(self.label2id_tok)):
keys = tf.layers.dense(
lstm_outputs_states, units=self.config["attention_evidence_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
values = tf.layers.dense(
lstm_outputs_states, units=self.config["attention_evidence_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
token_scores_head = tf.layers.dense(
keys, units=1, kernel_initializer=self.initializer) # [B, M, 1]
token_scores_head = tf.reshape(
token_scores_head, shape=tf.shape(self.word_ids)) # [B, M]
token_scores_list.append(token_scores_head)
if self.config["attention_activation"] == "sharp":
attention_weights_unnormalized = tf.exp(token_scores_head)
elif self.config["attention_activation"] == "soft":
attention_weights_unnormalized = tf.sigmoid(token_scores_head)
elif self.config["attention_activation"] == "linear":
attention_weights_unnormalized = token_scores_head
else:
raise ValueError("Unknown/unsupported token scoring method: %s"
% self.config["attention_activation"])
attention_weights_unnormalized = tf.where(
tf.sequence_mask(self.sentence_lengths),
attention_weights_unnormalized,
tf.zeros_like(attention_weights_unnormalized))
attention_weights = attention_weights_unnormalized / tf.reduce_sum(
attention_weights_unnormalized, axis=1, keep_dims=True) # [B, M]
processed_tensor = tf.reduce_sum(
values * attention_weights[:, :, numpy.newaxis], axis=1) # [B, E]
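                    # With the "soft" activation, raw scores such as [2.0, -1.0] become
                    # sigmoid values [0.88, 0.27] and are renormalised to weights
                    # [0.77, 0.23]; processed_tensor is then the weighted average of
                    # this head's value vectors over the valid tokens.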
if self.config["hidden_layer_size"] > 0:
processed_tensor = tf.layers.dense(
processed_tensor, units=self.config["hidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
sentence_score_head = tf.layers.dense(
processed_tensor, units=1,
kernel_initializer=self.initializer,
name="output_ff_head_%d" % i) # [B, 1]
sentence_score_head = tf.reshape(
sentence_score_head, shape=[tf.shape(processed_tensor)[0]]) # [B]
sentence_scores_list.append(sentence_score_head)
token_scores = tf.stack(token_scores_list, axis=-1) # [B, M, H]
all_sentence_scores = tf.stack(sentence_scores_list, axis=-1) # [B, H]
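                # With more token heads than sentence labels (binary sentence task), the
                # default head provides the score for the negative class and the strongest
                # non-default head provides the score for the positive class below.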
if len(self.label2id_tok) != len(self.label2id_sent):
if len(self.label2id_sent) == 2:
default_sentence_score = tf.gather(
all_sentence_scores, indices=[0], axis=1) # [B, 1]
maximum_non_default_sentence_score = tf.gather(
all_sentence_scores, indices=list(
range(1, len(self.label2id_tok))), axis=1) # [B, num_heads-1]
maximum_non_default_sentence_score = tf.reduce_max(
maximum_non_default_sentence_score, axis=1, keep_dims=True) # [B, 1]
sentence_scores = tf.concat(
[default_sentence_score, maximum_non_default_sentence_score],
axis=-1, name="sentence_scores_concatenation") # [B, 2]
else:
sentence_scores = tf.layers.dense(
all_sentence_scores, units=len(self.label2id_sent),
kernel_initializer=self.initializer) # [B, num_sent_labels]
else:
sentence_scores = all_sentence_scores
# Mask the token scores that do not fall in the range of the true sentence length.
# Do this for each head (change shape from [B, M] to [B, M, num_heads]).
tiled_sentence_lengths = tf.tile(
input=tf.expand_dims(
tf.sequence_mask(self.sentence_lengths), axis=-1),
multiples=[1, 1, len(self.label2id_tok)])
self.token_probabilities = tf.nn.softmax(token_scores, axis=-1)
self.token_probabilities = tf.where(
tiled_sentence_lengths,
self.token_probabilities,
tf.zeros_like(self.token_probabilities))
self.token_predictions = tf.argmax(self.token_probabilities, axis=2)
self.sentence_probabilities = tf.nn.softmax(sentence_scores)
self.sentence_predictions = tf.argmax(self.sentence_probabilities, axis=1)
if self.config["word_objective_weight"] > 0:
word_objective_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=token_scores, labels=tf.cast(self.word_labels, tf.int32))
word_objective_loss = tf.where(
tf.sequence_mask(self.sentence_lengths),
word_objective_loss,
tf.zeros_like(word_objective_loss))
self.loss += self.config["word_objective_weight"] * tf.reduce_sum(
self.word_objective_weights * word_objective_loss)
if self.config["sentence_objective_weight"] > 0:
self.loss += self.config["sentence_objective_weight"] * tf.reduce_sum(
self.sentence_objective_weights *
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=sentence_scores, labels=tf.cast(self.sentence_labels, tf.int32)))
max_over_token_heads = tf.reduce_max(self.token_probabilities, axis=1) # [B, H]
one_hot_sentence_labels = tf.one_hot(
tf.cast(self.sentence_labels, tf.int32),
depth=len(self.label2id_sent))
if self.config["enable_label_smoothing"]:
one_hot_sentence_labels_smoothed = label_smoothing(
one_hot_sentence_labels, epsilon=self.config["smoothing_epsilon"])
else:
one_hot_sentence_labels_smoothed = one_hot_sentence_labels
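        # Assuming label_smoothing() implements the standard (1 - eps) * y + eps / K
        # smoothing, a one-hot target [0, 1] with eps = 0.1 and K = 2 classes becomes
        # [0.05, 0.95], so the attention objectives below never chase exact 0/1 targets.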
# At least one token has a label corresponding to the true sentence label.
# This loss also pushes the maximums over the other heads towards 0 (but smoothed).
if self.config["type1_attention_objective_weight"] > 0:
this_max_over_token_heads = max_over_token_heads
if len(self.label2id_tok) != len(self.label2id_sent):
if len(self.label2id_sent) == 2:
max_default_head = tf.gather(
max_over_token_heads, indices=[0], axis=-1) # [B, 1]
max_non_default_head = tf.reduce_max(tf.gather(
max_over_token_heads, indices=list(
range(1, len(self.label2id_tok))), axis=-1),
axis=1, keep_dims=True) # [B, 1]
this_max_over_token_heads = tf.concat(
[max_default_head, max_non_default_head], axis=-1) # [B, 2]
else:
raise ValueError(
"Unsupported attention loss for num_heads != num_sent_lables "
"and num_sentence_labels != 2.")
self.loss += self.config["type1_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.reduce_sum(tf.square(
this_max_over_token_heads - one_hot_sentence_labels_smoothed), axis=-1)))
# The predicted distribution over the token labels (heads) should be similar to the
# predicted distribution over the sentence representations.
if self.config["type2_attention_objective_weight"] > 0:
all_sentence_scores_probabilities = tf.nn.softmax(all_sentence_scores) # [B, H]
self.loss += self.config["type2_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.reduce_sum(tf.square(
max_over_token_heads - all_sentence_scores_probabilities), axis=-1)))
# At least one token has a label corresponding to the true sentence label.
if self.config["type3_attention_objective_weight"] > 0:
this_max_over_token_heads = max_over_token_heads
if len(self.label2id_tok) != len(self.label2id_sent):
if len(self.label2id_sent) == 2:
max_default_head = tf.gather(
max_over_token_heads, indices=[0], axis=-1) # [B, 1]
max_non_default_head = tf.reduce_max(tf.gather(
max_over_token_heads, indices=list(
range(1, len(self.label2id_tok))), axis=-1),
axis=1, keep_dims=True) # [B, 1]
this_max_over_token_heads = tf.concat(
[max_default_head, max_non_default_head], axis=-1) # [B, 2]
else:
raise ValueError(
"Unsupported attention loss for num_heads != num_sent_lables "
"and num_sentence_labels != 2.")
self.loss += self.config["type3_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.reduce_sum(tf.square(
(this_max_over_token_heads * one_hot_sentence_labels)
- one_hot_sentence_labels_smoothed), axis=-1)))
# A sentence that has a default label, should only contain tokens labeled as default.
if self.config["type4_attention_objective_weight"] > 0:
default_head = tf.gather(self.token_probabilities, indices=[0], axis=-1) # [B, M, 1]
default_head = tf.squeeze(default_head, axis=-1) # [B, M]
self.loss += self.config["type4_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.cast(
tf.equal(self.sentence_labels, 0.0), tf.float32) * tf.reduce_sum(
tf.square(default_head - tf.ones_like(default_head)), axis=-1)))
# Every sentence has at least one default label.
if self.config["type5_attention_objective_weight"] > 0:
default_head = tf.gather(self.token_probabilities, indices=[0], axis=-1) # [B, M, 1]
max_default_head = tf.reduce_max(tf.squeeze(default_head, axis=-1), axis=-1) # [B]
self.loss += self.config["type5_attention_objective_weight"] * (
tf.reduce_sum(self.sentence_objective_weights * tf.square(
max_default_head - tf.ones_like(max_default_head))))
# Include a word-based language modelling loss, LMw.
if self.config["lm_cost_lstm_gamma"] > 0.0:
self.loss += self.config["lm_cost_lstm_gamma"] * self.construct_lm_cost(
input_tensor_fw=lstm_outputs_fw,
input_tensor_bw=lstm_outputs_bw,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="separate",
name="lm_cost_lstm_separate")
if self.config["lm_cost_joint_lstm_gamma"] > 0.0:
self.loss += self.config["lm_cost_joint_lstm_gamma"] * self.construct_lm_cost(
input_tensor_fw=lstm_outputs_fw,
input_tensor_bw=lstm_outputs_bw,
sentence_lengths=self.sentence_lengths,
target_ids=self.word_ids,
lm_cost_type="joint",
name="lm_cost_lstm_joint")
self.train_op = self.construct_optimizer(
opt_strategy=self.config["opt_strategy"],
loss=self.loss,
learning_rate=self.learning_rate,
clip=self.config["clip"])
print("Notwork built.")
def construct_lm_cost(
self, input_tensor_fw, input_tensor_bw,
sentence_lengths, target_ids, lm_cost_type, name):
"""
Constructs the char/word-based language modelling objective.
"""
with tf.variable_scope(name):
lm_cost_max_vocab_size = min(
len(self.word2id), self.config["lm_cost_max_vocab_size"])
target_ids = tf.where(
tf.greater_equal(target_ids, lm_cost_max_vocab_size - 1),
x=(lm_cost_max_vocab_size - 1) + tf.zeros_like(target_ids),
y=target_ids)
cost = 0.0
if lm_cost_type == "separate":
lm_cost_fw_mask = tf.sequence_mask(
sentence_lengths, maxlen=tf.shape(target_ids)[1])[:, 1:]
lm_cost_bw_mask = tf.sequence_mask(
sentence_lengths, maxlen=tf.shape(target_ids)[1])[:, :-1]
lm_cost_fw = self._construct_lm_cost(
input_tensor_fw[:, :-1, :],
lm_cost_max_vocab_size,
lm_cost_fw_mask,
target_ids[:, 1:],
name=name + "_fw")
lm_cost_bw = self._construct_lm_cost(
input_tensor_bw[:, 1:, :],
lm_cost_max_vocab_size,
lm_cost_bw_mask,
target_ids[:, :-1],
name=name + "_bw")
cost += lm_cost_fw + lm_cost_bw
elif lm_cost_type == "joint":
joint_input_tensor = tf.concat(
[input_tensor_fw[:, :-2, :], input_tensor_bw[:, 2:, :]], axis=-1)
lm_cost_mask = tf.sequence_mask(
sentence_lengths, maxlen=tf.shape(target_ids)[1])[:, 1:-1]
cost += self._construct_lm_cost(
joint_input_tensor,
lm_cost_max_vocab_size,
lm_cost_mask,
target_ids[:, 1:-1],
name=name + "_joint")
else:
raise ValueError("Unknown lm_cost_type: %s." % lm_cost_type)
return cost
def _construct_lm_cost(
self, input_tensor, lm_cost_max_vocab_size,
lm_cost_mask, target_ids, name):
with tf.variable_scope(name):
lm_cost_hidden_layer = tf.layers.dense(
inputs=input_tensor, units=self.config["lm_cost_hidden_layer_size"],
activation=tf.tanh, kernel_initializer=self.initializer)
lm_cost_output = tf.layers.dense(
inputs=lm_cost_hidden_layer, units=lm_cost_max_vocab_size,
kernel_initializer=self.initializer)
lm_cost_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=lm_cost_output, labels=target_ids)
lm_cost_loss = tf.where(lm_cost_mask, lm_cost_loss, tf.zeros_like(lm_cost_loss))
return tf.reduce_sum(lm_cost_loss)
@staticmethod
def construct_optimizer(opt_strategy, loss, learning_rate, clip):
"""
Applies an optimization strategy to minimize the loss.
"""
if opt_strategy == "adadelta":
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
elif opt_strategy == "adam":
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
elif opt_strategy == "sgd":
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
else:
raise ValueError("Unknown optimisation strategy: %s." % opt_strategy)
if clip > 0.0:
grads, vs = zip(*optimizer.compute_gradients(loss))
grads, gnorm = tf.clip_by_global_norm(grads, clip)
train_op = optimizer.apply_gradients(zip(grads, vs))
else:
train_op = optimizer.minimize(loss)
return train_op
def preload_word_embeddings(self, embedding_path):
"""
        Loads the pre-trained word embeddings from embedding_path
        into the word embedding matrix before training starts.
"""
loaded_embeddings = set()
embedding_matrix = self.session.run(self.word_embeddings)
with open(embedding_path) as f:
for line in f:
line_parts = line.strip().split()
if len(line_parts) <= 2:
continue
w = line_parts[0]
if self.config["lowercase"]:
w = w.lower()
if self.config["replace_digits"]:
w = re.sub(r'\d', '0', w)
if w in self.word2id and w not in loaded_embeddings:
word_id = self.word2id[w]
embedding = numpy.array(line_parts[1:])
embedding_matrix[word_id] = embedding
loaded_embeddings.add(w)
self.session.run(self.word_embeddings.assign(embedding_matrix))
print("No. of pre-loaded embeddings: %d." % len(loaded_embeddings))
@staticmethod
def translate2id(
token, token2id, unk_token=None, lowercase=False,
replace_digits=False, singletons=None, singletons_prob=0.0):
"""
Maps each token/character to its index.
"""
if lowercase:
token = token.lower()
if replace_digits:
token = re.sub(r'\d', '0', token)
if singletons and token in singletons \
and token in token2id and unk_token \
and numpy.random.uniform() < singletons_prob:
token_id = token2id[unk_token]
elif token in token2id:
token_id = token2id[token]
elif unk_token:
token_id = token2id[unk_token]
else:
raise ValueError("Unable to handle value, no UNK token: %s." % token)
return token_id
def create_input_dictionary_for_batch(self, batch, is_training, learning_rate):
"""
        Creates the dictionary fed to the TF model.
"""
sentence_lengths = numpy.array([len(sentence.tokens) for sentence in batch])
max_sentence_length = sentence_lengths.max()
max_word_length = numpy.array(
[numpy.array([len(token.value) for token in sentence.tokens]).max()
for sentence in batch]).max()
if 0 < self.config["allowed_word_length"] < max_word_length:
max_word_length = min(max_word_length, self.config["allowed_word_length"])
word_ids = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.int32)
char_ids = numpy.zeros(
(len(batch), max_sentence_length, max_word_length), dtype=numpy.int32)
word_lengths = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.int32)
word_labels = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.float32)
sentence_labels = numpy.zeros(
(len(batch)), dtype=numpy.float32)
word_objective_weights = numpy.zeros(
(len(batch), max_sentence_length), dtype=numpy.float32)
sentence_objective_weights = numpy.zeros((len(batch)), dtype=numpy.float32)
# A proportion of the singletons are assigned to UNK (do this just for training).
singletons = self.singletons if is_training else None
singletons_prob = self.config["singletons_prob"] if is_training else 0.0
for i, sentence in enumerate(batch):
sentence_labels[i] = sentence.label_sent
if sentence_labels[i] != 0:
if self.config["sentence_objective_weights_non_default"] > 0.0:
sentence_objective_weights[i] = self.config[
"sentence_objective_weights_non_default"]
else:
sentence_objective_weights[i] = 1.0
else:
sentence_objective_weights[i] = 1.0
for j, token in enumerate(sentence.tokens):
word_ids[i][j] = self.translate2id(
token=token.value,
token2id=self.word2id,
unk_token=self.UNK,
lowercase=self.config["lowercase"],
replace_digits=self.config["replace_digits"],
singletons=singletons,
singletons_prob=singletons_prob)
word_labels[i][j] = token.label_tok
word_lengths[i][j] = len(token.value)
for k in range(min(len(token.value), max_word_length)):
char_ids[i][j][k] = self.translate2id(
token=token.value[k],
token2id=self.char2id,
unk_token=self.CUNK)
if token.enable_supervision is True:
word_objective_weights[i][j] = 1.0
input_dictionary = {
self.word_ids: word_ids,
self.char_ids: char_ids,
self.sentence_lengths: sentence_lengths,
self.word_lengths: word_lengths,
self.sentence_labels: sentence_labels,
self.word_labels: word_labels,
self.word_objective_weights: word_objective_weights,
self.sentence_objective_weights: sentence_objective_weights,
self.learning_rate: learning_rate,
self.is_training: is_training}
return input_dictionary
def process_batch(self, batch, is_training, learning_rate):
"""
Processes a batch of sentences.
:param batch: a set of sentences of size "max_batch_size".
:param is_training: whether the current batch is a training instance or not.
:param learning_rate: the pace at which learning should be performed.
:return: the cost, the sentence predictions, the sentence label distribution,
the token predictions and the token label distribution.
"""
feed_dict = self.create_input_dictionary_for_batch(batch, is_training, learning_rate)
cost, sentence_pred, sentence_prob, token_pred, token_prob = self.session.run(
[self.loss, self.sentence_predictions, self.sentence_probabilities,
self.token_predictions, self.token_probabilities] +
([self.train_op] if is_training else []), feed_dict=feed_dict)[:5]
return cost, sentence_pred, sentence_prob, token_pred, token_prob
def initialize_session(self):
"""
Initializes a tensorflow session and sets the random seed.
"""
tf.set_random_seed(self.config["random_seed"])
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = self.config["tf_allow_growth"]
session_config.gpu_options.per_process_gpu_memory_fraction = self.config[
"tf_per_process_gpu_memory_fraction"]
self.session = tf.Session(config=session_config)
self.session.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=1)
@staticmethod
def get_parameter_count():
"""
Counts the total number of parameters.
"""
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
return total_parameters
def get_parameter_count_without_word_embeddings(self):
"""
Counts the number of parameters without those introduced by word embeddings.
"""
shape = self.word_embeddings.get_shape()
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
return self.get_parameter_count() - variable_parameters
def save(self, filename):
"""
Saves a trained model to the path in filename.
"""
dump = dict()
dump["config"] = self.config
dump["label2id_sent"] = self.label2id_sent
dump["label2id_tok"] = self.label2id_tok
dump["UNK"] = self.UNK
dump["CUNK"] = self.CUNK
dump["word2id"] = self.word2id
dump["char2id"] = self.char2id
dump["singletons"] = self.singletons
dump["params"] = {}
for variable in tf.global_variables():
assert (
variable.name not in dump["params"]), \
"Error: variable with this name already exists: %s." % variable.name
dump["params"][variable.name] = self.session.run(variable)
with open(filename, 'wb') as f:
pickle.dump(dump, f, protocol=pickle.HIGHEST_PROTOCOL)
@staticmethod
def load(filename, new_config=None):
"""
Loads a pre-trained MHAL model.
"""
with open(filename, 'rb') as f:
dump = pickle.load(f)
dump["config"]["save"] = None
# Use the saved config, except for values that are present in the new config.
if new_config:
for key in new_config:
dump["config"][key] = new_config[key]
labeler = Model(dump["config"], dump["label2id_sent"], dump["label2id_tok"])
labeler.UNK = dump["UNK"]
labeler.CUNK = dump["CUNK"]
labeler.word2id = dump["word2id"]
labeler.char2id = dump["char2id"]
labeler.singletons = dump["singletons"]
labeler.construct_network()
labeler.initialize_session()
labeler.load_params(filename)
return labeler
def load_params(self, filename):
"""
Loads the parameters of a trained model.
"""
with open(filename, 'rb') as f:
dump = pickle.load(f)
for variable in tf.global_variables():
assert (variable.name in dump["params"]), \
"Variable not in dump: %s." % variable.name
assert (variable.shape == dump["params"][variable.name].shape), \
"Variable shape not as expected: %s, of shape %s. %s" % (
variable.name, str(variable.shape),
str(dump["params"][variable.name].shape))
value = numpy.asarray(dump["params"][variable.name])
self.session.run(variable.assign(value))
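# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). It shows how a model
# saved with Model.save() could be restored and evaluated with the methods
# defined above; `saved_path` and `dev_batches` (an iterable of batches of
# sentence objects) are illustrative placeholders, not names from this repo.
# ----------------------------------------------------------------------------
def _example_evaluate(saved_path, dev_batches):
    labeler = Model.load(saved_path)
    results = []
    for batch in dev_batches:
        cost, sent_pred, sent_prob, tok_pred, tok_prob = labeler.process_batch(
            batch, is_training=False, learning_rate=0.0)
        results.append((cost, sent_pred, tok_pred))
    return results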
| 41,034 | 48.026284 | 105 | py |
multi-head-attention-labeller | multi-head-attention-labeller-master/visualize.py | import matplotlib as mpl
mpl.use("agg")
mpl.rcParams['xtick.labelsize'] = 20
mpl.rcParams['ytick.labelsize'] = 20
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
import numpy as np
html_header = '<!DOCTYPE html>\n<html>\n<font size="3">\n<head>\n<meta charset="UTF-8">\n<body>\n'
html_footer = '</body></font></html>'
# A palette of colours (expecting no more than 10 heads). Add more if needed.
head_colours = [
[0.75, 0.75, 0.75], # grey for default heads
[0.9, 0.0, 0.0], # red
[0.6, 0.0, 1.0], # purple
[1.0, 0.6, 0.0], # orange
[0.0, 1.0, 0.0], # green
[0.0, 0.0, 0.9], # blue
[1.0, 0.0, 1.0], # pink
[1.0, 1.0, 0.3], # yellow
    [0.0, 0.6, 1.0], # another type of blue
    [0.5, 1.0, 0.0], # another type of green
]
head_colours_sent = [[0.8, 0.0, 0.4], [0.0, 0.4, 0.4]] # for binary-labelled sentences
def plot_token_scores(
token_probs, sentence, id2label_tok,
plot_name=None, show=False):
"""
Plot the (normalized) token scores onto a grid of heads.
    :param token_probs: normalized token scores of shape [max_sentence_length, num_heads].
:param sentence: contains all the tokens corresponding to the token probs.
:param id2label_tok: dictionary mapping ids to token labels.
    :param plot_name: name of the file where the plot is saved; nothing is saved if None.
    :param show: whether or not to display the plot on the screen.
:return: Nothing, just plot the token scores.
"""
sentence_length = len(sentence.tokens)
token_probs = token_probs[:][:sentence_length].T
(nrows, ncols) = token_probs.shape
color_data = []
for i, [r, g, b] in enumerate(head_colours[:nrows]):
row = []
for j in range(ncols):
row.append([r, g, b, token_probs[i][j]])
color_data.append(row)
plt.figure(figsize=(16, 12), dpi=100)
row_labels = ["O"] + [str(id2label_tok[i + 1]) for i in range(nrows-1)]
col_labels = [token.value for token in sentence.tokens]
plt.imshow(color_data, vmin=0, vmax=sentence_length)
plt.xticks(range(ncols), col_labels, rotation=45)
plt.yticks(range(nrows), row_labels)
plt.tight_layout()
if plot_name is not None:
plt.savefig("%s_%d.png" % (plot_name, int(time.time())),
format="png", dpi=100, bbox_inches='tight', pad_inches=0)
if show:
plt.show()
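# Hedged usage sketch (not part of the original file): plotting the per-head
# scores of a single sentence with the function above. `sentence` is assumed
# to be the same sentence object used elsewhere in this project, `token_probs`
# a numpy array of shape [max_sentence_length, num_heads] with values in
# [0, 1], and `id2label_tok` the inverse of the model's label2id_tok mapping.
def _example_plot_one_sentence(sentence, token_probs, id2label_tok):
    plot_token_scores(token_probs, sentence, id2label_tok,
                      plot_name="token_scores", show=False)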
def plot_predictions(
all_sentences, all_sentence_probs, all_token_probs,
id2label_tok, html_name, sent_binary=False):
"""
Writes a HTML file with the predictions at the sentence and token level.
:param all_sentences: list of all the sentences in all batches.
:param all_sentence_probs: a list of all the sentence probabilities in all batches;
each batch of sentence_prob has shape [B, num_sent_labels] and must contain normalized data.
:param all_token_probs: a list of all the token probabilities in all batches;
each batch of token_probs has shape [B, M, num_tok_labels] and must contain normalized data.
:param id2label_tok: dictionary mapping ids to token labels.
:param html_name: name of the html file that will be produced.
:param sent_binary: whether the sentence labels are binary or not. This is needed
to use different colours than the token labels if the sentence labels don't match
the token labels (for our purposes, this happens when the sentence labels are binary).
:return: Nothing, just saves a html file with the coloured predictions,
which you can see in your browser.
"""
html_filename = "%s_%d.html" % (html_name, int(time.time()))
print("Plotting predictions across all batches..."
"Saving to html file %s" % html_filename)
with open(html_filename, "w") as html_file:
# Write the usual html file header.
html_file.write(html_header)
# Print a legend of the colours assigned to the sentence and token labels.
html_file.write(' ============================== ')
html_file.write('<br>')
html_file.write('LEGEND')
html_file.write('<br>')
html_file.write(' ============================== ')
html_file.write('<br>')
if sent_binary:
html_file.write('Sentence labels to colours: ')
[r, g, b] = head_colours_sent[0]
html_file.write(
'<font style="background: rgba(%d, %d, %d, %f)"><b>%s</b></font>\n'
% (int(r * 255), int(g * 255), int(b * 255),
1.0, "DEFAULT"))
[r, g, b] = head_colours_sent[1]
html_file.write(
'<font style="background: rgba(%d, %d, %d, %f)"><b>%s</b></font>\n'
% (int(r * 255), int(g * 255), int(b * 255),
1.0, "NON-DEFAULT"))
html_file.write('<br>')
html_file.write('Token labels to colours: ')
else:
html_file.write('Sentence/Token labels to colours: ')
for i in range(len(id2label_tok)):
[r, g, b] = head_colours[i]
html_file.write(
'<font style="background: rgba(%d, %d, %d, %f)"><b>%s</b></font>\n'
% (int(r * 255), int(g * 255), int(b * 255),
1.0, str(id2label_tok[i])))
html_file.write('<br>')
html_file.write(' ============================== ')
html_file.write('<br><br>')
# Go through each batch.
for sentences, sentence_probs, token_probs in tqdm(zip(
all_sentences, all_sentence_probs, all_token_probs),
total=len(all_sentences)):
# Go through each sentence in the batch.
for sent, sent_prob, tok_probs_this_sent in zip(
sentences, sentence_probs, token_probs):
                assert all(0 <= prob <= 1 for prob in sent_prob), \
                    "Passed sent_prob = %s which is not a valid probability!" \
                    % str(sent_prob)
# Represent by colour the gold and the predicted sentence labels.
predicted_sent_label = int(np.argmax(sent_prob))
gold_sent_label = sent.label_sent
alpha_sent = sent_prob[predicted_sent_label]
if sent_binary:
[r_pred, g_pred, b_pred] = head_colours_sent[predicted_sent_label]
[r_gold, g_gold, b_gold] = head_colours_sent[gold_sent_label]
else:
[r_pred, g_pred, b_pred] = head_colours[predicted_sent_label]
[r_gold, g_gold, b_gold] = head_colours[gold_sent_label]
html_file.write(
'<font style="background: rgba(%d, %d, %d, %f)">%s</font>\n'
% (int(r_pred * 255), int(g_pred * 255), int(b_pred * 255),
alpha_sent, "<b>PRED</b>"))
html_file.write(
'<font style="background: rgba(%d, %d, %d, %f)">%s</font>\n'
% (int(r_gold * 255), int(g_gold * 255), int(b_gold * 255),
0.9, "<b>GOLD</b>"))
# Write each token in the colour background of its most probable
# head prediction. Incorrect predictions will be underlined.
for token, tok_prob in zip(sent.tokens, tok_probs_this_sent):
                    assert all(0 <= prob <= 1 for prob in tok_prob), \
                        "Passed tok_prob = %s which is not a valid probability!" \
                        % str(tok_prob)
predicted_head = int(np.argmax(tok_prob))
alpha_tok = tok_prob[predicted_head]
[r, g, b] = head_colours[predicted_head]
if predicted_head == token.label_tok:
token_html = "%s" % token.value
else:
token_html = "<u>%s</u>" % token.value
html_file.write(
'<font style="background: rgba(%d, %d, %d, %f)">%s</font>\n'
% (int(r * 255), int(g * 255), int(b * 255),
alpha_tok, token_html))
html_file.write('<br><br>')
html_file.write(html_footer)
print("HTML visualizations: Done!")
| 8,269 | 44.191257 | 98 | py |
P-STMO | P-STMO-main/run_3dhp.py | import os
import glob
import torch
import random
import logging
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.utils.data
import torch.optim as optim
from common.opt import opts
from common.utils import *
from common.camera import get_uvd2xyz
from common.load_data_3dhp_mae import Fusion
from common.h36m_dataset import Human36mDataset
from model.block.refine import refine
from model.stmo import Model
from model.stmo_pretrain import Model_MAE
from thop import clever_format
from thop.profile import profile
import scipy.io as scio
opt = opts().parse()
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
def train(opt, actions, train_loader, model, optimizer, epoch):
return step('train', opt, actions, train_loader, model, optimizer, epoch)
def val(opt, actions, val_loader, model):
with torch.no_grad():
return step('test', opt, actions, val_loader, model)
def step(split, opt, actions, dataLoader, model, optimizer=None, epoch=None):
model_trans = model['trans']
model_refine = model['refine']
model_MAE = model['MAE']
if split == 'train':
model_trans.train()
model_refine.train()
model_MAE.train()
else:
model_trans.eval()
model_refine.eval()
model_MAE.eval()
loss_all = {'loss': AccumLoss()}
error_sum = AccumLoss()
error_sum_test = AccumLoss()
action_error_sum = define_error_list(actions)
action_error_sum_post_out = define_error_list(actions)
action_error_sum_MAE = define_error_list(actions)
joints_left = [5, 6, 7, 11, 12, 13]
joints_right = [2, 3, 4, 8, 9, 10]
data_inference = {}
for i, data in enumerate(tqdm(dataLoader, 0)):
if opt.MAE:
#batch_cam, input_2D, seq, subject, scale, bb_box, cam_ind = data
if split == "train":
batch_cam, input_2D, seq, subject, scale, bb_box, cam_ind = data
else:
batch_cam, input_2D, seq, scale, bb_box = data
[input_2D, batch_cam, scale, bb_box] = get_varialbe(split,[input_2D, batch_cam, scale, bb_box])
N = input_2D.size(0)
f = opt.frames
mask_num = int(f*opt.temporal_mask_rate)
mask = np.hstack([
np.zeros(f - mask_num),
np.ones(mask_num),
]).flatten()
np.random.seed()
np.random.shuffle(mask)
mask = torch.from_numpy(mask).to(torch.bool).cuda()
spatial_mask = np.zeros((f, 17), dtype=bool)
for k in range(f):
ran = random.sample(range(0, 16), opt.spatial_mask_num)
spatial_mask[k, ran] = True
if opt.test_augmentation and split == 'test':
input_2D, output_2D = input_augmentation_MAE(input_2D, model_MAE, joints_left, joints_right, mask, spatial_mask)
else:
input_2D = input_2D.view(N, -1, opt.n_joints, opt.in_channels, 1).permute(0, 3, 1, 2, 4).type(
torch.cuda.FloatTensor)
output_2D = model_MAE(input_2D, mask, spatial_mask)
input_2D = input_2D.permute(0, 2, 3, 1, 4).view(N, -1, opt.n_joints, 2)
output_2D = output_2D.permute(0, 2, 3, 1, 4).view(N, -1, opt.n_joints, 2)
loss = mpjpe_cal(output_2D, torch.cat((input_2D[:, ~mask], input_2D[:, mask]), dim=1))
else:
#batch_cam, gt_3D, input_2D, action, subject, scale, bb_box, cam_ind = data
if split == "train":
batch_cam, gt_3D, input_2D, seq, subject, scale, bb_box, cam_ind = data
else:
batch_cam, gt_3D, input_2D, seq, scale, bb_box = data
[input_2D, gt_3D, batch_cam, scale, bb_box] = get_varialbe(split,
[input_2D, gt_3D, batch_cam, scale, bb_box])
N = input_2D.size(0)
out_target = gt_3D.clone().view(N, -1, opt.out_joints, opt.out_channels)
out_target[:, :, 14] = 0
gt_3D = gt_3D.view(N, -1, opt.out_joints, opt.out_channels).type(torch.cuda.FloatTensor)
if out_target.size(1) > 1:
out_target_single = out_target[:, opt.pad].unsqueeze(1)
gt_3D_single = gt_3D[:, opt.pad].unsqueeze(1)
else:
out_target_single = out_target
gt_3D_single = gt_3D
if opt.test_augmentation and split =='test':
input_2D, output_3D, output_3D_VTE = input_augmentation(input_2D, model_trans, joints_left, joints_right)
else:
input_2D = input_2D.view(N, -1, opt.n_joints, opt.in_channels, 1).permute(0, 3, 1, 2, 4).type(torch.cuda.FloatTensor)
output_3D, output_3D_VTE = model_trans(input_2D)
output_3D_VTE = output_3D_VTE.permute(0, 2, 3, 4, 1).contiguous().view(N, -1, opt.out_joints, opt.out_channels)
output_3D = output_3D.permute(0, 2, 3, 4, 1).contiguous().view(N, -1, opt.out_joints, opt.out_channels)
output_3D_VTE = output_3D_VTE * scale.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).repeat(1, output_3D_VTE.size(1),opt.out_joints, opt.out_channels)
output_3D = output_3D * scale.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).repeat(1, output_3D.size(1),opt.out_joints, opt.out_channels)
output_3D_single = output_3D
if split == 'train':
pred_out = output_3D_VTE
elif split == 'test':
pred_out = output_3D_single
input_2D = input_2D.permute(0, 2, 3, 1, 4).view(N, -1, opt.n_joints ,2)
if opt.refine:
pred_uv = input_2D
uvd = torch.cat((pred_uv[:, opt.pad, :, :].unsqueeze(1), output_3D_single[:, :, :, 2].unsqueeze(-1)), -1)
xyz = get_uvd2xyz(uvd, gt_3D_single, batch_cam)
xyz[:, :, 0, :] = 0
post_out = model_refine(output_3D_single, xyz)
loss = mpjpe_cal(post_out, out_target_single)
else:
loss = mpjpe_cal(pred_out, out_target) + mpjpe_cal(output_3D_single, out_target_single)
loss_all['loss'].update(loss.detach().cpu().numpy() * N, N)
if split == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
if not opt.MAE:
if opt.refine:
post_out[:,:,14,:] = 0
joint_error = mpjpe_cal(post_out, out_target_single).item()
else:
pred_out[:,:,14,:] = 0
joint_error = mpjpe_cal(pred_out, out_target).item()
error_sum.update(joint_error*N, N)
elif split == 'test':
if opt.MAE:
# action_error_sum_MAE = test_calculation(output_2D, torch.cat((input_2D[:, ~mask], input_2D[:, mask]), dim=1), action, action_error_sum_MAE, opt.dataset,
# subject,MAE=opt.MAE)
joint_error_test = mpjpe_cal(torch.cat((input_2D[:, ~mask], input_2D[:, mask]), dim=1), output_2D).item()
else:
pred_out[:, :, 14, :] = 0
#action_error_sum = test_calculation(pred_out, out_target, action, action_error_sum, opt.dataset, subject)
joint_error_test = mpjpe_cal(pred_out, out_target).item()
out = pred_out
# if opt.refine:
# post_out[:, :, 14, :] = 0
# action_error_sum_post_out = test_calculation(post_out, out_target, action, action_error_sum_post_out, opt.dataset, subject)
if opt.train == 0:
for seq_cnt in range(len(seq)):
seq_name = seq[seq_cnt]
if seq_name in data_inference:
data_inference[seq_name] = np.concatenate(
(data_inference[seq_name], out[seq_cnt].permute(2, 1, 0).cpu().numpy()), axis=2)
else:
data_inference[seq_name] = out[seq_cnt].permute(2, 1, 0).cpu().numpy()
error_sum_test.update(joint_error_test * N, N)
if split == 'train':
if opt.MAE:
return loss_all['loss'].avg*1000
else:
return loss_all['loss'].avg, error_sum.avg
elif split == 'test':
if opt.MAE:
#p1, p2 = print_error(opt.dataset, action_error_sum_MAE, opt.train)
return error_sum_test.avg*1000
if opt.refine:
p1, p2 = print_error(opt.dataset, action_error_sum_post_out, opt.train)
else:
#p1, p2 = print_error(opt.dataset, action_error_sum, opt.train)
if opt.train == 0:
for seq_name in data_inference.keys():
data_inference[seq_name] = data_inference[seq_name][:, :, None, :]
mat_path = os.path.join(opt.checkpoint, 'inference_data.mat')
scio.savemat(mat_path, data_inference)
return error_sum_test.avg
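# ----------------------------------------------------------------------------
# Hedged illustration (not part of the original file): a minimal, standalone
# sketch of the temporal/spatial masking built inline in step() above for MAE
# pretraining. A fraction `temporal_mask_rate` of the frames is masked along
# the time axis, and `spatial_mask_num` joints are masked in every frame.
# The default argument values are illustrative assumptions only.
# ----------------------------------------------------------------------------
def build_masks_example(frames=243, temporal_mask_rate=0.1,
                        spatial_mask_num=2, n_joints=17):
    mask_num = int(frames * temporal_mask_rate)
    temporal_mask = np.hstack([np.zeros(frames - mask_num), np.ones(mask_num)])
    np.random.shuffle(temporal_mask)
    temporal_mask = torch.from_numpy(temporal_mask).to(torch.bool)
    spatial_mask = np.zeros((frames, n_joints), dtype=bool)
    for k in range(frames):
        # step() samples joint indices from range(0, 16); sampling over all
        # n_joints here is a simplification for illustration.
        spatial_mask[k, random.sample(range(n_joints), spatial_mask_num)] = True
    return temporal_mask, spatial_mask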
def input_augmentation_MAE(input_2D, model_trans, joints_left, joints_right, mask, spatial_mask=None):
N, _, T, J, C = input_2D.shape
input_2D_flip = input_2D[:, 1].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
input_2D_non_flip = input_2D[:, 0].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
output_2D_flip = model_trans(input_2D_flip, mask, spatial_mask)
output_2D_flip[:, 0] *= -1
output_2D_flip[:, :, :, joints_left + joints_right] = output_2D_flip[:, :, :, joints_right + joints_left]
output_2D_non_flip = model_trans(input_2D_non_flip, mask, spatial_mask)
output_2D = (output_2D_non_flip + output_2D_flip) / 2
input_2D = input_2D_non_flip
return input_2D, output_2D
def input_augmentation(input_2D, model_trans, joints_left, joints_right):
N, _, T, J, C = input_2D.shape
input_2D_flip = input_2D[:, 1].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
input_2D_non_flip = input_2D[:, 0].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
output_3D_flip, output_3D_flip_VTE = model_trans(input_2D_flip)
output_3D_flip_VTE[:, 0] *= -1
output_3D_flip[:, 0] *= -1
output_3D_flip_VTE[:, :, :, joints_left + joints_right] = output_3D_flip_VTE[:, :, :, joints_right + joints_left]
output_3D_flip[:, :, :, joints_left + joints_right] = output_3D_flip[:, :, :, joints_right + joints_left]
output_3D_non_flip, output_3D_non_flip_VTE = model_trans(input_2D_non_flip)
output_3D_VTE = (output_3D_non_flip_VTE + output_3D_flip_VTE) / 2
output_3D = (output_3D_non_flip + output_3D_flip) / 2
input_2D = input_2D_non_flip
return input_2D, output_3D, output_3D_VTE
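# Hedged illustration (not part of the original file): the test-time flip
# augmentation above averages the prediction on the original clip with the
# prediction on a horizontally mirrored clip. Mirroring negates the x channel
# and swaps the left/right joint sets, so the mirrored output must be
# un-mirrored the same way before averaging. A minimal sketch on a generic
# pose tensor of shape [N, C, T, J, M], assuming `model` follows the same
# input/output layout as model_trans above and returns a single tensor.
def flip_average_example(model, pose, joints_left, joints_right):
    flipped = pose.clone()
    flipped[:, 0] *= -1 # mirror the x coordinate
    flipped[:, :, :, joints_left + joints_right] = \
        flipped[:, :, :, joints_right + joints_left] # swap left/right joints
    out = model(pose)
    out_flip = model(flipped)
    out_flip[:, 0] *= -1 # un-mirror the prediction
    out_flip[:, :, :, joints_left + joints_right] = \
        out_flip[:, :, :, joints_right + joints_left]
    return (out + out_flip) / 2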
if __name__ == '__main__':
opt.manualSeed = 1
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.cuda.manual_seed_all(opt.manualSeed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
if opt.train == 1:
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S', \
filename=os.path.join(opt.checkpoint, 'train.log'), level=logging.INFO)
root_path = opt.root_path
dataset_path = root_path + 'data_3d_' + opt.dataset + '.npz'
#dataset = Human36mDataset(dataset_path, opt)
actions = define_actions(opt.actions)
if opt.train:
#train_data = Fusion(opt=opt, train=True, root_path=root_path)
train_data = Fusion(opt=opt, train=True, root_path=root_path, MAE=opt.MAE)
train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers), pin_memory=True)
if opt.test:
#test_data = Fusion(opt=opt, train=False,root_path =root_path)
test_data = Fusion(opt=opt, train=False, root_path=root_path, MAE=opt.MAE)
test_dataloader = torch.utils.data.DataLoader(test_data, batch_size=opt.batchSize,
shuffle=False, num_workers=int(opt.workers), pin_memory=True)
opt.out_joints = 17
model = {}
model['trans'] = nn.DataParallel(Model(opt)).cuda()
model['refine'] = nn.DataParallel(refine(opt)).cuda()
model['MAE'] = nn.DataParallel(Model_MAE(opt)).cuda()
model_params = 0
for parameter in model['trans'].parameters():
model_params += parameter.numel()
print('INFO: Trainable parameter count:', model_params)
if opt.MAE_test_reload==1:
model_dict = model['MAE'].state_dict()
MAE_test_path = opt.previous_dir
pre_dict_MAE = torch.load(MAE_test_path)
for name, key in model_dict.items():
model_dict[name] = pre_dict_MAE[name]
model['MAE'].load_state_dict(model_dict)
if opt.MAE_reload == 1:
model_dict = model['trans'].state_dict()
MAE_path = opt.previous_dir
pre_dict = torch.load(MAE_path)
state_dict = {k: v for k, v in pre_dict.items() if k in model_dict.keys()}
model_dict.update(state_dict)
model['trans'].load_state_dict(model_dict)
model_dict = model['trans'].state_dict()
if opt.reload == 1:
no_refine_path = opt.previous_dir
pre_dict = torch.load(no_refine_path)
for name, key in model_dict.items():
model_dict[name] = pre_dict[name]
model['trans'].load_state_dict(model_dict)
refine_dict = model['refine'].state_dict()
if opt.refine_reload == 1:
refine_path = opt.previous_refine_name
pre_dict_refine = torch.load(refine_path)
for name, key in refine_dict.items():
refine_dict[name] = pre_dict_refine[name]
model['refine'].load_state_dict(refine_dict)
all_param = []
lr = opt.lr
for i_model in model:
all_param += list(model[i_model].parameters())
optimizer_all = optim.Adam(all_param, lr=opt.lr, amsgrad=True)
for epoch in range(1, opt.nepoch):
if opt.train == 1:
if not opt.MAE:
loss, mpjpe = train(opt, actions, train_dataloader, model, optimizer_all, epoch)
else:
loss = train(opt, actions, train_dataloader, model, optimizer_all, epoch)
if opt.test == 1:
if not opt.MAE:
p1 = val(opt, actions, test_dataloader, model)
else:
p1 = val(opt, actions, test_dataloader, model)
data_threshold = p1
if opt.train and data_threshold < opt.previous_best_threshold:
if opt.MAE:
opt.previous_name = save_model(opt.previous_name, opt.checkpoint, epoch, data_threshold,
model['MAE'], 'MAE')
else:
opt.previous_name = save_model(opt.previous_name, opt.checkpoint, epoch, data_threshold, model['trans'], 'no_refine')
if opt.refine:
opt.previous_refine_name = save_model(opt.previous_refine_name, opt.checkpoint, epoch,
data_threshold, model['refine'], 'refine')
opt.previous_best_threshold = data_threshold
if opt.train == 0:
print('p1: %.2f' % (p1))
break
else:
if opt.MAE:
logging.info('epoch: %d, lr: %.7f, loss: %.4f, p1: %.2f' % (
epoch, lr, loss, p1))
print('e: %d, lr: %.7f, loss: %.4f, p1: %.2f' % (epoch, lr, loss, p1))
else:
logging.info('epoch: %d, lr: %.7f, loss: %.4f, MPJPE: %.2f, p1: %.2f' % (epoch, lr, loss, mpjpe, p1))
print('e: %d, lr: %.7f, loss: %.4f, M: %.2f, p1: %.2f' % (epoch, lr, loss, mpjpe, p1))
if epoch % opt.large_decay_epoch == 0:
for param_group in optimizer_all.param_groups:
param_group['lr'] *= opt.lr_decay_large
lr *= opt.lr_decay_large
else:
for param_group in optimizer_all.param_groups:
param_group['lr'] *= opt.lr_decay
lr *= opt.lr_decay
| 16,320 | 38.233173 | 170 | py |
P-STMO | P-STMO-main/run.py | import os
import glob
import torch
import random
import logging
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.utils.data
import torch.optim as optim
from common.opt import opts
from common.utils import *
from common.camera import get_uvd2xyz
from common.load_data_hm36_tds import Fusion
from common.h36m_dataset import Human36mDataset
from model.block.refine import refine
from model.stmo import Model
from model.stmo_pretrain import Model_MAE
from thop import clever_format
from thop.profile import profile
opt = opts().parse()
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
def train(opt, actions, train_loader, model, optimizer, epoch):
return step('train', opt, actions, train_loader, model, optimizer, epoch)
def val(opt, actions, val_loader, model):
with torch.no_grad():
return step('test', opt, actions, val_loader, model)
def step(split, opt, actions, dataLoader, model, optimizer=None, epoch=None):
model_trans = model['trans']
model_refine = model['refine']
model_MAE = model['MAE']
if split == 'train':
model_trans.train()
model_refine.train()
model_MAE.train()
else:
model_trans.eval()
model_refine.eval()
model_MAE.eval()
loss_all = {'loss': AccumLoss()}
error_sum = AccumLoss()
action_error_sum = define_error_list(actions)
action_error_sum_post_out = define_error_list(actions)
action_error_sum_MAE = define_error_list(actions)
joints_left = [4, 5, 6, 11, 12, 13]
joints_right = [1, 2, 3, 14, 15, 16]
for i, data in enumerate(tqdm(dataLoader, 0)):
if opt.MAE:
batch_cam, input_2D, action, subject, scale, bb_box, cam_ind = data
[input_2D, batch_cam, scale, bb_box] = get_varialbe(split,[input_2D, batch_cam, scale, bb_box])
N = input_2D.size(0)
f = opt.frames
mask_num = int(f*opt.temporal_mask_rate)
mask = np.hstack([
np.zeros(f - mask_num),
np.ones(mask_num),
]).flatten()
np.random.seed()
np.random.shuffle(mask)
mask = torch.from_numpy(mask).to(torch.bool).cuda()
spatial_mask = np.zeros((f, 17), dtype=bool)
for k in range(f):
ran = random.sample(range(0, 16), opt.spatial_mask_num)
spatial_mask[k, ran] = True
if opt.test_augmentation and split == 'test':
input_2D, output_2D = input_augmentation_MAE(input_2D, model_MAE, joints_left, joints_right, mask, spatial_mask)
else:
input_2D = input_2D.view(N, -1, opt.n_joints, opt.in_channels, 1).permute(0, 3, 1, 2, 4).type(
torch.cuda.FloatTensor)
output_2D = model_MAE(input_2D, mask, spatial_mask)
input_2D = input_2D.permute(0, 2, 3, 1, 4).view(N, -1, opt.n_joints, 2)
output_2D = output_2D.permute(0, 2, 3, 1, 4).view(N, -1, opt.n_joints, 2)
loss = mpjpe_cal(output_2D, torch.cat((input_2D[:, ~mask], input_2D[:, mask]), dim=1))
else:
batch_cam, gt_3D, input_2D, action, subject, scale, bb_box, cam_ind = data
[input_2D, gt_3D, batch_cam, scale, bb_box] = get_varialbe(split,
[input_2D, gt_3D, batch_cam, scale, bb_box])
N = input_2D.size(0)
out_target = gt_3D.clone().view(N, -1, opt.out_joints, opt.out_channels)
out_target[:, :, 0] = 0
gt_3D = gt_3D.view(N, -1, opt.out_joints, opt.out_channels).type(torch.cuda.FloatTensor)
if out_target.size(1) > 1:
out_target_single = out_target[:, opt.pad].unsqueeze(1)
gt_3D_single = gt_3D[:, opt.pad].unsqueeze(1)
else:
out_target_single = out_target
gt_3D_single = gt_3D
if opt.test_augmentation and split =='test':
input_2D, output_3D, output_3D_VTE = input_augmentation(input_2D, model_trans, joints_left, joints_right)
else:
input_2D = input_2D.view(N, -1, opt.n_joints, opt.in_channels, 1).permute(0, 3, 1, 2, 4).type(torch.cuda.FloatTensor)
output_3D, output_3D_VTE = model_trans(input_2D)
output_3D_VTE = output_3D_VTE.permute(0, 2, 3, 4, 1).contiguous().view(N, -1, opt.out_joints, opt.out_channels)
output_3D = output_3D.permute(0, 2, 3, 4, 1).contiguous().view(N, -1, opt.out_joints, opt.out_channels)
output_3D_VTE = output_3D_VTE * scale.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).repeat(1, output_3D_VTE.size(1),opt.out_joints, opt.out_channels)
output_3D = output_3D * scale.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).repeat(1, output_3D.size(1),opt.out_joints, opt.out_channels)
output_3D_single = output_3D
if split == 'train':
pred_out = output_3D_VTE
elif split == 'test':
pred_out = output_3D_single
input_2D = input_2D.permute(0, 2, 3, 1, 4).view(N, -1, opt.n_joints ,2)
if opt.refine:
pred_uv = input_2D
uvd = torch.cat((pred_uv[:, opt.pad, :, :].unsqueeze(1), output_3D_single[:, :, :, 2].unsqueeze(-1)), -1)
xyz = get_uvd2xyz(uvd, gt_3D_single, batch_cam)
xyz[:, :, 0, :] = 0
post_out = model_refine(output_3D_single, xyz)
loss = mpjpe_cal(post_out, out_target_single)
else:
loss = mpjpe_cal(pred_out, out_target) + mpjpe_cal(output_3D_single, out_target_single)
loss_all['loss'].update(loss.detach().cpu().numpy() * N, N)
if split == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
if not opt.MAE:
if opt.refine:
post_out[:,:,0,:] = 0
joint_error = mpjpe_cal(post_out, out_target_single).item()
else:
pred_out[:,:,0,:] = 0
joint_error = mpjpe_cal(pred_out, out_target).item()
error_sum.update(joint_error*N, N)
elif split == 'test':
if opt.MAE:
action_error_sum_MAE = test_calculation(output_2D, torch.cat((input_2D[:, ~mask], input_2D[:, mask]), dim=1), action, action_error_sum_MAE, opt.dataset,
subject,MAE=opt.MAE)
else:
pred_out[:, :, 0, :] = 0
action_error_sum = test_calculation(pred_out, out_target, action, action_error_sum, opt.dataset, subject)
if opt.refine:
post_out[:, :, 0, :] = 0
action_error_sum_post_out = test_calculation(post_out, out_target, action, action_error_sum_post_out, opt.dataset, subject)
if split == 'train':
if opt.MAE:
return loss_all['loss'].avg
else:
return loss_all['loss'].avg, error_sum.avg*1000
elif split == 'test':
if opt.MAE:
p1, p2 = print_error(opt.dataset, action_error_sum_MAE, opt.train)
return p1, p2, loss_all['loss'].avg
if opt.refine:
p1, p2 = print_error(opt.dataset, action_error_sum_post_out, opt.train)
else:
p1, p2 = print_error(opt.dataset, action_error_sum, opt.train)
return p1, p2
def input_augmentation_MAE(input_2D, model_trans, joints_left, joints_right, mask, spatial_mask=None):
N, _, T, J, C = input_2D.shape
input_2D_flip = input_2D[:, 1].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
input_2D_non_flip = input_2D[:, 0].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
output_2D_flip = model_trans(input_2D_flip, mask, spatial_mask)
output_2D_flip[:, 0] *= -1
output_2D_flip[:, :, :, joints_left + joints_right] = output_2D_flip[:, :, :, joints_right + joints_left]
output_2D_non_flip = model_trans(input_2D_non_flip, mask, spatial_mask)
output_2D = (output_2D_non_flip + output_2D_flip) / 2
input_2D = input_2D_non_flip
return input_2D, output_2D
def input_augmentation(input_2D, model_trans, joints_left, joints_right):
N, _, T, J, C = input_2D.shape
input_2D_flip = input_2D[:, 1].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
input_2D_non_flip = input_2D[:, 0].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
output_3D_flip, output_3D_flip_VTE = model_trans(input_2D_flip)
output_3D_flip_VTE[:, 0] *= -1
output_3D_flip[:, 0] *= -1
output_3D_flip_VTE[:, :, :, joints_left + joints_right] = output_3D_flip_VTE[:, :, :, joints_right + joints_left]
output_3D_flip[:, :, :, joints_left + joints_right] = output_3D_flip[:, :, :, joints_right + joints_left]
output_3D_non_flip, output_3D_non_flip_VTE = model_trans(input_2D_non_flip)
output_3D_VTE = (output_3D_non_flip_VTE + output_3D_flip_VTE) / 2
output_3D = (output_3D_non_flip + output_3D_flip) / 2
input_2D = input_2D_non_flip
return input_2D, output_3D, output_3D_VTE
if __name__ == '__main__':
opt.manualSeed = 1
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.cuda.manual_seed_all(opt.manualSeed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
if opt.train == 1:
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S', \
filename=os.path.join(opt.checkpoint, 'train.log'), level=logging.INFO)
root_path = opt.root_path
dataset_path = root_path + 'data_3d_' + opt.dataset + '.npz'
dataset = Human36mDataset(dataset_path, opt)
actions = define_actions(opt.actions)
if opt.train:
train_data = Fusion(opt=opt, train=True, dataset=dataset, root_path=root_path, MAE=opt.MAE, tds=opt.t_downsample)
train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers), pin_memory=True)
if opt.test:
test_data = Fusion(opt=opt, train=False,dataset=dataset, root_path =root_path, MAE=opt.MAE, tds=opt.t_downsample)
test_dataloader = torch.utils.data.DataLoader(test_data, batch_size=opt.batchSize,
shuffle=False, num_workers=int(opt.workers), pin_memory=True)
opt.out_joints = dataset.skeleton().num_joints()
print(torch.cuda.is_available())
# model_test=Model(opt)
# dsize = (1, 2, 243, 17, 1)
# inputs = torch.randn(dsize)
# total_ops, total_params = profile(model_test, (inputs,), verbose=False)
# macs, params = clever_format([total_ops, total_params], "%.3f")
# print('MACs:', macs)
# print('Paras:', params)
model = {}
model['trans'] = nn.DataParallel(Model(opt)).cuda()
model['refine'] = nn.DataParallel(refine(opt)).cuda()
model['MAE'] = nn.DataParallel(Model_MAE(opt)).cuda()
model_params = 0
for parameter in model['trans'].parameters():
model_params += parameter.numel()
print('INFO: Trainable parameter count:', model_params)
# if opt.MAE_test_reload==1:
# model_dict = model['MAE'].state_dict()
#
# MAE_test_path = opt.previous_dir
#
# pre_dict_MAE = torch.load(MAE_test_path)
# for name, key in model_dict.items():
# model_dict[name] = pre_dict_MAE[name]
# model['MAE'].load_state_dict(model_dict)
if opt.MAE_reload == 1:
model_dict = model['trans'].state_dict()
MAE_path = opt.previous_dir
pre_dict = torch.load(MAE_path)
state_dict = {k: v for k, v in pre_dict.items() if k in model_dict.keys()}
model_dict.update(state_dict)
model['trans'].load_state_dict(model_dict)
model_dict = model['trans'].state_dict()
if opt.reload == 1:
no_refine_path = opt.previous_dir
pre_dict = torch.load(no_refine_path)
for name, key in model_dict.items():
model_dict[name] = pre_dict[name]
model['trans'].load_state_dict(model_dict)
refine_dict = model['refine'].state_dict()
if opt.refine_reload == 1:
refine_path = opt.previous_refine_name
pre_dict_refine = torch.load(refine_path)
for name, key in refine_dict.items():
refine_dict[name] = pre_dict_refine[name]
model['refine'].load_state_dict(refine_dict)
all_param = []
lr = opt.lr
for i_model in model:
all_param += list(model[i_model].parameters())
optimizer_all = optim.Adam(all_param, lr=opt.lr, amsgrad=True)
for epoch in range(1, opt.nepoch):
if opt.train == 1:
if not opt.MAE:
loss, mpjpe = train(opt, actions, train_dataloader, model, optimizer_all, epoch)
else:
loss = train(opt, actions, train_dataloader, model, optimizer_all, epoch)
if opt.test == 1:
if not opt.MAE:
p1, p2 = val(opt, actions, test_dataloader, model)
else:
p1, p2, loss_test = val(opt, actions, test_dataloader, model)
data_threshold = p1
if opt.train and data_threshold < opt.previous_best_threshold:
if opt.MAE:
opt.previous_name = save_model(opt.previous_name, opt.checkpoint, epoch, data_threshold,
model['MAE'], 'pretrain')
else:
opt.previous_name = save_model(opt.previous_name, opt.checkpoint, epoch, data_threshold, model['trans'], 'no_refine')
if opt.refine:
opt.previous_refine_name = save_model(opt.previous_refine_name, opt.checkpoint, epoch,
data_threshold, model['refine'], 'refine')
opt.previous_best_threshold = data_threshold
if opt.train == 0:
print('p1: %.2f, p2: %.2f' % (p1, p2))
break
else:
if opt.MAE:
logging.info('epoch: %d, lr: %.7f, loss: %.4f, loss_test: %.4f, p1: %.2f, p2: %.2f' % (
epoch, lr, loss, loss_test, p1, p2))
print('e: %d, lr: %.7f, loss: %.4f, loss_test: %.4f, p1: %.2f, p2: %.2f' % (epoch, lr, loss, loss_test, p1, p2))
else:
logging.info('epoch: %d, lr: %.7f, loss: %.4f, MPJPE: %.2f, p1: %.2f, p2: %.2f' % (epoch, lr, loss, mpjpe, p1, p2))
print('e: %d, lr: %.7f, loss: %.4f, M: %.2f, p1: %.2f, p2: %.2f' % (epoch, lr, loss, mpjpe, p1, p2))
if epoch % opt.large_decay_epoch == 0:
for param_group in optimizer_all.param_groups:
param_group['lr'] *= opt.lr_decay_large
lr *= opt.lr_decay_large
else:
for param_group in optimizer_all.param_groups:
param_group['lr'] *= opt.lr_decay
lr *= opt.lr_decay
| 15,226 | 37.745547 | 168 | py |
P-STMO | P-STMO-main/run_in_the_wild.py | import os
import glob
import torch
import random
import logging
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.utils.data
import torch.optim as optim
from common.opt import opts
from common.utils import *
from common.camera import get_uvd2xyz
from common.load_data_hm36_tds_in_the_wild import Fusion
from common.h36m_dataset import Human36mDataset
from model.block.refine import refine
from model.stmo import Model
from model.stmo_pretrain import Model_MAE
from thop import clever_format
from thop.profile import profile
opt = opts().parse()
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu
def train(opt, actions, train_loader, model, optimizer, epoch):
return step('train', opt, actions, train_loader, model, optimizer, epoch)
def val(opt, actions, val_loader, model):
with torch.no_grad():
return step('test', opt, actions, val_loader, model)
def step(split, opt, actions, dataLoader, model, optimizer=None, epoch=None):
model_trans = model['trans']
model_refine = model['refine']
model_MAE = model['MAE']
if split == 'train':
model_trans.train()
model_refine.train()
model_MAE.train()
else:
model_trans.eval()
model_refine.eval()
model_MAE.eval()
loss_all = {'loss': AccumLoss()}
error_sum = AccumLoss()
action_error_sum = define_error_list(actions)
action_error_sum_post_out = define_error_list(actions)
action_error_sum_MAE = define_error_list(actions)
joints_left = [4, 5, 6, 11, 12, 13]
joints_right = [1, 2, 3, 14, 15, 16]
for i, data in enumerate(tqdm(dataLoader, 0)):
if opt.MAE:
batch_cam, input_2D, action, subject, scale, bb_box, cam_ind = data
[input_2D, batch_cam, scale, bb_box] = get_varialbe(split,[input_2D, batch_cam, scale, bb_box])
N = input_2D.size(0)
f = opt.frames
mask_num = int(f*opt.temporal_mask_rate)
mask = np.hstack([
np.zeros(f - mask_num),
np.ones(mask_num),
]).flatten()
np.random.seed()
np.random.shuffle(mask)
mask = torch.from_numpy(mask).to(torch.bool).cuda()
spatial_mask = np.zeros((f, 17), dtype=bool)
for k in range(f):
ran = random.sample(range(0, 16), opt.spatial_mask_num)
spatial_mask[k, ran] = True
if opt.test_augmentation and split == 'test':
input_2D, output_2D = input_augmentation_MAE(input_2D, model_MAE, joints_left, joints_right, mask, spatial_mask)
else:
input_2D = input_2D.view(N, -1, opt.n_joints, opt.in_channels, 1).permute(0, 3, 1, 2, 4).type(
torch.cuda.FloatTensor)
output_2D = model_MAE(input_2D, mask, spatial_mask)
input_2D = input_2D.permute(0, 2, 3, 1, 4).view(N, -1, opt.n_joints, 2)
output_2D = output_2D.permute(0, 2, 3, 1, 4).view(N, -1, opt.n_joints, 2)
#a = input_2D[:, mask]
loss = mpjpe_cal(output_2D, torch.cat((input_2D[:, ~mask], input_2D[:, mask]), dim=1))
#my_loss_one = torch.mean(torch.norm(output_2D[20,180]-a[20,180], dim=1))
else:
batch_cam, gt_3D, input_2D, action, subject, scale, bb_box, cam_ind = data
[input_2D, gt_3D, batch_cam, scale, bb_box] = get_varialbe(split,
[input_2D, gt_3D, batch_cam, scale, bb_box])
N = input_2D.size(0)
out_target = gt_3D.clone().view(N, -1, opt.out_joints, opt.out_channels)
out_target[:, :, 0] = 0
gt_3D = gt_3D.view(N, -1, opt.out_joints, opt.out_channels).type(torch.cuda.FloatTensor)
if out_target.size(1) > 1:
out_target_single = out_target[:, opt.pad].unsqueeze(1)
gt_3D_single = gt_3D[:, opt.pad].unsqueeze(1)
else:
out_target_single = out_target
gt_3D_single = gt_3D
if opt.test_augmentation and split =='test':
input_2D, output_3D, output_3D_VTE = input_augmentation(input_2D, model_trans, joints_left, joints_right)
else:
input_2D = input_2D.view(N, -1, opt.n_joints, opt.in_channels, 1).permute(0, 3, 1, 2, 4).type(torch.cuda.FloatTensor)
output_3D, output_3D_VTE = model_trans(input_2D)
output_3D_VTE = output_3D_VTE.permute(0, 2, 3, 4, 1).contiguous().view(N, -1, opt.out_joints, opt.out_channels)
output_3D = output_3D.permute(0, 2, 3, 4, 1).contiguous().view(N, -1, opt.out_joints, opt.out_channels)
output_3D_VTE = output_3D_VTE * scale.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).repeat(1, output_3D_VTE.size(1),opt.out_joints, opt.out_channels)
output_3D = output_3D * scale.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).repeat(1, output_3D.size(1),opt.out_joints, opt.out_channels)
output_3D_single = output_3D
if split == 'train':
pred_out = output_3D_VTE
elif split == 'test':
pred_out = output_3D_single
input_2D = input_2D.permute(0, 2, 3, 1, 4).view(N, -1, opt.n_joints ,2)
if opt.refine:
pred_uv = input_2D
uvd = torch.cat((pred_uv[:, opt.pad, :, :].unsqueeze(1), output_3D_single[:, :, :, 2].unsqueeze(-1)), -1)
xyz = get_uvd2xyz(uvd, gt_3D_single, batch_cam)
xyz[:, :, 0, :] = 0
post_out = model_refine(output_3D_single, xyz)
loss = mpjpe_cal(post_out, out_target_single)
else:
loss = mpjpe_cal(pred_out, out_target) + mpjpe_cal(output_3D_single, out_target_single)
loss_all['loss'].update(loss.detach().cpu().numpy() * N, N)
if split == 'train':
optimizer.zero_grad()
loss.backward()
optimizer.step()
if not opt.MAE:
if opt.refine:
post_out[:,:,0,:] = 0
joint_error = mpjpe_cal(post_out, out_target_single).item()
else:
pred_out[:,:,0,:] = 0
joint_error = mpjpe_cal(pred_out, out_target).item()
error_sum.update(joint_error*N, N)
elif split == 'test':
if opt.MAE:
action_error_sum_MAE = test_calculation(output_2D, torch.cat((input_2D[:, ~mask], input_2D[:, mask]), dim=1), action, action_error_sum_MAE, opt.dataset,
subject,MAE=opt.MAE)
else:
pred_out[:, :, 0, :] = 0
action_error_sum = test_calculation(pred_out, out_target, action, action_error_sum, opt.dataset, subject)
if opt.refine:
post_out[:, :, 0, :] = 0
action_error_sum_post_out = test_calculation(post_out, out_target, action, action_error_sum_post_out, opt.dataset, subject)
if split == 'train':
if opt.MAE:
return loss_all['loss'].avg
else:
return loss_all['loss'].avg, error_sum.avg*1000
elif split == 'test':
if opt.MAE:
p1, p2 = print_error(opt.dataset, action_error_sum_MAE, opt.train)
return p1, p2, loss_all['loss'].avg
if opt.refine:
p1, p2 = print_error(opt.dataset, action_error_sum_post_out, opt.train)
else:
p1, p2 = print_error(opt.dataset, action_error_sum, opt.train)
return p1, p2
def input_augmentation_MAE(input_2D, model_trans, joints_left, joints_right, mask, spatial_mask=None):
N, _, T, J, C = input_2D.shape
input_2D_flip = input_2D[:, 1].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
input_2D_non_flip = input_2D[:, 0].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
output_2D_flip = model_trans(input_2D_flip, mask, spatial_mask)
output_2D_flip[:, 0] *= -1
output_2D_flip[:, :, :, joints_left + joints_right] = output_2D_flip[:, :, :, joints_right + joints_left]
output_2D_non_flip = model_trans(input_2D_non_flip, mask, spatial_mask)
output_2D = (output_2D_non_flip + output_2D_flip) / 2
input_2D = input_2D_non_flip
return input_2D, output_2D
def input_augmentation(input_2D, model_trans, joints_left, joints_right):
N, _, T, J, C = input_2D.shape
input_2D_flip = input_2D[:, 1].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
input_2D_non_flip = input_2D[:, 0].view(N, T, J, C, 1).permute(0, 3, 1, 2, 4)
output_3D_flip, output_3D_flip_VTE = model_trans(input_2D_flip)
output_3D_flip_VTE[:, 0] *= -1
output_3D_flip[:, 0] *= -1
output_3D_flip_VTE[:, :, :, joints_left + joints_right] = output_3D_flip_VTE[:, :, :, joints_right + joints_left]
output_3D_flip[:, :, :, joints_left + joints_right] = output_3D_flip[:, :, :, joints_right + joints_left]
output_3D_non_flip, output_3D_non_flip_VTE = model_trans(input_2D_non_flip)
output_3D_VTE = (output_3D_non_flip_VTE + output_3D_flip_VTE) / 2
output_3D = (output_3D_non_flip + output_3D_flip) / 2
input_2D = input_2D_non_flip
return input_2D, output_3D, output_3D_VTE
if __name__ == '__main__':
opt.manualSeed = 1
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.cuda.manual_seed_all(opt.manualSeed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
if opt.train == 1:
logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%Y/%m/%d %H:%M:%S', \
filename=os.path.join(opt.checkpoint, 'train.log'), level=logging.INFO)
root_path = opt.root_path
dataset_path = root_path + 'data_3d_' + opt.dataset + '.npz'
dataset = Human36mDataset(dataset_path, opt)
actions = define_actions(opt.actions)
if opt.train:
train_data = Fusion(opt=opt, train=True, dataset=dataset, root_path=root_path, MAE=opt.MAE, tds=opt.t_downsample)
train_dataloader = torch.utils.data.DataLoader(train_data, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers), pin_memory=True)
if opt.test:
test_data = Fusion(opt=opt, train=False,dataset=dataset, root_path =root_path, MAE=opt.MAE, tds=opt.t_downsample)
test_dataloader = torch.utils.data.DataLoader(test_data, batch_size=opt.batchSize,
shuffle=False, num_workers=int(opt.workers), pin_memory=True)
opt.out_joints = dataset.skeleton().num_joints()
print(torch.cuda.is_available())
model_test=Model(opt)
dsize = (1, 2, 243, 17, 1)
inputs = torch.randn(dsize)
total_ops, total_params = profile(model_test, (inputs,), verbose=False)
macs, params = clever_format([total_ops, total_params], "%.3f")
print('MACs:', macs)
print('Paras:', params)
model = {}
model['trans'] = nn.DataParallel(Model(opt)).cuda()
model['refine'] = nn.DataParallel(refine(opt)).cuda()
model['MAE'] = nn.DataParallel(Model_MAE(opt)).cuda()
model_params = 0
for parameter in model['trans'].parameters():
model_params += parameter.numel()
print('INFO: Trainable parameter count:', model_params)
if opt.MAE_reload == 1:
model_dict = model['trans'].state_dict()
MAE_path = opt.previous_dir
pre_dict = torch.load(MAE_path)
state_dict = {k: v for k, v in pre_dict.items() if k in model_dict.keys()}
model_dict.update(state_dict)
model['trans'].load_state_dict(model_dict)
# cnt = 0
# log_path = os.path.join(opt.checkpoint, 'pretrain.txt')
# log_path_cur = os.path.join(opt.checkpoint, 'network.txt')
# f1 = open(log_path, mode='a')
# f2 = open(log_path_cur, mode='a')
# for k, v in pre_dict.items():
# f1.write('%d\n' % cnt)
# f1.write(k+'\n')
# cnt+=1
# f1.close()
# cnt = 0
# for k in model_dict.keys():
# f2.write('%d\n' % cnt)
# f2.write(k+'\n')
# cnt+=1
# f2.close()
model_dict = model['trans'].state_dict()
if opt.reload == 1:
no_refine_path = opt.previous_dir
pre_dict = torch.load(no_refine_path)
for name, key in model_dict.items():
model_dict[name] = pre_dict[name]
model['trans'].load_state_dict(model_dict)
refine_dict = model['refine'].state_dict()
if opt.refine_reload == 1:
refine_path = opt.previous_refine_name
pre_dict_refine = torch.load(refine_path)
for name, key in refine_dict.items():
refine_dict[name] = pre_dict_refine[name]
model['refine'].load_state_dict(refine_dict)
all_param = []
lr = opt.lr
for i_model in model:
all_param += list(model[i_model].parameters())
optimizer_all = optim.Adam(all_param, lr=opt.lr, amsgrad=True)
for epoch in range(1, opt.nepoch):
if opt.train == 1:
if not opt.MAE:
loss, mpjpe = train(opt, actions, train_dataloader, model, optimizer_all, epoch)
else:
loss = train(opt, actions, train_dataloader, model, optimizer_all, epoch)
if opt.test == 1:
if not opt.MAE:
p1, p2 = val(opt, actions, test_dataloader, model)
else:
p1, p2, loss_test = val(opt, actions, test_dataloader, model)
data_threshold = p1
if opt.train and data_threshold < opt.previous_best_threshold:
if opt.MAE:
opt.previous_name = save_model(opt.previous_name, opt.checkpoint, epoch, data_threshold,
model['MAE'], 'MAE')
else:
opt.previous_name = save_model(opt.previous_name, opt.checkpoint, epoch, data_threshold, model['trans'], 'no_refine')
if opt.refine:
opt.previous_refine_name = save_model(opt.previous_refine_name, opt.checkpoint, epoch,
data_threshold, model['refine'], 'refine')
opt.previous_best_threshold = data_threshold
if opt.train == 0:
print('p1: %.2f, p2: %.2f' % (p1, p2))
break
else:
if opt.MAE:
logging.info('epoch: %d, lr: %.7f, loss: %.4f, loss_test: %.4f, p1: %.2f, p2: %.2f' % (
epoch, lr, loss, loss_test, p1, p2))
print('e: %d, lr: %.7f, loss: %.4f, loss_test: %.4f, p1: %.2f, p2: %.2f' % (epoch, lr, loss, loss_test, p1, p2))
else:
logging.info('epoch: %d, lr: %.7f, loss: %.4f, MPJPE: %.2f, p1: %.2f, p2: %.2f' % (epoch, lr, loss, mpjpe, p1, p2))
print('e: %d, lr: %.7f, loss: %.4f, M: %.2f, p1: %.2f, p2: %.2f' % (epoch, lr, loss, mpjpe, p1, p2))
if epoch % opt.large_decay_epoch == 0:
for param_group in optimizer_all.param_groups:
param_group['lr'] *= opt.lr_decay_large
lr *= opt.lr_decay_large
else:
for param_group in optimizer_all.param_groups:
param_group['lr'] *= opt.lr_decay
lr *= opt.lr_decay
| 15,554 | 37.790524 | 168 | py |
P-STMO | P-STMO-main/common/load_data_hm36_tds_in_the_wild.py |
import torch.utils.data as data
import numpy as np
from common.utils import deterministic_random
from common.camera import world_to_camera, normalize_screen_coordinates
from common.generator_tds import ChunkedGenerator
class Fusion(data.Dataset):
def __init__(self, opt, dataset, root_path, train=True, MAE=False, tds=1):
self.data_type = opt.dataset
self.train = train
self.keypoints_name = opt.keypoints
self.root_path = root_path
self.train_list = opt.subjects_train.split(',')
self.test_list = opt.subjects_test.split(',')
self.action_filter = None if opt.actions == '*' else opt.actions.split(',')
self.downsample = opt.downsample
self.subset = opt.subset
self.stride = opt.stride
self.crop_uv = opt.crop_uv
self.test_aug = opt.test_augmentation
self.pad = opt.pad
self.MAE=MAE
if self.train:
self.keypoints = self.prepare_data(dataset, self.train_list)
self.cameras_train, self.poses_train, self.poses_train_2d = self.fetch(dataset, self.train_list,
subset=self.subset)
self.generator = ChunkedGenerator(opt.batchSize // opt.stride, self.cameras_train, self.poses_train,
self.poses_train_2d, self.stride, pad=self.pad,
augment=opt.data_augmentation, reverse_aug=opt.reverse_augmentation,
kps_left=self.kps_left, kps_right=self.kps_right,
joints_left=self.joints_left,
joints_right=self.joints_right, out_all=opt.out_all, MAE=MAE, tds=tds)
print('INFO: Training on {} frames'.format(self.generator.num_frames()))
else:
self.keypoints = self.prepare_data(dataset, self.test_list)
self.cameras_test, self.poses_test, self.poses_test_2d = self.fetch(dataset, self.test_list,
subset=self.subset)
self.generator = ChunkedGenerator(opt.batchSize // opt.stride, self.cameras_test, self.poses_test,
self.poses_test_2d,
pad=self.pad, augment=False, kps_left=self.kps_left,
kps_right=self.kps_right, joints_left=self.joints_left,
joints_right=self.joints_right, MAE=MAE, tds=tds)
self.key_index = self.generator.saved_index
print('INFO: Testing on {} frames'.format(self.generator.num_frames()))
def prepare_data(self, dataset, folder_list):
for subject in folder_list:
for action in dataset[subject].keys():
anim = dataset[subject][action]
positions_3d = []
for cam in anim['cameras']:
pos_3d = world_to_camera(anim['positions'], R=cam['orientation'], t=cam['translation'])
pos_3d[:, 1:] -= pos_3d[:, :1]
if self.keypoints_name.startswith('sh'):
pos_3d = np.delete(pos_3d,obj=9,axis=1)
positions_3d.append(pos_3d)
anim['positions_3d'] = positions_3d
keypoints = np.load(self.root_path + 'data_2d_' + self.data_type + '_' + self.keypoints_name + '.npz',allow_pickle=True)
keypoints_symmetry = keypoints['metadata'].item()['keypoints_symmetry']
self.kps_left, self.kps_right = list(keypoints_symmetry[0]), list(keypoints_symmetry[1])
self.joints_left, self.joints_right = list(dataset.skeleton().joints_left()), list(dataset.skeleton().joints_right())
keypoints = keypoints['positions_2d'].item()
for subject in folder_list:
assert subject in keypoints, 'Subject {} is missing from the 2D detections dataset'.format(subject)
for action in dataset[subject].keys():
assert action in keypoints[
subject], 'Action {} of subject {} is missing from the 2D detections dataset'.format(action,
subject)
for cam_idx in range(len(keypoints[subject][action])):
mocap_length = dataset[subject][action]['positions_3d'][cam_idx].shape[0]
assert keypoints[subject][action][cam_idx].shape[0] >= mocap_length
if keypoints[subject][action][cam_idx].shape[0] > mocap_length:
keypoints[subject][action][cam_idx] = keypoints[subject][action][cam_idx][:mocap_length]
for subject in keypoints.keys():
for action in keypoints[subject]:
for cam_idx, kps in enumerate(keypoints[subject][action]):
cam = dataset.cameras()[subject][cam_idx]
if self.crop_uv == 0:
kps[..., :2] = normalize_screen_coordinates(kps[..., :2], w=cam['res_w'], h=cam['res_h'])
keypoints[subject][action][cam_idx] = kps
return keypoints
def fetch(self, dataset, subjects, subset=1, parse_3d_poses=True):
out_poses_3d = {}
out_poses_2d = {}
out_camera_params = {}
for subject in subjects:
for action in self.keypoints[subject].keys():
if self.action_filter is not None:
found = False
for a in self.action_filter:
if action.startswith(a):
found = True
break
if not found:
continue
poses_2d = self.keypoints[subject][action]
for i in range(len(poses_2d)):
out_poses_2d[(subject, action, i)] = poses_2d[i][..., :2]
if subject in dataset.cameras():
cams = dataset.cameras()[subject]
assert len(cams) == len(poses_2d), 'Camera count mismatch'
for i, cam in enumerate(cams):
if 'intrinsic' in cam:
out_camera_params[(subject, action, i)] = cam['intrinsic']
if parse_3d_poses and 'positions_3d' in dataset[subject][action]:
poses_3d = dataset[subject][action]['positions_3d']
assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'
for i in range(len(poses_3d)):
out_poses_3d[(subject, action, i)] = poses_3d[i]
if len(out_camera_params) == 0:
out_camera_params = None
if len(out_poses_3d) == 0:
out_poses_3d = None
stride = self.downsample
if subset < 1:
for key in out_poses_2d.keys():
n_frames = int(round(len(out_poses_2d[key]) // stride * subset) * stride)
start = deterministic_random(0, len(out_poses_2d[key]) - n_frames + 1, str(len(out_poses_2d[key])))
out_poses_2d[key] = out_poses_2d[key][start:start + n_frames:stride]
if out_poses_3d is not None:
out_poses_3d[key] = out_poses_3d[key][start:start + n_frames:stride]
elif stride > 1:
for key in out_poses_2d.keys():
out_poses_2d[key] = out_poses_2d[key][::stride]
if out_poses_3d is not None:
out_poses_3d[key] = out_poses_3d[key][::stride]
return out_camera_params, out_poses_3d, out_poses_2d
def __len__(self):
return len(self.generator.pairs)
#return 200
def __getitem__(self, index):
seq_name, start_3d, end_3d, flip, reverse = self.generator.pairs[index]
if self.MAE:
cam, input_2D, action, subject, cam_ind = self.generator.get_batch(seq_name, start_3d, end_3d, flip,
reverse)
if self.train == False and self.test_aug:
_, input_2D_aug, _, _,_ = self.generator.get_batch(seq_name, start_3d, end_3d, flip=True, reverse=reverse)
input_2D = np.concatenate((np.expand_dims(input_2D,axis=0),np.expand_dims(input_2D_aug,axis=0)),0)
else:
cam, gt_3D, input_2D, action, subject, cam_ind = self.generator.get_batch(seq_name, start_3d, end_3d, flip, reverse)
if self.train == False and self.test_aug:
_, _, input_2D_aug, _, _,_ = self.generator.get_batch(seq_name, start_3d, end_3d, flip=True, reverse=reverse)
input_2D = np.concatenate((np.expand_dims(input_2D,axis=0),np.expand_dims(input_2D_aug,axis=0)),0)
bb_box = np.array([0, 0, 1, 1])
input_2D_update = input_2D
scale = np.float(1.0)
if self.MAE:
return cam, input_2D_update, action, subject, scale, bb_box, cam_ind
else:
return cam, gt_3D, input_2D_update, action, subject, scale, bb_box, cam_ind
| 9,334 | 50.291209 | 128 | py |
P-STMO | P-STMO-main/common/h36m_dataset.py |
import numpy as np
import copy
from common.skeleton import Skeleton
from common.mocap_dataset import MocapDataset
from common.camera import normalize_screen_coordinates
h36m_skeleton = Skeleton(parents=[-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12,
16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30],
joints_left=[6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23],
joints_right=[1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31])
h36m_cameras_intrinsic_params = [
{
'id': '54138969',
'center': [512.54150390625, 515.4514770507812],
'focal_length': [1145.0494384765625, 1143.7811279296875],
'radial_distortion': [-0.20709891617298126, 0.24777518212795258, -0.0030751503072679043],
'tangential_distortion': [-0.0009756988729350269, -0.00142447161488235],
'res_w': 1000,
'res_h': 1002,
'azimuth': 70,
},
{
'id': '55011271',
'center': [508.8486328125, 508.0649108886719],
'focal_length': [1149.6756591796875, 1147.5916748046875],
'radial_distortion': [-0.1942136287689209, 0.2404085397720337, 0.006819975562393665],
'tangential_distortion': [-0.0016190266469493508, -0.0027408944442868233],
'res_w': 1000,
'res_h': 1000,
'azimuth': -70,
},
{
'id': '58860488',
'center': [519.8158569335938, 501.40264892578125],
'focal_length': [1149.1407470703125, 1148.7989501953125],
'radial_distortion': [-0.2083381861448288, 0.25548800826072693, -0.0024604974314570427],
'tangential_distortion': [0.0014843869721516967, -0.0007599993259645998],
'res_w': 1000,
'res_h': 1000,
'azimuth': 110,
},
{
'id': '60457274',
'center': [514.9682006835938, 501.88201904296875],
'focal_length': [1145.5113525390625, 1144.77392578125],
'radial_distortion': [-0.198384091258049, 0.21832367777824402, -0.008947807364165783],
'tangential_distortion': [-0.0005872055771760643, -0.0018133620033040643],
'res_w': 1000,
'res_h': 1002,
'azimuth': -110,
},
]
h36m_cameras_extrinsic_params = {
'S1': [
{
'orientation': [0.1407056450843811, -0.1500701755285263, -0.755240797996521, 0.6223280429840088],
'translation': [1841.1070556640625, 4955.28466796875, 1563.4454345703125],
},
{
'orientation': [0.6157187819480896, -0.764836311340332, -0.14833825826644897, 0.11794740706682205],
'translation': [1761.278564453125, -5078.0068359375, 1606.2650146484375],
},
{
'orientation': [0.14651472866535187, -0.14647851884365082, 0.7653023600578308, -0.6094175577163696],
'translation': [-1846.7777099609375, 5215.04638671875, 1491.972412109375],
},
{
'orientation': [0.5834008455276489, -0.7853162288665771, 0.14548823237419128, -0.14749594032764435],
'translation': [-1794.7896728515625, -3722.698974609375, 1574.8927001953125],
},
],
'S2': [
{},
{},
{},
{},
],
'S3': [
{},
{},
{},
{},
],
'S4': [
{},
{},
{},
{},
],
'S5': [
{
'orientation': [0.1467377245426178, -0.162370964884758, -0.7551892995834351, 0.6178938746452332],
'translation': [2097.3916015625, 4880.94482421875, 1605.732421875],
},
{
'orientation': [0.6159758567810059, -0.7626792192459106, -0.15728192031383514, 0.1189815029501915],
'translation': [2031.7008056640625, -5167.93310546875, 1612.923095703125],
},
{
'orientation': [0.14291371405124664, -0.12907841801643372, 0.7678384780883789, -0.6110143065452576],
'translation': [-1620.5948486328125, 5171.65869140625, 1496.43701171875],
},
{
'orientation': [0.5920479893684387, -0.7814217805862427, 0.1274748593568802, -0.15036417543888092],
'translation': [-1637.1737060546875, -3867.3173828125, 1547.033203125],
},
],
'S6': [
{
'orientation': [0.1337897777557373, -0.15692396461963654, -0.7571090459823608, 0.6198879480361938],
'translation': [1935.4517822265625, 4950.24560546875, 1618.0838623046875],
},
{
'orientation': [0.6147197484970093, -0.7628812789916992, -0.16174767911434174, 0.11819244921207428],
'translation': [1969.803955078125, -5128.73876953125, 1632.77880859375],
},
{
'orientation': [0.1529948115348816, -0.13529130816459656, 0.7646096348762512, -0.6112781167030334],
'translation': [-1769.596435546875, 5185.361328125, 1476.993408203125],
},
{
'orientation': [0.5916101336479187, -0.7804774045944214, 0.12832270562648773, -0.1561593860387802],
'translation': [-1721.668701171875, -3884.13134765625, 1540.4879150390625],
},
],
'S7': [
{
'orientation': [0.1435241848230362, -0.1631336808204651, -0.7548328638076782, 0.6188824772834778],
'translation': [1974.512939453125, 4926.3544921875, 1597.8326416015625],
},
{
'orientation': [0.6141672730445862, -0.7638262510299683, -0.1596645563840866, 0.1177929937839508],
'translation': [1937.0584716796875, -5119.7900390625, 1631.5665283203125],
},
{
'orientation': [0.14550060033798218, -0.12874816358089447, 0.7660516500473022, -0.6127139329910278],
'translation': [-1741.8111572265625, 5208.24951171875, 1464.8245849609375],
},
{
'orientation': [0.5912848114967346, -0.7821764349937439, 0.12445473670959473, -0.15196487307548523],
'translation': [-1734.7105712890625, -3832.42138671875, 1548.5830078125],
},
],
'S8': [
{
'orientation': [0.14110587537288666, -0.15589867532253265, -0.7561917304992676, 0.619644045829773],
'translation': [2150.65185546875, 4896.1611328125, 1611.9046630859375],
},
{
'orientation': [0.6169601678848267, -0.7647668123245239, -0.14846350252628326, 0.11158157885074615],
'translation': [2219.965576171875, -5148.453125, 1613.0440673828125],
},
{
'orientation': [0.1471444070339203, -0.13377119600772858, 0.7670128345489502, -0.6100369691848755],
'translation': [-1571.2215576171875, 5137.0185546875, 1498.1761474609375],
},
{
'orientation': [0.5927824378013611, -0.7825870513916016, 0.12147816270589828, -0.14631995558738708],
'translation': [-1476.913330078125, -3896.7412109375, 1547.97216796875],
},
],
'S9': [
{
'orientation': [0.15540587902069092, -0.15548215806484222, -0.7532095313072205, 0.6199594736099243],
'translation': [2044.45849609375, 4935.1171875, 1481.2275390625],
},
{
'orientation': [0.618784487247467, -0.7634735107421875, -0.14132238924503326, 0.11933968216180801],
'translation': [1990.959716796875, -5123.810546875, 1568.8048095703125],
},
{
'orientation': [0.13357827067375183, -0.1367100477218628, 0.7689454555511475, -0.6100738644599915],
'translation': [-1670.9921875, 5211.98583984375, 1528.387939453125],
},
{
'orientation': [0.5879399180412292, -0.7823407053947449, 0.1427614390850067, -0.14794869720935822],
'translation': [-1696.04345703125, -3827.099853515625, 1591.4127197265625],
},
],
'S11': [
{
'orientation': [0.15232472121715546, -0.15442320704460144, -0.7547563314437866, 0.6191070079803467],
'translation': [2098.440185546875, 4926.5546875, 1500.278564453125],
},
{
'orientation': [0.6189449429512024, -0.7600917220115662, -0.15300633013248444, 0.1255258321762085],
'translation': [2083.182373046875, -4912.1728515625, 1561.07861328125],
},
{
'orientation': [0.14943228662014008, -0.15650227665901184, 0.7681233882904053, -0.6026304364204407],
'translation': [-1609.8153076171875, 5177.3359375, 1537.896728515625],
},
{
'orientation': [0.5894251465797424, -0.7818877100944519, 0.13991211354732513, -0.14715361595153809],
'translation': [-1590.738037109375, -3854.1689453125, 1578.017578125],
},
],
}
class Human36mDataset(MocapDataset):
def __init__(self, path, opt, remove_static_joints=True):
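        # Merge per-camera intrinsics into the per-subject extrinsics, optionally
        # normalize screen coordinates to [-1, 1], convert translations from mm to
        # meters, and reduce the 32-joint skeleton to 17 joints by removing static ones.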
super().__init__(fps=50, skeleton=h36m_skeleton)
self.train_list = ['S1', 'S5', 'S6', 'S7', 'S8']
self.test_list = ['S9', 'S11']
self._cameras = copy.deepcopy(h36m_cameras_extrinsic_params)
for cameras in self._cameras.values():
for i, cam in enumerate(cameras):
cam.update(h36m_cameras_intrinsic_params[i])
for k, v in cam.items():
if k not in ['id', 'res_w', 'res_h']:
cam[k] = np.array(v, dtype='float32')
if opt.crop_uv == 0:
cam['center'] = normalize_screen_coordinates(cam['center'], w=cam['res_w'], h=cam['res_h']).astype(
'float32')
cam['focal_length'] = cam['focal_length'] / cam['res_w'] * 2
if 'translation' in cam:
cam['translation'] = cam['translation'] / 1000
cam['intrinsic'] = np.concatenate((cam['focal_length'],
cam['center'],
cam['radial_distortion'],
cam['tangential_distortion']))
data = np.load(path,allow_pickle=True)['positions_3d'].item()
self._data = {}
for subject, actions in data.items():
self._data[subject] = {}
for action_name, positions in actions.items():
self._data[subject][action_name] = {
'positions': positions,
'cameras': self._cameras[subject],
}
if remove_static_joints:
self.remove_joints([4, 5, 9, 10, 11, 16, 20, 21, 22, 23, 24, 28, 29, 30, 31])
self._skeleton._parents[11] = 8
self._skeleton._parents[14] = 8
def supports_semi_supervised(self):
return True
| 10,701 | 41.300395 | 119 | py |
P-STMO | P-STMO-main/common/visualization.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation, writers
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import subprocess as sp
def get_resolution(filename):
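    # Ask ffprobe for the first video stream's width/height as a CSV pair.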
command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',
'-show_entries', 'stream=width,height', '-of', 'csv=p=0', filename]
with sp.Popen(command, stdout=sp.PIPE, bufsize=-1) as pipe:
for line in pipe.stdout:
w, h = line.decode().strip().split(',')
return int(w), int(h)
def get_fps(filename):
command = ['ffprobe', '-v', 'error', '-select_streams', 'v:0',
'-show_entries', 'stream=r_frame_rate', '-of', 'csv=p=0', filename]
with sp.Popen(command, stdout=sp.PIPE, bufsize=-1) as pipe:
for line in pipe.stdout:
a, b = line.decode().strip().split('/')
return int(a) / int(b)
def read_video(filename, skip=0, limit=-1):
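    # Stream raw RGB24 frames from ffmpeg's stdout; each frame is exactly
    # w * h * 3 bytes, so frames can be sliced out of the pipe one at a time.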
w, h = get_resolution(filename)
command = ['ffmpeg',
'-i', filename,
'-f', 'image2pipe',
'-pix_fmt', 'rgb24',
'-vsync', '0',
'-vcodec', 'rawvideo', '-']
i = 0
with sp.Popen(command, stdout=sp.PIPE, bufsize=-1) as pipe:
while True:
data = pipe.stdout.read(w * h * 3)
if not data:
break
i += 1
if i > limit and limit != -1:
continue
if i > skip:
yield np.frombuffer(data, dtype='uint8').reshape((h, w, 3))
def downsample_tensor(X, factor):
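    # Temporal downsampling by averaging every `factor` consecutive frames
    # (any tail that does not fill a full window is dropped).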
length = X.shape[0] // factor * factor
return np.mean(X[:length].reshape(-1, factor, *X.shape[1:]), axis=1)
def render_animation(keypoints, keypoints_metadata, poses, skeleton, fps, bitrate, azim, output, viewport,
limit=-1, downsample=1, size=6, input_video_path=None, input_video_skip=0, viz_action="",
viz_subject=""):
"""
TODO
Render an animation. The supported output modes are:
-- 'interactive': display an interactive figure
(also works on notebooks if associated with %matplotlib inline)
-- 'html': render the animation as HTML5 video. Can be displayed in a notebook using HTML(...).
-- 'filename.mp4': render and export the animation as an h264 video (requires ffmpeg).
-- 'filename.gif': render and export the animation a gif file (requires imagemagick).
"""
plt.ioff()
fig = plt.figure(figsize=(size * (1 + len(poses)), size))
ax_in = fig.add_subplot(1, 1 + len(poses), 1)
ax_in.get_xaxis().set_visible(False)
ax_in.get_yaxis().set_visible(False)
ax_in.set_axis_off()
ax_in.set_title('Input')
ax_3d = []
lines_3d = []
trajectories = []
radius = 1.7
for index, (title, data) in enumerate(poses.items()):
ax = fig.add_subplot(1, 1 + len(poses), index + 2, projection='3d')
ax.view_init(elev=15., azim=azim+90.)
ax.set_xlim3d([-radius / 2, radius / 2])
ax.set_zlim3d([0, radius])
ax.set_ylim3d([-radius / 2, radius / 2])
# ax.set_aspect('equal')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.dist = 7.5
ax.set_title(title) # , pad=35
ax_3d.append(ax)
lines_3d.append([])
trajectories.append(data[:, 0, [0, 1]])
poses = list(poses.values())
# Decode video
if input_video_path is None:
# Black background
all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')
else:
# Load video using ffmpeg
all_frames = []
for f in read_video(input_video_path, skip=input_video_skip, limit=limit):
all_frames.append(f)
effective_length = min(keypoints.shape[0], len(all_frames))
all_frames = all_frames[:effective_length]
keypoints = keypoints[input_video_skip:] # todo remove
for idx in range(len(poses)):
poses[idx] = poses[idx][input_video_skip:]
if fps is None:
fps = get_fps(input_video_path)
if downsample > 1:
keypoints = downsample_tensor(keypoints, downsample)
all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')
for idx in range(len(poses)):
poses[idx] = downsample_tensor(poses[idx], downsample)
trajectories[idx] = downsample_tensor(trajectories[idx], downsample)
fps /= downsample
initialized = False
image = None
lines = []
points = None
if limit < 1:
limit = len(all_frames)
else:
limit = min(limit, len(all_frames))
parents = skeleton.parents()
def update_video(i):
nonlocal initialized, image, lines, points
for n, ax in enumerate(ax_3d):
ax.set_xlim3d([-radius / 2 + trajectories[n][i, 0], radius / 2 + trajectories[n][i, 0]])
ax.set_ylim3d([-radius / 2 + trajectories[n][i, 1], radius / 2 + trajectories[n][i, 1]])
# Update 2D poses
# joints_right_2d = keypoints_metadata['keypoints_symmetry'][1]
# joints_left_2d = keypoints_metadata['keypoints_symmetry'][0]
joints_left_2d = [4, 5, 6, 11, 12, 13]
joints_right_2d = [1, 2, 3, 14, 15, 16]
colors_2d = np.full(keypoints.shape[1], 'midnightblue', dtype="object")
colors_2d[joints_right_2d] = 'yellowgreen'
colors_2d[joints_left_2d] = 'midnightblue'
if not initialized:
image = ax_in.imshow(all_frames[i], aspect='equal')
for j, j_parent in enumerate(parents):
if j_parent == -1:
continue
# if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
if len(parents) == keypoints.shape[1]:
# Draw skeleton only if keypoints match (otherwise we don't have the parents definition)
lines.append(ax_in.plot([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
[keypoints[i, j, 1], keypoints[i, j_parent, 1]], color=colors_2d[j]))
col = 'red' if j in skeleton.joints_right() else 'black'
for n, ax in enumerate(ax_3d):
pos = poses[n][i]
lines_3d[n].append(ax.plot([pos[j, 0], pos[j_parent, 0]],
[pos[j, 1], pos[j_parent, 1]],
[pos[j, 2], pos[j_parent, 2]], zdir='z', c=colors_2d[j]))
# points = ax_in.scatter(*keypoints[i].T, 0, zorder=10)
initialized = True
else:
image.set_data(all_frames[i])
for j, j_parent in enumerate(parents):
if j_parent == -1:
continue
# if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
if len(parents) == keypoints.shape[1]:
lines[j - 1][0].set_data([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
[keypoints[i, j, 1], keypoints[i, j_parent, 1]])
for n, ax in enumerate(ax_3d):
pos = poses[n][i]
lines_3d[n][j - 1][0].set_xdata([pos[j, 0], pos[j_parent, 0]])
lines_3d[n][j - 1][0].set_ydata([pos[j, 1], pos[j_parent, 1]])
lines_3d[n][j - 1][0].set_3d_properties([pos[j, 2], pos[j_parent, 2]], zdir='z')
# points.set_offsets(keypoints[i])
print('{}/{} '.format(i, limit), end='\r')
fig.tight_layout()
anim = FuncAnimation(fig, update_video, frames=np.arange(0, limit), interval=1000 / fps, repeat=False)
if output.endswith('.mp4'):
Writer = writers['ffmpeg']
writer = Writer(fps=fps, metadata={}, bitrate=bitrate)
anim.save(output, writer=writer)
elif output.endswith('.gif'):
anim.save(output, dpi=80, writer='imagemagick')
else:
raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')
plt.close()
def render_animation_temp(keypoints, keypoints_metadata, poses, skeleton, fps, bitrate, azim, output, viewport,
limit=-1, downsample=1, size=6, input_video_path=None, input_video_skip=0, viz_action="",
viz_subject=""):
"""
TODO
Render an animation. The supported output modes are:
-- 'interactive': display an interactive figure
(also works on notebooks if associated with %matplotlib inline)
-- 'html': render the animation as HTML5 video. Can be displayed in a notebook using HTML(...).
-- 'filename.mp4': render and export the animation as an h264 video (requires ffmpeg).
-- 'filename.gif': render and export the animation a gif file (requires imagemagick).
"""
output = output + "_" + viz_subject + "_" + viz_action + ".mp4"
print(output)
plt.ioff()
fig = plt.figure(figsize=(size * (1 + len(poses)), size))
ax_in = fig.add_subplot(1, 1 + len(poses), 1)
ax_in.get_xaxis().set_visible(False)
ax_in.get_yaxis().set_visible(False)
ax_in.set_axis_off()
ax_in.set_title('Input')
ax_3d = []
lines_3d = []
trajectories = []
radius = 1.7
for index, (title, data) in enumerate(poses.items()):
ax = fig.add_subplot(1, 1 + len(poses), index + 2, projection='3d')
ax.view_init(elev=15., azim=azim)
ax.set_xlim3d([-radius / 2, radius / 2])
ax.set_zlim3d([0, radius])
ax.set_ylim3d([-radius / 2, radius / 2])
# ax.set_aspect('equal')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.dist = 7.5
ax.set_title(title) # , pad=35
ax_3d.append(ax)
lines_3d.append([])
trajectories.append(data[:, 0, [0, 1]])
poses = list(poses.values())
# Decode video
if input_video_path is None:
# Black background
all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')
else:
# Load video using ffmpeg
all_frames = []
for f in read_video(input_video_path, skip=input_video_skip, limit=limit):
all_frames.append(f)
effective_length = min(keypoints.shape[0], len(all_frames))
all_frames = all_frames[:effective_length]
keypoints = keypoints[input_video_skip:] # todo remove
for idx in range(len(poses)):
poses[idx] = poses[idx][input_video_skip:]
if fps is None:
fps = get_fps(input_video_path)
if downsample > 1:
keypoints = downsample_tensor(keypoints, downsample)
all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')
for idx in range(len(poses)):
poses[idx] = downsample_tensor(poses[idx], downsample)
trajectories[idx] = downsample_tensor(trajectories[idx], downsample)
fps /= downsample
initialized = False
image = None
lines = []
points = None
if limit < 1:
limit = len(all_frames)
else:
limit = min(limit, len(all_frames))
parents = skeleton.parents()
def update_video(i):
nonlocal initialized, image, lines, points
for n, ax in enumerate(ax_3d):
ax.set_xlim3d([-radius / 2 + trajectories[n][i, 0], radius / 2 + trajectories[n][i, 0]])
ax.set_ylim3d([-radius / 2 + trajectories[n][i, 1], radius / 2 + trajectories[n][i, 1]])
# Update 2D poses
joints_right_2d = keypoints_metadata['keypoints_symmetry'][1]
joints_left_2d = keypoints_metadata['keypoints_symmetry'][0]
colors_2d = np.full(keypoints.shape[1], 'peru', dtype="object")
colors_2d[joints_right_2d] = 'darkseagreen'
colors_2d[joints_left_2d] = 'slateblue'
if not initialized:
image = ax_in.imshow(all_frames[i], aspect='equal')
for j, j_parent in enumerate(parents):
if j_parent == -1:
continue
# if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
if len(parents) == keypoints.shape[1]:
# Draw skeleton only if keypoints match (otherwise we don't have the parents definition)
lines.append(ax_in.plot([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
[keypoints[i, j, 1], keypoints[i, j_parent, 1]], color=colors_2d[j]))
col = 'red' if j in skeleton.joints_right() else 'black'
for n, ax in enumerate(ax_3d):
pos = poses[n][i]
lines_3d[n].append(ax.plot([pos[j, 0], pos[j_parent, 0]],
[pos[j, 1], pos[j_parent, 1]],
[pos[j, 2], pos[j_parent, 2]], zdir='z', c=colors_2d[j]))
points = ax_in.scatter(*keypoints[i].T, 10, color=colors_2d, edgecolors='white', zorder=10)
initialized = True
else:
image.set_data(all_frames[i])
for j, j_parent in enumerate(parents):
if j_parent == -1:
continue
# if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
if len(parents) == keypoints.shape[1]:
lines[j - 1][0].set_data([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
[keypoints[i, j, 1], keypoints[i, j_parent, 1]])
for n, ax in enumerate(ax_3d):
pos = poses[n][i]
lines_3d[n][j - 1][0].set_xdata([pos[j, 0], pos[j_parent, 0]])
lines_3d[n][j - 1][0].set_ydata([pos[j, 1], pos[j_parent, 1]])
lines_3d[n][j - 1][0].set_3d_properties([pos[j, 2], pos[j_parent, 2]], zdir='z')
points.set_offsets(keypoints[i])
print('{}/{} '.format(i, limit), end='\r')
fig.tight_layout()
anim = FuncAnimation(fig, update_video, frames=np.arange(0, limit), interval=1000 / fps, repeat=False)
if output.endswith('.mp4'):
Writer = writers['ffmpeg']
writer = Writer(fps=fps, metadata={}, bitrate=bitrate)
anim.save(output, writer=writer)
elif output.endswith('.gif'):
anim.save(output, dpi=80, writer='imagemagick')
else:
raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')
plt.close()
def render_animation_T(keypoints, keypoints_metadata, poses, skeleton, fps, bitrate, azim, output,
viewport,
limit=-1, downsample=1, size=6, input_video_path=None, input_video_skip=0, viz_action="",
viz_subject=""):
"""
TODO
Render an animation. The supported output modes are:
-- 'interactive': display an interactive figure
(also works on notebooks if associated with %matplotlib inline)
-- 'html': render the animation as HTML5 video. Can be displayed in a notebook using HTML(...).
-- 'filename.mp4': render and export the animation as an h264 video (requires ffmpeg).
-- 'filename.gif': render and export the animation a gif file (requires imagemagick).
"""
output = output + "_" + viz_subject + "_" + viz_action + ".mp4"
print(output)
plt.ioff()
fig = plt.figure(figsize=(size * (1 + len(poses)), size))
ax_in = fig.add_subplot(1, 1 + len(poses), 1)
ax_in.get_xaxis().set_visible(False)
ax_in.get_yaxis().set_visible(False)
ax_in.set_axis_off()
ax_in.set_title('Input')
ax_3d = []
lines_3d = []
trajectories = []
radius = 1.7
for index, (title, data) in enumerate(poses.items()):
ax = fig.add_subplot(1, 1 + len(poses), index + 2, projection='3d')
ax.view_init(elev=15., azim=azim)
ax.set_xlim3d([-radius / 2, radius / 2])
ax.set_zlim3d([0, radius])
ax.set_ylim3d([-radius / 2, radius / 2])
# ax.set_aspect('equal')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.dist = 7.5
ax.set_title(title) # , pad=35
ax_3d.append(ax)
lines_3d.append([])
trajectories.append(data[:, 0, [0, 1]])
poses = list(poses.values())
# Decode video
if input_video_path is None:
# Black background
all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')
else:
# Load video using ffmpeg
all_frames = []
for f in read_video(input_video_path, skip=input_video_skip, limit=limit):
all_frames.append(f)
effective_length = min(keypoints.shape[0], len(all_frames))
all_frames = all_frames[:effective_length]
keypoints = keypoints[input_video_skip:] # todo remove
for idx in range(len(poses)):
poses[idx] = poses[idx][input_video_skip:]
if fps is None:
fps = get_fps(input_video_path)
if downsample > 1:
keypoints = downsample_tensor(keypoints, downsample)
all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')
for idx in range(len(poses)):
poses[idx] = downsample_tensor(poses[idx], downsample)
trajectories[idx] = downsample_tensor(trajectories[idx], downsample)
fps /= downsample
initialized = False
image = None
lines = []
points = None
if limit < 1:
limit = len(all_frames)
else:
limit = min(limit, len(all_frames))
parents = skeleton.parents()
def update_video(i):
nonlocal initialized, image, lines, points
for n, ax in enumerate(ax_3d):
ax.set_xlim3d([-radius / 2 + trajectories[n][i, 0], radius / 2 + trajectories[n][i, 0]])
ax.set_ylim3d([-radius / 2 + trajectories[n][i, 1], radius / 2 + trajectories[n][i, 1]])
# Update 2D poses
joints_right_2d = keypoints_metadata['keypoints_symmetry'][1]
joints_left_2d = keypoints_metadata['keypoints_symmetry'][0]
colors_2d = np.full(keypoints.shape[1], 'peru', dtype="object")
colors_2d[joints_right_2d] = 'darkseagreen'
colors_2d[joints_left_2d] = 'slateblue'
if not initialized:
image = ax_in.imshow(all_frames[i], aspect='equal')
for j, j_parent in enumerate(parents):
if j_parent == -1:
continue
# if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
if len(parents) == keypoints.shape[1]:
# Draw skeleton only if keypoints match (otherwise we don't have the parents definition)
lines.append(ax_in.plot([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
[keypoints[i, j, 1], keypoints[i, j_parent, 1]], color=colors_2d[j]))
col = 'red' if j in skeleton.joints_right() else 'black'
for n, ax in enumerate(ax_3d):
pos = poses[n][i]
lines_3d[n].append(ax.plot([pos[j, 0], pos[j_parent, 0]],
[pos[j, 1], pos[j_parent, 1]],
[pos[j, 2], pos[j_parent, 2]], zdir='z', c=colors_2d[j]))
# points = ax_in.scatter(*keypoints[i].T, 0, zorder=10)
initialized = True
else:
image.set_data(all_frames[i])
for j, j_parent in enumerate(parents):
if j_parent == -1:
continue
# if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
if len(parents) == keypoints.shape[1]:
lines[j - 1][0].set_data([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
[keypoints[i, j, 1], keypoints[i, j_parent, 1]])
for n, ax in enumerate(ax_3d):
pos = poses[n][i]
lines_3d[n][j - 1][0].set_xdata([pos[j, 0], pos[j_parent, 0]])
lines_3d[n][j - 1][0].set_ydata([pos[j, 1], pos[j_parent, 1]])
lines_3d[n][j - 1][0].set_3d_properties([pos[j, 2], pos[j_parent, 2]], zdir='z')
# points.set_offsets(keypoints[i])
print('{}/{} '.format(i, limit), end='\r')
fig.tight_layout()
anim = FuncAnimation(fig, update_video, frames=np.arange(0, limit), interval=1000 / fps, repeat=False)
if output.endswith('.mp4'):
Writer = writers['ffmpeg']
writer = Writer(fps=fps, metadata={}, bitrate=bitrate)
anim.save(output, writer=writer)
elif output.endswith('.gif'):
anim.save(output, dpi=80, writer='imagemagick')
else:
raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')
plt.close()
def render_animation_humaneva(keypoints, keypoints_metadata, poses, skeleton, fps, bitrate, azim, output,
viewport,
limit=-1, downsample=1, size=6, input_video_path=None, input_video_skip=0, viz_action="",
viz_subject=""):
"""
TODO
Render an animation. The supported output modes are:
-- 'interactive': display an interactive figure
(also works on notebooks if associated with %matplotlib inline)
-- 'html': render the animation as HTML5 video. Can be displayed in a notebook using HTML(...).
-- 'filename.mp4': render and export the animation as an h264 video (requires ffmpeg).
-- 'filename.gif': render and export the animation a gif file (requires imagemagick).
"""
# output = output + "_" + viz_subject + "_" + viz_action + ".mp4"
# print(output)
plt.ioff()
fig = plt.figure(figsize=(size * (1 + len(poses)), size))
ax_in = fig.add_subplot(1, 1 + len(poses), 1)
ax_in.get_xaxis().set_visible(False)
ax_in.get_yaxis().set_visible(False)
ax_in.set_axis_off()
ax_in.set_title('Input')
ax_3d = []
lines_3d = []
lines_3d_anno = []
trajectories = []
radius = 1.7
for index, (title, data) in enumerate(poses.items()):
ax = fig.add_subplot(1, 1 + len(poses), index + 2, projection='3d')
ax.view_init(elev=15., azim=azim)
ax.set_xlim3d([-radius / 2, radius / 2])
ax.set_zlim3d([0, radius])
ax.set_ylim3d([-radius / 2, radius / 2])
# ax.set_aspect('equal')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.dist = 7.5
ax.set_title(title) # , pad=35
ax_3d.append(ax)
lines_3d.append([])
trajectories.append(data[:, 0, [0, 1]])
poses = list(poses.values())
# Decode video
if input_video_path is None:
# Black background
all_frames = np.zeros((keypoints.shape[0], viewport[1], viewport[0]), dtype='uint8')
else:
# Load video using ffmpeg
all_frames = []
for f in read_video(input_video_path, skip=input_video_skip, limit=limit):
all_frames.append(f)
effective_length = min(keypoints.shape[0], len(all_frames))
all_frames = all_frames[:effective_length]
keypoints = keypoints[input_video_skip:] # todo remove
for idx in range(len(poses)):
poses[idx] = poses[idx][input_video_skip:]
if fps is None:
fps = get_fps(input_video_path)
if downsample > 1:
keypoints = downsample_tensor(keypoints, downsample)
all_frames = downsample_tensor(np.array(all_frames), downsample).astype('uint8')
for idx in range(len(poses)):
poses[idx] = downsample_tensor(poses[idx], downsample)
trajectories[idx] = downsample_tensor(trajectories[idx], downsample)
fps /= downsample
initialized = False
image = None
lines = []
points = None
if limit < 1:
limit = len(all_frames)
else:
limit = min(limit, len(all_frames))
parents = skeleton.parents()
def update_video(i):
nonlocal initialized, image, lines, points
for n, ax in enumerate(ax_3d):
ax.set_xlim3d([-radius / 2 + trajectories[n][i, 0], radius / 2 + trajectories[n][i, 0]])
ax.set_ylim3d([-radius / 2 + trajectories[n][i, 1], radius / 2 + trajectories[n][i, 1]])
# Update 2D poses
joints_right_2d = keypoints_metadata['keypoints_symmetry'][1]
joints_left_2d = keypoints_metadata['keypoints_symmetry'][0]
colors_2d = np.full(keypoints.shape[1], 'peru', dtype="object")
colors_2d[joints_right_2d] = 'darkseagreen'
colors_2d[joints_left_2d] = 'slateblue'
if not initialized:
image = ax_in.imshow(all_frames[i], aspect='equal')
for j, j_parent in enumerate(parents):
if j_parent == -1:
continue
# if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
if len(parents) == keypoints.shape[1]:
# Draw skeleton only if keypoints match (otherwise we don't have the parents definition)
lines.append(ax_in.plot([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
[keypoints[i, j, 1], keypoints[i, j_parent, 1]], color=colors_2d[j]))
col = 'red' if j in skeleton.joints_right() else 'black'
for n, ax in enumerate(ax_3d):
pos = poses[n][i]
lines_3d[n].append(ax.plot([pos[j, 0], pos[j_parent, 0]],
[pos[j, 1], pos[j_parent, 1]],
[pos[j, 2], pos[j_parent, 2]], zdir='z', c=colors_2d[j]))
ax.text(pos[j, 0] - 0.1, pos[j, 1] - 0.1, pos[j, 2] - 0.1, j)
# points = ax_in.scatter(*keypoints[i].T, 0, zorder=10)
initialized = True
else:
image.set_data(all_frames[i])
for j, j_parent in enumerate(parents):
if j_parent == -1:
continue
# if len(parents) == keypoints.shape[1] and keypoints_metadata['layout_name'] != 'coco':
if len(parents) == keypoints.shape[1]:
lines[(j - 1) * 2][0].set_data([keypoints[i, j, 0], keypoints[i, j_parent, 0]],
[keypoints[i, j, 1], keypoints[i, j_parent, 1]])
for n, ax in enumerate(ax_3d):
pos = poses[n][i]
lines_3d[n][j - 1][0].set_xdata([pos[j, 0], pos[j_parent, 0]])
lines_3d[n][j - 1][0].set_ydata([pos[j, 1], pos[j_parent, 1]])
lines_3d[n][j - 1][0].set_3d_properties([pos[j, 2], pos[j_parent, 2]], zdir='z')
# points.set_offsets(keypoints[i])
print('{}/{} '.format(i, limit), end='\r')
fig.tight_layout()
anim = FuncAnimation(fig, update_video, frames=np.arange(0, limit), interval=1000 / fps, repeat=False)
if output.endswith('.mp4'):
Writer = writers['ffmpeg']
writer = Writer(fps=fps, metadata={}, bitrate=bitrate)
anim.save(output, writer=writer)
elif output.endswith('.gif'):
anim.save(output, dpi=80, writer='imagemagick')
else:
raise ValueError('Unsupported output format (only .mp4 and .gif are supported)')
plt.close()
| 28,111 | 39.742029 | 119 | py |
P-STMO | P-STMO-main/common/generator_3dhp.py | import numpy as np
class ChunkedGenerator:
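    # Sampler for MPI-INF-3DHP: for every 2D pose sequence it enumerates chunks as
    # (key, start, end, flip, reverse) pairs, optionally duplicated with flip/reverse
    # augmentation, and edge-pads chunks at sequence boundaries in get_batch().
    # At test time only frames marked valid are used, one frame per pair.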
def __init__(self, batch_size, cameras, poses_3d, poses_2d, valid_frame,
chunk_length=1, pad=0, causal_shift=0,
shuffle=False, random_seed=1234,
augment=False, reverse_aug= False,kps_left=None, kps_right=None, joints_left=None, joints_right=None,
endless=False, out_all = False, MAE=False, train=True):
assert poses_3d is None or len(poses_3d) == len(poses_2d), (len(poses_3d), len(poses_2d))
assert cameras is None or len(cameras) == len(poses_2d)
pairs = []
self.saved_index = {}
start_index = 0
if train == True:
for key in poses_2d.keys():
assert poses_3d is None or poses_2d[key].shape[0] == poses_3d[key].shape[0]
n_chunks = (poses_2d[key].shape[0] + chunk_length - 1) // chunk_length
offset = (n_chunks * chunk_length - poses_2d[key].shape[0]) // 2
bounds = np.arange(n_chunks + 1) * chunk_length - offset
augment_vector = np.full(len(bounds - 1), False, dtype=bool)
reverse_augment_vector = np.full(len(bounds - 1), False, dtype=bool)
keys = np.tile(np.array(key).reshape([1,3]),(len(bounds - 1),1))
pairs += list(zip(keys, bounds[:-1], bounds[1:], augment_vector,reverse_augment_vector))
if reverse_aug:
pairs += list(zip(keys, bounds[:-1], bounds[1:], augment_vector, ~reverse_augment_vector))
if augment:
if reverse_aug:
pairs += list(zip(keys, bounds[:-1], bounds[1:], ~augment_vector,~reverse_augment_vector))
else:
pairs += list(zip(keys, bounds[:-1], bounds[1:], ~augment_vector, reverse_augment_vector))
end_index = start_index + poses_3d[key].shape[0]
self.saved_index[key] = [start_index,end_index]
start_index = start_index + poses_3d[key].shape[0]
else:
for key in poses_2d.keys():
assert poses_3d is None or poses_2d[key].shape[0] == poses_3d[key].shape[0]
n_chunks = (poses_2d[key].shape[0] + chunk_length - 1) // chunk_length
offset = (n_chunks * chunk_length - poses_2d[key].shape[0]) // 2
bounds = np.arange(n_chunks) * chunk_length - offset
bounds_low = bounds[valid_frame[key].astype(bool)]
bounds_high = bounds[valid_frame[key].astype(bool)] + np.ones(bounds_low.shape[0],dtype=int)
augment_vector = np.full(len(bounds_low), False, dtype=bool)
reverse_augment_vector = np.full(len(bounds_low), False, dtype=bool)
keys = np.tile(np.array(key).reshape([1, 1]), (len(bounds_low), 1))
pairs += list(zip(keys, bounds_low, bounds_high, augment_vector, reverse_augment_vector))
if reverse_aug:
pairs += list(zip(keys, bounds_low, bounds_high, augment_vector, ~reverse_augment_vector))
if augment:
if reverse_aug:
pairs += list(zip(keys, bounds_low, bounds_high, ~augment_vector, ~reverse_augment_vector))
else:
pairs += list(zip(keys, bounds_low, bounds_high, ~augment_vector, reverse_augment_vector))
end_index = start_index + poses_3d[key].shape[0]
self.saved_index[key] = [start_index, end_index]
start_index = start_index + poses_3d[key].shape[0]
if cameras is not None:
self.batch_cam = np.empty((batch_size, cameras[key].shape[-1]))
if poses_3d is not None:
self.batch_3d = np.empty((batch_size, chunk_length, poses_3d[key].shape[-2], poses_3d[key].shape[-1]))
self.batch_2d = np.empty((batch_size, chunk_length + 2 * pad, poses_2d[key].shape[-2], poses_2d[key].shape[-1]))
self.num_batches = (len(pairs) + batch_size - 1) // batch_size
self.batch_size = batch_size
self.random = np.random.RandomState(random_seed)
self.pairs = pairs
self.shuffle = shuffle
self.pad = pad
self.causal_shift = causal_shift
self.endless = endless
self.state = None
self.cameras = cameras
if cameras is not None:
self.cameras = cameras
self.poses_3d = poses_3d
self.poses_2d = poses_2d
self.augment = augment
self.kps_left = kps_left
self.kps_right = kps_right
self.joints_left = joints_left
self.joints_right = joints_right
self.out_all = out_all
self.MAE=MAE
self.valid_frame = valid_frame
self.train=train
def num_frames(self):
return self.num_batches * self.batch_size
def random_state(self):
return self.random
def set_random_state(self, random):
self.random = random
def augment_enabled(self):
return self.augment
def next_pairs(self):
if self.state is None:
if self.shuffle:
pairs = self.random.permutation(self.pairs)
else:
pairs = self.pairs
return 0, pairs
else:
return self.state
def get_batch(self, seq_i, start_3d, end_3d, flip, reverse):
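        # Cut a 2D window of length chunk_length + 2*pad around the target chunk,
        # edge-pad it at sequence boundaries, and apply the same horizontal flip /
        # time reversal to the 2D inputs, the 3D targets and the camera parameters.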
if self.train==True:
subject,seq,cam_index = seq_i
seq_name = (subject,seq,cam_index)
else:
seq_name = seq_i[0]
start_2d = start_3d - self.pad - self.causal_shift
end_2d = end_3d + self.pad - self.causal_shift
seq_2d = self.poses_2d[seq_name].copy()
low_2d = max(start_2d, 0)
high_2d = min(end_2d, seq_2d.shape[0])
pad_left_2d = low_2d - start_2d
pad_right_2d = end_2d - high_2d
if pad_left_2d != 0 or pad_right_2d != 0:
self.batch_2d = np.pad(seq_2d[low_2d:high_2d], ((pad_left_2d, pad_right_2d), (0, 0), (0, 0)), 'edge')
else:
self.batch_2d = seq_2d[low_2d:high_2d]
if flip:
self.batch_2d[ :, :, 0] *= -1
self.batch_2d[ :, self.kps_left + self.kps_right] = self.batch_2d[ :,
self.kps_right + self.kps_left]
if reverse:
self.batch_2d = self.batch_2d[::-1].copy()
if not self.MAE:
if self.poses_3d is not None:
seq_3d = self.poses_3d[seq_name].copy()
if self.out_all:
low_3d = low_2d
high_3d = high_2d
pad_left_3d = pad_left_2d
pad_right_3d = pad_right_2d
else:
low_3d = max(start_3d, 0)
high_3d = min(end_3d, seq_3d.shape[0])
pad_left_3d = low_3d - start_3d
pad_right_3d = end_3d - high_3d
if pad_left_3d != 0 or pad_right_3d != 0:
self.batch_3d = np.pad(seq_3d[low_3d:high_3d],
((pad_left_3d, pad_right_3d), (0, 0), (0, 0)), 'edge')
else:
self.batch_3d = seq_3d[low_3d:high_3d]
if flip:
self.batch_3d[ :, :, 0] *= -1
self.batch_3d[ :, self.joints_left + self.joints_right] = \
self.batch_3d[ :, self.joints_right + self.joints_left]
if reverse:
self.batch_3d = self.batch_3d[::-1].copy()
if self.cameras is not None:
self.batch_cam = self.cameras[seq_name].copy()
if flip:
self.batch_cam[ 2] *= -1
self.batch_cam[ 7] *= -1
if self.train == True:
if self.MAE:
return np.zeros(9), self.batch_2d.copy(), seq, subject, int(cam_index)
if self.poses_3d is None and self.cameras is None:
return None, None, self.batch_2d.copy(), seq, subject, int(cam_index)
elif self.poses_3d is not None and self.cameras is None:
return np.zeros(9), self.batch_3d.copy(), self.batch_2d.copy(),seq, subject, int(cam_index)
elif self.poses_3d is None:
return self.batch_cam, None, self.batch_2d.copy(),seq, subject, int(cam_index)
else:
return self.batch_cam, self.batch_3d.copy(), self.batch_2d.copy(),seq, subject, int(cam_index)
else:
if self.MAE:
return np.zeros(9), self.batch_2d.copy(), seq_name, None, None
else:
return np.zeros(9), self.batch_3d.copy(), self.batch_2d.copy(), seq_name, None, None
| 8,837 | 43.19 | 120 | py |
P-STMO | P-STMO-main/common/load_data_3dhp_mae.py |
import torch.utils.data as data
import numpy as np
from common.utils import deterministic_random
from common.camera import world_to_camera, normalize_screen_coordinates
from common.generator_3dhp import ChunkedGenerator
class Fusion(data.Dataset):
def __init__(self, opt, root_path, train=True, MAE=False):
self.data_type = opt.dataset
self.train = train
self.keypoints_name = opt.keypoints
self.root_path = root_path
self.train_list = opt.subjects_train.split(',')
self.test_list = opt.subjects_test.split(',')
self.action_filter = None if opt.actions == '*' else opt.actions.split(',')
self.downsample = opt.downsample
self.subset = opt.subset
self.stride = opt.stride
self.crop_uv = opt.crop_uv
self.test_aug = opt.test_augmentation
self.pad = opt.pad
self.MAE=MAE
if self.train:
self.poses_train, self.poses_train_2d = self.prepare_data(opt.root_path, train=True)
# self.cameras_train, self.poses_train, self.poses_train_2d = self.fetch(dataset, self.train_list,
# subset=self.subset)
self.generator = ChunkedGenerator(opt.batchSize // opt.stride, None, self.poses_train,
self.poses_train_2d, None, chunk_length=self.stride, pad=self.pad,
augment=opt.data_augmentation, reverse_aug=opt.reverse_augmentation,
kps_left=self.kps_left, kps_right=self.kps_right,
joints_left=self.joints_left,
joints_right=self.joints_right, out_all=opt.out_all, MAE=MAE, train = True)
print('INFO: Training on {} frames'.format(self.generator.num_frames()))
else:
self.poses_test, self.poses_test_2d, self.valid_frame = self.prepare_data(opt.root_path, train=False)
# self.cameras_test, self.poses_test, self.poses_test_2d = self.fetch(dataset, self.test_list,
# subset=self.subset)
self.generator = ChunkedGenerator(opt.batchSize // opt.stride, None, self.poses_test,
self.poses_test_2d, self.valid_frame,
pad=self.pad, augment=False, kps_left=self.kps_left,
kps_right=self.kps_right, joints_left=self.joints_left,
joints_right=self.joints_right, MAE=MAE, train = False)
self.key_index = self.generator.saved_index
print('INFO: Testing on {} frames'.format(self.generator.num_frames()))
def prepare_data(self, path, train=True):
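        # Load the preprocessed 3DHP .npz files: all 3D joints except the root
        # (index 14) are expressed relative to it, and 2D joints are normalized to
        # [-1, 1] screen coordinates; the test split additionally returns a
        # per-frame validity mask.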
out_poses_3d = {}
out_poses_2d = {}
valid_frame={}
self.kps_left, self.kps_right = [5, 6, 7, 11, 12, 13], [2, 3, 4, 8, 9, 10]
self.joints_left, self.joints_right = [5, 6, 7, 11, 12, 13], [2, 3, 4, 8, 9, 10]
if train == True:
data = np.load(path+"data_train_3dhp.npz",allow_pickle=True)['data'].item()
for seq in data.keys():
for cam in data[seq][0].keys():
anim = data[seq][0][cam]
subject_name, seq_name = seq.split(" ")
data_3d = anim['data_3d']
data_3d[:, :14] -= data_3d[:, 14:15]
data_3d[:, 15:] -= data_3d[:, 14:15]
out_poses_3d[(subject_name, seq_name, cam)] = data_3d
data_2d = anim['data_2d']
data_2d[..., :2] = normalize_screen_coordinates(data_2d[..., :2], w=2048, h=2048)
out_poses_2d[(subject_name, seq_name, cam)]=data_2d
return out_poses_3d, out_poses_2d
else:
data = np.load(path + "data_test_3dhp.npz", allow_pickle=True)['data'].item()
for seq in data.keys():
anim = data[seq]
valid_frame[seq] = anim["valid"]
data_3d = anim['data_3d']
data_3d[:, :14] -= data_3d[:, 14:15]
data_3d[:, 15:] -= data_3d[:, 14:15]
out_poses_3d[seq] = data_3d
data_2d = anim['data_2d']
if seq == "TS5" or seq == "TS6":
width = 1920
height = 1080
else:
width = 2048
height = 2048
data_2d[..., :2] = normalize_screen_coordinates(data_2d[..., :2], w=width, h=height)
out_poses_2d[seq] = data_2d
return out_poses_3d, out_poses_2d, valid_frame
def fetch(self, dataset, subjects, subset=1, parse_3d_poses=True):
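        # Note: this fetch() follows the Human3.6M loader and reads self.keypoints,
        # which this 3DHP dataset class never sets; __init__ relies on prepare_data()
        # instead, so this method is effectively unused here.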
out_poses_3d = {}
out_poses_2d = {}
out_camera_params = {}
for subject in subjects:
for action in self.keypoints[subject].keys():
if self.action_filter is not None:
found = False
for a in self.action_filter:
if action.startswith(a):
found = True
break
if not found:
continue
poses_2d = self.keypoints[subject][action]
for i in range(len(poses_2d)):
out_poses_2d[(subject, action, i)] = poses_2d[i]
if subject in dataset.cameras():
cams = dataset.cameras()[subject]
assert len(cams) == len(poses_2d), 'Camera count mismatch'
for i, cam in enumerate(cams):
if 'intrinsic' in cam:
out_camera_params[(subject, action, i)] = cam['intrinsic']
if parse_3d_poses and 'positions_3d' in dataset[subject][action]:
poses_3d = dataset[subject][action]['positions_3d']
assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'
for i in range(len(poses_3d)):
out_poses_3d[(subject, action, i)] = poses_3d[i]
if len(out_camera_params) == 0:
out_camera_params = None
if len(out_poses_3d) == 0:
out_poses_3d = None
stride = self.downsample
if subset < 1:
for key in out_poses_2d.keys():
n_frames = int(round(len(out_poses_2d[key]) // stride * subset) * stride)
start = deterministic_random(0, len(out_poses_2d[key]) - n_frames + 1, str(len(out_poses_2d[key])))
out_poses_2d[key] = out_poses_2d[key][start:start + n_frames:stride]
if out_poses_3d is not None:
out_poses_3d[key] = out_poses_3d[key][start:start + n_frames:stride]
elif stride > 1:
for key in out_poses_2d.keys():
out_poses_2d[key] = out_poses_2d[key][::stride]
if out_poses_3d is not None:
out_poses_3d[key] = out_poses_3d[key][::stride]
return out_camera_params, out_poses_3d, out_poses_2d
def __len__(self):
return len(self.generator.pairs)
#return 200
def __getitem__(self, index):
seq_name, start_3d, end_3d, flip, reverse = self.generator.pairs[index]
if self.MAE:
cam, input_2D, seq, subject, cam_ind = self.generator.get_batch(seq_name, start_3d, end_3d, flip,
reverse)
if self.train == False and self.test_aug:
_, input_2D_aug, _, _,_ = self.generator.get_batch(seq_name, start_3d, end_3d, flip=True, reverse=reverse)
input_2D = np.concatenate((np.expand_dims(input_2D,axis=0),np.expand_dims(input_2D_aug,axis=0)),0)
else:
cam, gt_3D, input_2D, seq, subject, cam_ind = self.generator.get_batch(seq_name, start_3d, end_3d, flip, reverse)
if self.train == False and self.test_aug:
_, _, input_2D_aug, _, _,_ = self.generator.get_batch(seq_name, start_3d, end_3d, flip=True, reverse=reverse)
input_2D = np.concatenate((np.expand_dims(input_2D,axis=0),np.expand_dims(input_2D_aug,axis=0)),0)
bb_box = np.array([0, 0, 1, 1])
input_2D_update = input_2D
        scale = float(1.0)  # np.float was removed in NumPy 1.24+; use the built-in float
if self.MAE:
if self.train == True:
return cam, input_2D_update, seq, subject, scale, bb_box, cam_ind
else:
return cam, input_2D_update, seq, scale, bb_box
else:
if self.train == True:
return cam, gt_3D, input_2D_update, seq, subject, scale, bb_box, cam_ind
else:
return cam, gt_3D, input_2D_update, seq, scale, bb_box
| 9,051 | 45.420513 | 125 | py |
P-STMO | P-STMO-main/common/camera.py | import sys
import numpy as np
import torch
def normalize_screen_coordinates(X, w, h):
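    # Map pixel coordinates to [-1, 1] in x while preserving the aspect ratio in y:
    # e.g. for a 1000x1002 image the point (500, 501) maps to approximately (0, 0).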
assert X.shape[-1] == 2
return X / w * 2 - [1, h / w]
def image_coordinates(X, w, h):
assert X.shape[-1] == 2
# Reverse camera frame normalization
return (X + [1, h / w]) * w / 2
def world_to_camera(X, R, t):
Rt = wrap(qinverse, R)
return wrap(qrot, np.tile(Rt, (*X.shape[:-1], 1)), X - t)
def camera_to_world(X, R, t):
return wrap(qrot, np.tile(R, (*X.shape[:-1], 1)), X) + t
def wrap(func, *args, unsqueeze=False):
args = list(args)
for i, arg in enumerate(args):
if type(arg) == np.ndarray:
args[i] = torch.from_numpy(arg)
if unsqueeze:
args[i] = args[i].unsqueeze(0)
result = func(*args)
if isinstance(result, tuple):
result = list(result)
for i, res in enumerate(result):
if type(res) == torch.Tensor:
if unsqueeze:
res = res.squeeze(0)
result[i] = res.numpy()
return tuple(result)
elif type(result) == torch.Tensor:
if unsqueeze:
result = result.squeeze(0)
return result.numpy()
else:
return result
def qrot(q, v):
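    # Rotate vectors v by quaternions q = (w, x, y, z) using
    # v' = v + 2 * (w * (q_vec x v) + q_vec x (q_vec x v)),
    # which avoids building explicit rotation matrices.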
assert q.shape[-1] == 4
assert v.shape[-1] == 3
assert q.shape[:-1] == v.shape[:-1]
qvec = q[..., 1:]
uv = torch.cross(qvec, v, dim=len(q.shape) - 1)
uuv = torch.cross(qvec, uv, dim=len(q.shape) - 1)
return (v + 2 * (q[..., :1] * uv + uuv))
def qinverse(q, inplace=False):
if inplace:
q[..., 1:] *= -1
return q
else:
w = q[..., :1]
xyz = q[..., 1:]
return torch.cat((w, -xyz), dim=len(q.shape) - 1)
def get_uvd2xyz(uvd, gt_3D, cam):
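    # Back-project (u, v, depth) predictions to root-relative camera-space XYZ with a
    # pinhole model: x = (u - c_x) * z / f_x, y = (v - c_y) * z / f_y. The root depth
    # is taken from the ground-truth 3D pose and the remaining joints use predicted
    # depth offsets added to that root depth.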
N, T, V,_ = uvd.size()
dec_out_all = uvd.view(-1, T, V, 3).clone()
root = gt_3D[:, :, 0, :].unsqueeze(-2).repeat(1, 1, V, 1).clone()
enc_in_all = uvd[:, :, :, :2].view(-1, T, V, 2).clone()
cam_f_all = cam[..., :2].view(-1,1,1,2).repeat(1,T,V,1)
cam_c_all = cam[..., 2:4].view(-1,1,1,2).repeat(1,T,V,1)
z_global = dec_out_all[:, :, :, 2]
z_global[:, :, 0] = root[:, :, 0, 2]
z_global[:, :, 1:] = dec_out_all[:, :, 1:, 2] + root[:, :, 1:, 2]
z_global = z_global.unsqueeze(-1)
uv = enc_in_all - cam_c_all
xy = uv * z_global.repeat(1, 1, 1, 2) / cam_f_all
xyz_global = torch.cat((xy, z_global), -1)
xyz_offset = (xyz_global - xyz_global[:, :, 0, :].unsqueeze(-2).repeat(1, 1, V, 1))
return xyz_offset
| 2,451 | 25.652174 | 87 | py |
P-STMO | P-STMO-main/common/mocap_dataset.py |
class MocapDataset:
def __init__(self, fps, skeleton):
self._skeleton = skeleton
self._fps = fps
self._data = None
self._cameras = None
def remove_joints(self, joints_to_remove):
kept_joints = self._skeleton.remove_joints(joints_to_remove)
for subject in self._data.keys():
for action in self._data[subject].keys():
s = self._data[subject][action]
s['positions'] = s['positions'][:, kept_joints]
def __getitem__(self, key):
return self._data[key]
def subjects(self):
return self._data.keys()
def fps(self):
return self._fps
def skeleton(self):
return self._skeleton
def cameras(self):
return self._cameras
def supports_semi_supervised(self):
return False
| 842 | 22.416667 | 68 | py |
P-STMO | P-STMO-main/common/generator_tds.py | import numpy as np
class ChunkedGenerator:
def __init__(self, batch_size, cameras, poses_3d, poses_2d,
chunk_length=1, pad=0, causal_shift=0,
shuffle=False, random_seed=1234,
augment=False, reverse_aug= False,kps_left=None, kps_right=None, joints_left=None, joints_right=None,
endless=False, out_all = False, MAE=False, tds=1):
assert poses_3d is None or len(poses_3d) == len(poses_2d), (len(poses_3d), len(poses_2d))
assert cameras is None or len(cameras) == len(poses_2d)
pairs = []
self.saved_index = {}
start_index = 0
for key in poses_2d.keys():
            assert poses_3d is None or poses_2d[key].shape[0] == poses_3d[key].shape[0]
n_chunks = (poses_2d[key].shape[0] + chunk_length - 1) // chunk_length
offset = (n_chunks * chunk_length - poses_2d[key].shape[0]) // 2
bounds = np.arange(n_chunks + 1) * chunk_length - offset
augment_vector = np.full(len(bounds - 1), False, dtype=bool)
reverse_augment_vector = np.full(len(bounds - 1), False, dtype=bool)
keys = np.tile(np.array(key).reshape([1,3]),(len(bounds - 1),1))
pairs += list(zip(keys, bounds[:-1], bounds[1:], augment_vector,reverse_augment_vector))
if reverse_aug:
pairs += list(zip(keys, bounds[:-1], bounds[1:], augment_vector, ~reverse_augment_vector))
if augment:
if reverse_aug:
pairs += list(zip(keys, bounds[:-1], bounds[1:], ~augment_vector,~reverse_augment_vector))
else:
pairs += list(zip(keys, bounds[:-1], bounds[1:], ~augment_vector, reverse_augment_vector))
end_index = start_index + poses_3d[key].shape[0]
self.saved_index[key] = [start_index,end_index]
start_index = start_index + poses_3d[key].shape[0]
if cameras is not None:
self.batch_cam = np.empty((batch_size, cameras[key].shape[-1]))
if poses_3d is not None:
self.batch_3d = np.empty((batch_size, chunk_length, poses_3d[key].shape[-2], poses_3d[key].shape[-1]))
self.batch_2d = np.empty((batch_size, chunk_length + 2 * pad, poses_2d[key].shape[-2], poses_2d[key].shape[-1]))
self.num_batches = (len(pairs) + batch_size - 1) // batch_size
self.batch_size = batch_size
self.random = np.random.RandomState(random_seed)
self.pairs = pairs
self.shuffle = shuffle
self.pad = pad
self.causal_shift = causal_shift
self.endless = endless
self.state = None
self.cameras = cameras
if cameras is not None:
self.cameras = cameras
self.poses_3d = poses_3d
self.poses_2d = poses_2d
self.augment = augment
self.kps_left = kps_left
self.kps_right = kps_right
self.joints_left = joints_left
self.joints_right = joints_right
self.out_all = out_all
self.MAE = MAE
self.tds = tds
def num_frames(self):
return self.num_batches * self.batch_size
def random_state(self):
return self.random
def set_random_state(self, random):
self.random = random
def augment_enabled(self):
return self.augment
def next_pairs(self):
if self.state is None:
if self.shuffle:
pairs = self.random.permutation(self.pairs)
else:
pairs = self.pairs
return 0, pairs
else:
return self.state
def get_batch(self, seq_i, start_3d, end_3d, flip, reverse):
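        # Same windowing as the plain generator, but frames are sampled every `tds`
        # steps (temporal downsampling): the pad is scaled by tds, so the receptive
        # field covers a longer time span with the same number of input frames.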
subject,action,cam_index = seq_i
seq_name = (subject,action,int(cam_index))
start_2d = start_3d - self.pad * self.tds - self.causal_shift
end_2d = end_3d + self.pad * self.tds - self.causal_shift
seq_2d = self.poses_2d[seq_name].copy()
low_2d = max(start_2d, 0)
high_2d = min(end_2d, seq_2d.shape[0])
pad_left_2d = low_2d - start_2d
pad_right_2d = end_2d - high_2d
if pad_left_2d != 0:
data_pad = np.repeat(seq_2d[0:1],pad_left_2d,axis=0)
new_data = np.concatenate((data_pad, seq_2d[low_2d:high_2d]), axis=0)
self.batch_2d = new_data[::self.tds]
#self.batch_2d = np.pad(seq_2d[low_2d:high_2d], ((pad_left_2d, pad_right_2d), (0, 0), (0, 0)), 'edge')
elif pad_right_2d != 0:
data_pad = np.repeat(seq_2d[seq_2d.shape[0]-1:seq_2d.shape[0]], pad_right_2d, axis=0)
new_data = np.concatenate((seq_2d[low_2d:high_2d], data_pad), axis=0)
self.batch_2d = new_data[::self.tds]
#self.batch_2d = np.pad(seq_2d[low_2d:high_2d], ((pad_left_2d, pad_right_2d), (0, 0), (0, 0)), 'edge')
else:
self.batch_2d = seq_2d[low_2d:high_2d:self.tds]
if flip:
self.batch_2d[ :, :, 0] *= -1
self.batch_2d[ :, self.kps_left + self.kps_right] = self.batch_2d[ :,
self.kps_right + self.kps_left]
if reverse:
self.batch_2d = self.batch_2d[::-1].copy()
if not self.MAE:
if self.poses_3d is not None:
seq_3d = self.poses_3d[seq_name].copy()
if self.out_all:
low_3d = low_2d
high_3d = high_2d
pad_left_3d = pad_left_2d
pad_right_3d = pad_right_2d
else:
low_3d = max(start_3d, 0)
high_3d = min(end_3d, seq_3d.shape[0])
pad_left_3d = low_3d - start_3d
pad_right_3d = end_3d - high_3d
if pad_left_3d != 0:
data_pad = np.repeat(seq_3d[0:1], pad_left_3d, axis=0)
new_data = np.concatenate((data_pad, seq_3d[low_3d:high_3d]), axis=0)
self.batch_3d = new_data[::self.tds]
elif pad_right_3d != 0:
data_pad = np.repeat(seq_3d[seq_3d.shape[0] - 1:seq_3d.shape[0]], pad_right_3d, axis=0)
new_data = np.concatenate((seq_3d[low_3d:high_3d], data_pad), axis=0)
self.batch_3d = new_data[::self.tds]
# self.batch_3d = np.pad(seq_3d[low_3d:high_3d],
# ((pad_left_3d, pad_right_3d), (0, 0), (0, 0)), 'edge')
else:
self.batch_3d = seq_3d[low_3d:high_3d:self.tds]
if flip:
self.batch_3d[ :, :, 0] *= -1
self.batch_3d[ :, self.joints_left + self.joints_right] = \
self.batch_3d[ :, self.joints_right + self.joints_left]
if reverse:
self.batch_3d = self.batch_3d[::-1].copy()
if self.cameras is not None:
self.batch_cam = self.cameras[seq_name].copy()
if flip:
self.batch_cam[ 2] *= -1
self.batch_cam[ 7] *= -1
if self.MAE:
return self.batch_cam, self.batch_2d.copy(), action, subject, int(cam_index)
if self.poses_3d is None and self.cameras is None:
return None, None, self.batch_2d.copy(), action, subject, int(cam_index)
elif self.poses_3d is not None and self.cameras is None:
return np.zeros(9), self.batch_3d.copy(), self.batch_2d.copy(),action, subject, int(cam_index)
elif self.poses_3d is None:
return self.batch_cam, None, self.batch_2d.copy(),action, subject, int(cam_index)
else:
return self.batch_cam, self.batch_3d.copy(), self.batch_2d.copy(),action, subject, int(cam_index)
| 7,836 | 42.06044 | 120 | py |
P-STMO | P-STMO-main/common/utils.py | import torch
import numpy as np
import hashlib
from torch.autograd import Variable
import os
def deterministic_random(min_value, max_value, data):
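    # Derive a pseudo-random integer in the requested range from a SHA-256 hash of
    # `data`, so the same string always yields the same split/subsample.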
digest = hashlib.sha256(data.encode()).digest()
raw_value = int.from_bytes(digest[:4], byteorder='little', signed=False)
return int(raw_value / (2 ** 32 - 1) * (max_value - min_value)) + min_value
def mpjpe_cal(predicted, target):
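    # MPJPE (protocol #1): mean Euclidean distance between predicted and target
    # joints, averaged over joints, frames and the batch.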
assert predicted.shape == target.shape
return torch.mean(torch.norm(predicted - target, dim=len(target.shape) - 1))
def test_calculation(predicted, target, action, error_sum, data_type, subject, MAE=False):
error_sum = mpjpe_by_action_p1(predicted, target, action, error_sum)
if not MAE:
error_sum = mpjpe_by_action_p2(predicted, target, action, error_sum)
return error_sum
def mpjpe_by_action_p1(predicted, target, action, action_error_sum):
assert predicted.shape == target.shape
batch_num = predicted.size(0)
frame_num = predicted.size(1)
dist = torch.mean(torch.norm(predicted - target, dim=len(target.shape) - 1), dim=len(target.shape) - 2)
if len(set(list(action))) == 1:
end_index = action[0].find(' ')
if end_index != -1:
action_name = action[0][:end_index]
else:
action_name = action[0]
action_error_sum[action_name]['p1'].update(torch.mean(dist).item()*batch_num*frame_num, batch_num*frame_num)
else:
for i in range(batch_num):
end_index = action[i].find(' ')
if end_index != -1:
action_name = action[i][:end_index]
else:
action_name = action[i]
action_error_sum[action_name]['p1'].update(torch.mean(dist[i]).item()*frame_num, frame_num)
return action_error_sum
def mpjpe_by_action_p2(predicted, target, action, action_error_sum):
assert predicted.shape == target.shape
num = predicted.size(0)
pred = predicted.detach().cpu().numpy().reshape(-1, predicted.shape[-2], predicted.shape[-1])
gt = target.detach().cpu().numpy().reshape(-1, target.shape[-2], target.shape[-1])
dist = p_mpjpe(pred, gt)
if len(set(list(action))) == 1:
end_index = action[0].find(' ')
if end_index != -1:
action_name = action[0][:end_index]
else:
action_name = action[0]
action_error_sum[action_name]['p2'].update(np.mean(dist) * num, num)
else:
for i in range(num):
end_index = action[i].find(' ')
if end_index != -1:
action_name = action[i][:end_index]
else:
action_name = action[i]
action_error_sum[action_name]['p2'].update(np.mean(dist), 1)
return action_error_sum
def p_mpjpe(predicted, target):
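    # P-MPJPE (protocol #2): rigidly align the prediction to the target with a
    # similarity transform (scale, rotation via SVD/orthogonal Procrustes, and
    # translation) before measuring the per-joint error.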
assert predicted.shape == target.shape
muX = np.mean(target, axis=1, keepdims=True)
muY = np.mean(predicted, axis=1, keepdims=True)
X0 = target - muX
Y0 = predicted - muY
normX = np.sqrt(np.sum(X0 ** 2, axis=(1, 2), keepdims=True))
normY = np.sqrt(np.sum(Y0 ** 2, axis=(1, 2), keepdims=True))
X0 /= normX
Y0 /= normY
H = np.matmul(X0.transpose(0, 2, 1), Y0)
U, s, Vt = np.linalg.svd(H)
V = Vt.transpose(0, 2, 1)
R = np.matmul(V, U.transpose(0, 2, 1))
sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1))
V[:, :, -1] *= sign_detR
s[:, -1] *= sign_detR.flatten()
R = np.matmul(V, U.transpose(0, 2, 1))
tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2)
a = tr * normX / normY
t = muX - a * np.matmul(muY, R)
predicted_aligned = a * np.matmul(predicted, R) + t
return np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape) - 1), axis=len(target.shape) - 2)
def define_actions( action ):
actions = ["Directions","Discussion","Eating","Greeting",
"Phoning","Photo","Posing","Purchases",
"Sitting","SittingDown","Smoking","Waiting",
"WalkDog","Walking","WalkTogether"]
if action == "All" or action == "all" or action == '*':
return actions
    if action not in actions:
        raise ValueError("Unrecognized action: %s" % action)
return [action]
def define_error_list(actions):
error_sum = {}
error_sum.update({actions[i]: {'p1':AccumLoss(), 'p2':AccumLoss()} for i in range(len(actions))})
return error_sum
class AccumLoss(object):
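    # Running accumulator: update(val, n) adds a pre-summed value over n samples
    # and keeps the overall average in self.avg.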
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val
self.count += n
self.avg = self.sum / self.count
def get_varialbe(split, target):
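    # Move every tensor in `target` onto the GPU as float tensors (wrapped in
    # Variable for compatibility with older PyTorch); gradients are not tracked
    # for these inputs.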
num = len(target)
var = []
if split == 'train':
for i in range(num):
temp = Variable(target[i], requires_grad=False).contiguous().type(torch.cuda.FloatTensor)
var.append(temp)
else:
for i in range(num):
temp = Variable(target[i]).contiguous().cuda().type(torch.cuda.FloatTensor)
var.append(temp)
return var
def print_error(data_type, action_error_sum, is_train):
mean_error_p1, mean_error_p2 = print_error_action(action_error_sum, is_train)
return mean_error_p1, mean_error_p2
def print_error_action(action_error_sum, is_train):
mean_error_each = {'p1': 0.0, 'p2': 0.0}
mean_error_all = {'p1': AccumLoss(), 'p2': AccumLoss()}
if is_train == 0:
print("{0:=^12} {1:=^10} {2:=^8}".format("Action", "p#1 mm", "p#2 mm"))
for action, value in action_error_sum.items():
if is_train == 0:
print("{0:<12} ".format(action), end="")
mean_error_each['p1'] = action_error_sum[action]['p1'].avg * 1000.0
mean_error_all['p1'].update(mean_error_each['p1'], 1)
mean_error_each['p2'] = action_error_sum[action]['p2'].avg * 1000.0
mean_error_all['p2'].update(mean_error_each['p2'], 1)
if is_train == 0:
print("{0:>6.2f} {1:>10.2f}".format(mean_error_each['p1'], mean_error_each['p2']))
if is_train == 0:
print("{0:<12} {1:>6.2f} {2:>10.2f}".format("Average", mean_error_all['p1'].avg, \
mean_error_all['p2'].avg))
return mean_error_all['p1'].avg, mean_error_all['p2'].avg
def save_model(previous_name, save_dir,epoch, data_threshold, model, model_name):
# if os.path.exists(previous_name):
# os.remove(previous_name)
torch.save(model.state_dict(),
'%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100))
previous_name = '%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100)
return previous_name
def save_model_new(save_dir,epoch, data_threshold, lr, optimizer, model, model_name):
# if os.path.exists(previous_name):
# os.remove(previous_name)
# torch.save(model.state_dict(),
# '%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100))
torch.save({
'epoch': epoch,
'lr': lr,
'optimizer': optimizer.state_dict(),
'model_pos': model.state_dict(),
},
'%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100))
| 7,304 | 31.039474 | 118 | py |
P-STMO | P-STMO-main/common/data_to_npz_3dhp_test.py | import os
import numpy as np
from common.utils_3dhp import *
import h5py
import scipy.io as scio
data_path=r'F:\mpi_inf_3dhp\mpi_inf_3dhp_test_set'
cam_set = [0, 1, 2, 4, 5, 6, 7, 8]
# joint_set = [8, 6, 15, 16, 17, 10, 11, 12, 24, 25, 26, 19, 20, 21, 5, 4, 7]
joint_set = [7, 5, 14, 15, 16, 9, 10, 11, 23, 24, 25, 18, 19, 20, 4, 3, 6]
dic_seq={}
for root, dirs, files in os.walk(data_path):
for file in files:
if file.endswith("mat"):
path = root.split("\\")
subject = path[-1][2]
print("loading %s..."%path[-1])
# temp = mpii_get_sequence_info(subject, seq)
#
# frames = temp[0]
# fps = temp[1]
data = h5py.File(os.path.join(root, file))
valid_frame = np.squeeze(data['valid_frame'].value)
data_2d = np.squeeze(data['annot2'].value)
data_3d = np.squeeze(data['univ_annot3'].value)
dic_data = {"data_2d":data_2d,"data_3d":data_3d, "valid":valid_frame}
dic_seq.update({path[-1]:dic_data})
np.savez_compressed('data_test_3dhp', data=dic_seq)
| 1,133 | 20.807692 | 81 | py |
P-STMO | P-STMO-main/common/data_to_npz_3dhp.py | import os
import numpy as np
from common.utils_3dhp import *
import scipy.io as scio
data_path=r'F:\mpi_inf_3dhp\data'
cam_set = [0, 1, 2, 4, 5, 6, 7, 8]
# joint_set = [8, 6, 15, 16, 17, 10, 11, 12, 24, 25, 26, 19, 20, 21, 5, 4, 7]
joint_set = [7, 5, 14, 15, 16, 9, 10, 11, 23, 24, 25, 18, 19, 20, 4, 3, 6]
dic_seq={}
for root, dirs, files in os.walk(data_path):
for file in files:
if file.endswith("mat"):
path = root.split("\\")
subject = path[-2][1]
seq = path[-1][3]
print("loading %s %s..."%(path[-2],path[-1]))
temp = mpii_get_sequence_info(subject, seq)
frames = temp[0]
fps = temp[1]
data = scio.loadmat(os.path.join(root, file))
cameras = data['cameras'][0]
for cam_idx in range(len(cameras)):
assert cameras[cam_idx] == cam_idx
data_2d = data['annot2'][cam_set]
data_3d = data['univ_annot3'][cam_set]
dic_cam = {}
a = len(data_2d)
for cam_idx in range(len(data_2d)):
data_2d_cam = data_2d[cam_idx][0]
data_3d_cam = data_3d[cam_idx][0]
data_2d_cam = data_2d_cam.reshape(data_2d_cam.shape[0], 28,2)
data_3d_cam = data_3d_cam.reshape(data_3d_cam.shape[0], 28,3)
data_2d_select = data_2d_cam[:frames, joint_set]
data_3d_select = data_3d_cam[:frames, joint_set]
dic_data = {"data_2d":data_2d_select,"data_3d":data_3d_select}
dic_cam.update({str(cam_set[cam_idx]):dic_data})
dic_seq.update({path[-2]+" "+path[-1]:[dic_cam, fps]})
np.savez_compressed('data_train_3dhp', data=dic_seq)
| 1,764 | 25.343284 | 78 | py |
P-STMO | P-STMO-main/common/opt.py | import argparse
import os
import math
import time
import torch
class opts():
def __init__(self):
self.parser = argparse.ArgumentParser()
def init(self):
self.parser.add_argument('--layers', default=3, type=int)
self.parser.add_argument('--channel', default=256, type=int)
self.parser.add_argument('--d_hid', default=512, type=int)
self.parser.add_argument('--dataset', type=str, default='h36m')
self.parser.add_argument('-k', '--keypoints', default='cpn_ft_h36m_dbb', type=str)
self.parser.add_argument('--data_augmentation', type=bool, default=True)
self.parser.add_argument('--reverse_augmentation', type=bool, default=False)
self.parser.add_argument('--test_augmentation', type=bool, default=True)
self.parser.add_argument('--crop_uv', type=int, default=0)
self.parser.add_argument('--root_path', type=str, default='dataset/')
self.parser.add_argument('-a', '--actions', default='*', type=str)
self.parser.add_argument('--downsample', default=1, type=int)
self.parser.add_argument('--subset', default=1, type=float)
self.parser.add_argument('-s', '--stride', default=1, type=int)
self.parser.add_argument('--gpu', default='0', type=str, help='')
self.parser.add_argument('--train', type=int, default=0)
self.parser.add_argument('--test', type=int, default=1)
self.parser.add_argument('--nepoch', type=int, default=80)
self.parser.add_argument('-b','--batchSize', type=int, default=160)
self.parser.add_argument('--lr', type=float, default=1e-3)
self.parser.add_argument('--lr_refine', type=float, default=1e-5)
self.parser.add_argument('--lr_decay_large', type=float, default=0.5)
self.parser.add_argument('--large_decay_epoch', type=int, default=80)
self.parser.add_argument('--workers', type=int, default=8)
self.parser.add_argument('-lrd', '--lr_decay', default=0.95, type=float)
self.parser.add_argument('-f','--frames', type=int, default=243)
self.parser.add_argument('--pad', type=int, default=121)
self.parser.add_argument('--refine', action='store_true')
self.parser.add_argument('--reload', type=int, default=0)
self.parser.add_argument('--refine_reload', type=int, default=0)
self.parser.add_argument('-c','--checkpoint', type=str, default='model')
self.parser.add_argument('--previous_dir', type=str, default='')
self.parser.add_argument('--n_joints', type=int, default=17)
self.parser.add_argument('--out_joints', type=int, default=17)
self.parser.add_argument('--out_all', type=int, default=1)
self.parser.add_argument('--in_channels', type=int, default=2)
self.parser.add_argument('--out_channels', type=int, default=3)
self.parser.add_argument('-previous_best_threshold', type=float, default= math.inf)
self.parser.add_argument('-previous_name', type=str, default='')
self.parser.add_argument('--previous_refine_name', type=str, default='')
self.parser.add_argument('--manualSeed', type=int, default=1)
self.parser.add_argument('--MAE', action='store_true')
self.parser.add_argument('-tmr','--temporal_mask_rate', type=float, default=0)
self.parser.add_argument('-smn', '--spatial_mask_num', type=int, default=0)
self.parser.add_argument('-tds', '--t_downsample', type=int, default=1)
self.parser.add_argument('--MAE_reload', type=int, default=0)
self.parser.add_argument('-r', '--resume', action='store_true')
def parse(self):
self.init()
self.opt = self.parser.parse_args()
self.opt.pad = (self.opt.frames-1) // 2
stride_num = {
'9': [1, 3, 3],
'27': [3, 3, 3],
'351': [3, 9, 13],
'81': [3, 3, 3, 3],
'243': [3, 3, 3, 3, 3],
}
if str(self.opt.frames) in stride_num:
self.opt.stride_num = stride_num[str(self.opt.frames)]
else:
self.opt.stride_num = None
print('no stride_num')
exit()
self.opt.subjects_train = 'S1,S5,S6,S7,S8'
self.opt.subjects_test = 'S9,S11'
#self.opt.subjects_test = 'S11'
#if self.opt.train:
logtime = time.strftime('%m%d_%H%M_%S_')
ckp_suffix = ''
if self.opt.refine:
ckp_suffix='_refine'
elif self.opt.MAE:
ckp_suffix = '_pretrain'
else:
ckp_suffix = '_STMO'
self.opt.checkpoint = 'checkpoint/'+self.opt.checkpoint + '_%d'%(self.opt.pad*2+1) + \
'%s'%ckp_suffix
if not os.path.exists(self.opt.checkpoint):
os.makedirs(self.opt.checkpoint)
if self.opt.train:
args = dict((name, getattr(self.opt, name)) for name in dir(self.opt)
if not name.startswith('_'))
file_name = os.path.join(self.opt.checkpoint, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('==> Args:\n')
for k, v in sorted(args.items()):
opt_file.write(' %s: %s\n' % (str(k), str(v)))
opt_file.write('==> Args:\n')
return self.opt
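# Illustrative usage sketch: a run script would typically build its configuration as
# below (assumes the repository root is on PYTHONPATH; attribute names such as
# frames, pad, and checkpoint correspond to the arguments defined above).
#     from common.opt import opts
#     opt = opts().parse()
#     print(opt.frames, opt.pad, opt.checkpoint)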
| 5,367 | 42.290323 | 94 | py |
P-STMO | P-STMO-main/common/draw_3d_keypoint_3dhp.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
from mpl_toolkits.mplot3d import Axes3D
import scipy.io as scio
parent = [16, 15, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 14, 14, 1]
data = scio.loadmat('../checkpoint/inference_data.mat')
joints_right=[2, 3, 4, 8, 9, 10]
#data_3d = data["TS1"][:,:,:,100]
#data_3d = data["TS4"][:,:,:,80]
data_3d = data["TS6"][:,:,:,10]
data_3d = np.squeeze(data_3d,axis = 2)
data_3d=np.transpose(data_3d,(1,0))
data_3d = data_3d - data_3d[14:15]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xy_radius=1000
radius=1500
ax.view_init(elev=15., azim=-70)
ax.set_xlim3d([-xy_radius / 2, xy_radius / 2])
ax.set_zlim3d([-radius / 2, radius / 2])
ax.set_ylim3d([-xy_radius / 2, xy_radius / 2])
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
ax.dist = 8
ax.set_title("Ours") # , pad=35
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.get_zaxis().set_visible(False)
#ax.set_axis_off()
for i in range(17):
col = 'yellowgreen' if i in joints_right else 'midnightblue'
ax.plot([data_3d[i, 0], data_3d[parent[i], 0]], [data_3d[i, 2], data_3d[parent[i], 2]], [-data_3d[i, 1], -data_3d[parent[i], 1]], c=col )
#ax.annotate(s=str(i), x=data_2d[i,0], y=data_2d[i,1]-10,color='white', fontsize='3')
#plt.show()
plt.savefig("./3dhp_test_3d.png", bbox_inches="tight", pad_inches=0.0, dpi=300)
plt.close() | 1,455 | 28.714286 | 141 | py |
P-STMO | P-STMO-main/common/utils_3dhp.py |
def mpii_get_sequence_info(subject_id, sequence):
switcher = {
"1 1": [6416,25],
"1 2": [12430,50],
"2 1": [6502,25],
"2 2": [6081,25],
"3 1": [12488,50],
"3 2": [12283,50],
"4 1": [6171,25],
"4 2": [6675,25],
"5 1": [12820,50],
"5 2": [12312,50],
"6 1": [6188,25],
"6 2": [6145,25],
"7 1": [6239,25],
"7 2": [6320,25],
"8 1": [6468,25],
"8 2": [6054,25],
}
return switcher.get(subject_id+" "+sequence)
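# Example (values taken from the table above): mpii_get_sequence_info("1", "2")
# returns [12430, 50], i.e. [frame count, fps], which is how the result is
# unpacked in data_to_npz_3dhp.py.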
| 547 | 20.92 | 49 | py |
P-STMO | P-STMO-main/common/skeleton.py |
import numpy as np
class Skeleton:
def __init__(self, parents, joints_left, joints_right):
assert len(joints_left) == len(joints_right)
self._parents = np.array(parents)
self._joints_left = joints_left
self._joints_right = joints_right
self._compute_metadata()
def num_joints(self):
return len(self._parents)
def parents(self):
return self._parents
def has_children(self):
return self._has_children
def children(self):
return self._children
def remove_joints(self, joints_to_remove):
valid_joints = []
for joint in range(len(self._parents)):
if joint not in joints_to_remove:
valid_joints.append(joint)
for i in range(len(self._parents)):
while self._parents[i] in joints_to_remove:
self._parents[i] = self._parents[self._parents[i]]
index_offsets = np.zeros(len(self._parents), dtype=int)
new_parents = []
for i, parent in enumerate(self._parents):
if i not in joints_to_remove:
new_parents.append(parent - index_offsets[parent])
else:
index_offsets[i:] += 1
self._parents = np.array(new_parents)
if self._joints_left is not None:
new_joints_left = []
for joint in self._joints_left:
if joint in valid_joints:
new_joints_left.append(joint - index_offsets[joint])
self._joints_left = new_joints_left
if self._joints_right is not None:
new_joints_right = []
for joint in self._joints_right:
if joint in valid_joints:
new_joints_right.append(joint - index_offsets[joint])
self._joints_right = new_joints_right
self._compute_metadata()
return valid_joints
def joints_left(self):
return self._joints_left
def joints_right(self):
return self._joints_right
def _compute_metadata(self):
self._has_children = np.zeros(len(self._parents)).astype(bool)
for i, parent in enumerate(self._parents):
if parent != -1:
self._has_children[parent] = True
self._children = []
for i, parent in enumerate(self._parents):
self._children.append([])
for i, parent in enumerate(self._parents):
if parent != -1:
self._children[parent].append(i)
| 2,532 | 29.518072 | 73 | py |
P-STMO | P-STMO-main/common/draw_2d_keypoint_3dhp.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.image as mpimg
import scipy.io as scio
keypoints = np.load('../dataset/data_test_3dhp.npz',allow_pickle=True)
image = mpimg.imread(r'..\3dhp_test\TS6\imageSequence\img_000061.jpg')
parents=[1,15,1,2,3,1,5,6,14,8,9,14,11,12,-1,14,15]
joints_right_2d=[2, 3, 4, 8, 9, 10]
colors_2d = np.full(17, 'midnightblue')
colors_2d[joints_right_2d] = 'yellowgreen'
data=keypoints['data'].item()
data_sequence = data["TS6"]
valid_frame = data_sequence["valid"].astype(bool)
valid_cnt = 0
image_cnt = 0
for i in range(len(valid_frame)):
if valid_frame[i] == True:
valid_cnt+=1
#TS1:101, TS4:81, TS5:71, TS6:11
if valid_cnt==11:
image_cnt = i
break
#TS1:1040, TS4:960, TS5:70, TS6:60
# equal to image_cnt
test = data_sequence['data_2d'][60]
#TS1:100, TS4:80, TS5:70, TS6:10
# equal to image_cnt-1
data_2d = data_sequence['data_2d'][valid_frame][10]
#data_2d = data["TS3"]['data_2d'][364]
plt.axis("off")
# plt.xlim(0,1000)
# plt.ylim(0,1000)
plt.imshow(image)
for j, j_parent in enumerate(parents):
if j_parent == -1:
continue
plt.plot([data_2d[j, 0], data_2d[j_parent, 0]],
[data_2d[j, 1], data_2d[j_parent, 1]], linewidth=1,color='pink')
plt.scatter(data_2d[:, 0], data_2d[:, 1], 10, color=colors_2d, edgecolors='white', zorder=10)
#plt.show()
plt.savefig("./plot/3dhp_test_2d.png", bbox_inches="tight", pad_inches=0.0, dpi=300)
plt.close()
print("")
| 1,532 | 25.894737 | 93 | py |
P-STMO | P-STMO-main/common/load_data_hm36_tds.py |
import torch.utils.data as data
import numpy as np
from common.utils import deterministic_random
from common.camera import world_to_camera, normalize_screen_coordinates
from common.generator_tds import ChunkedGenerator
class Fusion(data.Dataset):
def __init__(self, opt, dataset, root_path, train=True, MAE=False, tds=1):
self.data_type = opt.dataset
self.train = train
self.keypoints_name = opt.keypoints
self.root_path = root_path
self.train_list = opt.subjects_train.split(',')
self.test_list = opt.subjects_test.split(',')
self.action_filter = None if opt.actions == '*' else opt.actions.split(',')
self.downsample = opt.downsample
self.subset = opt.subset
self.stride = opt.stride
self.crop_uv = opt.crop_uv
self.test_aug = opt.test_augmentation
self.pad = opt.pad
self.MAE=MAE
if self.train:
self.keypoints = self.prepare_data(dataset, self.train_list)
self.cameras_train, self.poses_train, self.poses_train_2d = self.fetch(dataset, self.train_list,
subset=self.subset)
self.generator = ChunkedGenerator(opt.batchSize // opt.stride, self.cameras_train, self.poses_train,
self.poses_train_2d, self.stride, pad=self.pad,
augment=opt.data_augmentation, reverse_aug=opt.reverse_augmentation,
kps_left=self.kps_left, kps_right=self.kps_right,
joints_left=self.joints_left,
joints_right=self.joints_right, out_all=opt.out_all, MAE=MAE, tds=tds)
print('INFO: Training on {} frames'.format(self.generator.num_frames()))
else:
self.keypoints = self.prepare_data(dataset, self.test_list)
self.cameras_test, self.poses_test, self.poses_test_2d = self.fetch(dataset, self.test_list,
subset=self.subset)
self.generator = ChunkedGenerator(opt.batchSize // opt.stride, self.cameras_test, self.poses_test,
self.poses_test_2d,
pad=self.pad, augment=False, kps_left=self.kps_left,
kps_right=self.kps_right, joints_left=self.joints_left,
joints_right=self.joints_right, MAE=MAE, tds=tds)
self.key_index = self.generator.saved_index
print('INFO: Testing on {} frames'.format(self.generator.num_frames()))
def prepare_data(self, dataset, folder_list):
for subject in folder_list:
for action in dataset[subject].keys():
anim = dataset[subject][action]
positions_3d = []
for cam in anim['cameras']:
pos_3d = world_to_camera(anim['positions'], R=cam['orientation'], t=cam['translation'])
pos_3d[:, 1:] -= pos_3d[:, :1]
if self.keypoints_name.startswith('sh'):
pos_3d = np.delete(pos_3d,obj=9,axis=1)
positions_3d.append(pos_3d)
anim['positions_3d'] = positions_3d
keypoints = np.load(self.root_path + 'data_2d_' + self.data_type + '_' + self.keypoints_name + '.npz',allow_pickle=True)
keypoints_symmetry = keypoints['metadata'].item()['keypoints_symmetry']
self.kps_left, self.kps_right = list(keypoints_symmetry[0]), list(keypoints_symmetry[1])
self.joints_left, self.joints_right = list(dataset.skeleton().joints_left()), list(dataset.skeleton().joints_right())
keypoints = keypoints['positions_2d'].item()
for subject in folder_list:
assert subject in keypoints, 'Subject {} is missing from the 2D detections dataset'.format(subject)
for action in dataset[subject].keys():
assert action in keypoints[
subject], 'Action {} of subject {} is missing from the 2D detections dataset'.format(action,
subject)
for cam_idx in range(len(keypoints[subject][action])):
mocap_length = dataset[subject][action]['positions_3d'][cam_idx].shape[0]
assert keypoints[subject][action][cam_idx].shape[0] >= mocap_length
if keypoints[subject][action][cam_idx].shape[0] > mocap_length:
keypoints[subject][action][cam_idx] = keypoints[subject][action][cam_idx][:mocap_length]
for subject in keypoints.keys():
for action in keypoints[subject]:
for cam_idx, kps in enumerate(keypoints[subject][action]):
cam = dataset.cameras()[subject][cam_idx]
if self.crop_uv == 0:
kps[..., :2] = normalize_screen_coordinates(kps[..., :2], w=cam['res_w'], h=cam['res_h'])
keypoints[subject][action][cam_idx] = kps
return keypoints
def fetch(self, dataset, subjects, subset=1, parse_3d_poses=True):
out_poses_3d = {}
out_poses_2d = {}
out_camera_params = {}
for subject in subjects:
for action in self.keypoints[subject].keys():
if self.action_filter is not None:
found = False
for a in self.action_filter:
if action.startswith(a):
found = True
break
if not found:
continue
poses_2d = self.keypoints[subject][action]
for i in range(len(poses_2d)):
out_poses_2d[(subject, action, i)] = poses_2d[i]
if subject in dataset.cameras():
cams = dataset.cameras()[subject]
assert len(cams) == len(poses_2d), 'Camera count mismatch'
for i, cam in enumerate(cams):
if 'intrinsic' in cam:
out_camera_params[(subject, action, i)] = cam['intrinsic']
if parse_3d_poses and 'positions_3d' in dataset[subject][action]:
poses_3d = dataset[subject][action]['positions_3d']
assert len(poses_3d) == len(poses_2d), 'Camera count mismatch'
for i in range(len(poses_3d)):
out_poses_3d[(subject, action, i)] = poses_3d[i]
if len(out_camera_params) == 0:
out_camera_params = None
if len(out_poses_3d) == 0:
out_poses_3d = None
stride = self.downsample
if subset < 1:
for key in out_poses_2d.keys():
n_frames = int(round(len(out_poses_2d[key]) // stride * subset) * stride)
start = deterministic_random(0, len(out_poses_2d[key]) - n_frames + 1, str(len(out_poses_2d[key])))
out_poses_2d[key] = out_poses_2d[key][start:start + n_frames:stride]
if out_poses_3d is not None:
out_poses_3d[key] = out_poses_3d[key][start:start + n_frames:stride]
elif stride > 1:
for key in out_poses_2d.keys():
out_poses_2d[key] = out_poses_2d[key][::stride]
if out_poses_3d is not None:
out_poses_3d[key] = out_poses_3d[key][::stride]
return out_camera_params, out_poses_3d, out_poses_2d
def __len__(self):
return len(self.generator.pairs)
#return 200
def __getitem__(self, index):
seq_name, start_3d, end_3d, flip, reverse = self.generator.pairs[index]
if self.MAE:
cam, input_2D, action, subject, cam_ind = self.generator.get_batch(seq_name, start_3d, end_3d, flip,
reverse)
if self.train == False and self.test_aug:
_, input_2D_aug, _, _,_ = self.generator.get_batch(seq_name, start_3d, end_3d, flip=True, reverse=reverse)
input_2D = np.concatenate((np.expand_dims(input_2D,axis=0),np.expand_dims(input_2D_aug,axis=0)),0)
else:
cam, gt_3D, input_2D, action, subject, cam_ind = self.generator.get_batch(seq_name, start_3d, end_3d, flip, reverse)
if self.train == False and self.test_aug:
_, _, input_2D_aug, _, _,_ = self.generator.get_batch(seq_name, start_3d, end_3d, flip=True, reverse=reverse)
input_2D = np.concatenate((np.expand_dims(input_2D,axis=0),np.expand_dims(input_2D_aug,axis=0)),0)
bb_box = np.array([0, 0, 1, 1])
input_2D_update = input_2D
        scale = float(1.0)  # np.float is removed in recent NumPy; plain float keeps the original behavior
if self.MAE:
return cam, input_2D_update, action, subject, scale, bb_box, cam_ind
else:
return cam, gt_3D, input_2D_update, action, subject, scale, bb_box, cam_ind
| 9,325 | 50.241758 | 128 | py |
P-STMO | P-STMO-main/in_the_wild/generators.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from itertools import zip_longest
import numpy as np
class ChunkedGenerator:
"""
Batched data generator, used for training.
The sequences are split into equal-length chunks and padded as necessary.
Arguments:
batch_size -- the batch size to use for training
cameras -- list of cameras, one element for each video (optional, used for semi-supervised training)
poses_3d -- list of ground-truth 3D poses, one element for each video (optional, used for supervised training)
poses_2d -- list of input 2D keypoints, one element for each video
chunk_length -- number of output frames to predict for each training example (usually 1)
pad -- 2D input padding to compensate for valid convolutions, per side (depends on the receptive field)
causal_shift -- asymmetric padding offset when causal convolutions are used (usually 0 or "pad")
shuffle -- randomly shuffle the dataset before each epoch
random_seed -- initial seed to use for the random generator
augment -- augment the dataset by flipping poses horizontally
kps_left and kps_right -- list of left/right 2D keypoints if flipping is enabled
joints_left and joints_right -- list of left/right 3D joints if flipping is enabled
"""
def __init__(self, batch_size, cameras, poses_3d, poses_2d,
chunk_length, pad=0, causal_shift=0,
shuffle=True, random_seed=1234,
augment=False, kps_left=None, kps_right=None, joints_left=None, joints_right=None,
endless=False):
assert poses_3d is None or len(poses_3d) == len(poses_2d), (len(poses_3d), len(poses_2d))
assert cameras is None or len(cameras) == len(poses_2d)
# Build lineage info
pairs = [] # (seq_idx, start_frame, end_frame, flip) tuples
for i in range(len(poses_2d)):
            assert poses_3d is None or poses_3d[i].shape[0] == poses_2d[i].shape[0]  # 2D/3D frame counts must match
n_chunks = (poses_2d[i].shape[0] + chunk_length - 1) // chunk_length
offset = (n_chunks * chunk_length - poses_2d[i].shape[0]) // 2
bounds = np.arange(n_chunks + 1) * chunk_length - offset
augment_vector = np.full(len(bounds - 1), False, dtype=bool)
pairs += zip(np.repeat(i, len(bounds - 1)), bounds[:-1], bounds[1:], augment_vector)
if augment:
pairs += zip(np.repeat(i, len(bounds - 1)), bounds[:-1], bounds[1:], ~augment_vector)
# Initialize buffers
if cameras is not None:
self.batch_cam = np.empty((batch_size, cameras[0].shape[-1]))
if poses_3d is not None:
self.batch_3d = np.empty((batch_size, chunk_length, poses_3d[0].shape[-2], poses_3d[0].shape[-1]))
self.batch_2d = np.empty((batch_size, chunk_length + 2 * pad, poses_2d[0].shape[-2], poses_2d[0].shape[-1]))
self.num_batches = (len(pairs) + batch_size - 1) // batch_size
self.batch_size = batch_size
self.random = np.random.RandomState(random_seed)
self.pairs = pairs
self.shuffle = shuffle
self.pad = pad
self.causal_shift = causal_shift
self.endless = endless
self.state = None
self.cameras = cameras
self.poses_3d = poses_3d
self.poses_2d = poses_2d
self.augment = augment
self.kps_left = kps_left
self.kps_right = kps_right
self.joints_left = joints_left
self.joints_right = joints_right
def num_frames(self):
return self.num_batches * self.batch_size
def random_state(self):
return self.random
def set_random_state(self, random):
self.random = random
def augment_enabled(self):
return self.augment
def next_pairs(self):
if self.state is None:
if self.shuffle:
pairs = self.random.permutation(self.pairs)
else:
pairs = self.pairs
return 0, pairs
else:
return self.state
def next_epoch(self):
enabled = True
while enabled:
start_idx, pairs = self.next_pairs()
for b_i in range(start_idx, self.num_batches):
chunks = pairs[b_i * self.batch_size: (b_i + 1) * self.batch_size]
for i, (seq_i, start_3d, end_3d, flip) in enumerate(chunks):
start_2d = start_3d - self.pad - self.causal_shift
end_2d = end_3d + self.pad - self.causal_shift
# 2D poses
seq_2d = self.poses_2d[seq_i]
low_2d = max(start_2d, 0)
high_2d = min(end_2d, seq_2d.shape[0])
pad_left_2d = low_2d - start_2d
pad_right_2d = end_2d - high_2d
if pad_left_2d != 0 or pad_right_2d != 0:
self.batch_2d[i] = np.pad(seq_2d[low_2d:high_2d], ((pad_left_2d, pad_right_2d), (0, 0), (0, 0)), 'edge')
else:
self.batch_2d[i] = seq_2d[low_2d:high_2d]
if flip:
# Flip 2D keypoints
self.batch_2d[i, :, :, 0] *= -1
self.batch_2d[i, :, self.kps_left + self.kps_right] = self.batch_2d[i, :, self.kps_right + self.kps_left]
# 3D poses
if self.poses_3d is not None:
seq_3d = self.poses_3d[seq_i]
low_3d = max(start_3d, 0)
high_3d = min(end_3d, seq_3d.shape[0])
pad_left_3d = low_3d - start_3d
pad_right_3d = end_3d - high_3d
if pad_left_3d != 0 or pad_right_3d != 0:
self.batch_3d[i] = np.pad(seq_3d[low_3d:high_3d], ((pad_left_3d, pad_right_3d), (0, 0), (0, 0)), 'edge')
else:
self.batch_3d[i] = seq_3d[low_3d:high_3d]
if flip:
# Flip 3D joints
self.batch_3d[i, :, :, 0] *= -1
self.batch_3d[i, :, self.joints_left + self.joints_right] = \
self.batch_3d[i, :, self.joints_right + self.joints_left]
# Cameras
if self.cameras is not None:
self.batch_cam[i] = self.cameras[seq_i]
if flip:
# Flip horizontal distortion coefficients
self.batch_cam[i, 2] *= -1
self.batch_cam[i, 7] *= -1
if self.endless:
self.state = (b_i + 1, pairs)
if self.poses_3d is None and self.cameras is None:
yield None, None, self.batch_2d[:len(chunks)]
elif self.poses_3d is not None and self.cameras is None:
yield None, self.batch_3d[:len(chunks)], self.batch_2d[:len(chunks)]
elif self.poses_3d is None:
yield self.batch_cam[:len(chunks)], None, self.batch_2d[:len(chunks)]
else:
yield self.batch_cam[:len(chunks)], self.batch_3d[:len(chunks)], self.batch_2d[:len(chunks)]
if self.endless:
self.state = None
else:
enabled = False
class UnchunkedGenerator:
"""
Non-batched data generator, used for testing.
Sequences are returned one at a time (i.e. batch size = 1), without chunking.
If data augmentation is enabled, the batches contain two sequences (i.e. batch size = 2),
the second of which is a mirrored version of the first.
Arguments:
cameras -- list of cameras, one element for each video (optional, used for semi-supervised training)
poses_3d -- list of ground-truth 3D poses, one element for each video (optional, used for supervised training)
poses_2d -- list of input 2D keypoints, one element for each video
pad -- 2D input padding to compensate for valid convolutions, per side (depends on the receptive field)
causal_shift -- asymmetric padding offset when causal convolutions are used (usually 0 or "pad")
augment -- augment the dataset by flipping poses horizontally
kps_left and kps_right -- list of left/right 2D keypoints if flipping is enabled
joints_left and joints_right -- list of left/right 3D joints if flipping is enabled
"""
def __init__(self, cameras, poses_3d, poses_2d, pad=0, causal_shift=0,
augment=False, kps_left=None, kps_right=None, joints_left=None, joints_right=None):
assert poses_3d is None or len(poses_3d) == len(poses_2d)
assert cameras is None or len(cameras) == len(poses_2d)
self.augment = augment
self.kps_left = kps_left
self.kps_right = kps_right
self.joints_left = joints_left
self.joints_right = joints_right
self.pad = pad
self.causal_shift = causal_shift
self.cameras = [] if cameras is None else cameras
self.poses_3d = [] if poses_3d is None else poses_3d
self.poses_2d = poses_2d
def num_frames(self):
count = 0
for p in self.poses_2d:
count += p.shape[0]
return count
def augment_enabled(self):
return self.augment
def set_augment(self, augment):
self.augment = augment
def next_epoch(self):
for seq_cam, seq_3d, seq_2d in zip_longest(self.cameras, self.poses_3d, self.poses_2d):
batch_cam = None if seq_cam is None else np.expand_dims(seq_cam, axis=0)
batch_3d = None if seq_3d is None else np.expand_dims(seq_3d, axis=0)
# 2D input padding to compensate for valid convolutions, per side (depends on the receptive field)
batch_2d = np.expand_dims(np.pad(seq_2d,
((self.pad + self.causal_shift, self.pad - self.causal_shift), (0, 0), (0, 0)),
'edge'), axis=0)
if self.augment:
# Append flipped version
if batch_cam is not None:
batch_cam = np.concatenate((batch_cam, batch_cam), axis=0)
batch_cam[1, 2] *= -1
batch_cam[1, 7] *= -1
if batch_3d is not None:
batch_3d = np.concatenate((batch_3d, batch_3d), axis=0)
batch_3d[1, :, :, 0] *= -1
batch_3d[1, :, self.joints_left + self.joints_right] = batch_3d[1, :, self.joints_right + self.joints_left]
batch_2d = np.concatenate((batch_2d, batch_2d), axis=0)
batch_2d[1, :, :, 0] *= -1
batch_2d[1, :, self.kps_left + self.kps_right] = batch_2d[1, :, self.kps_right + self.kps_left]
yield batch_cam, batch_3d, batch_2d
class Evaluate_Generator:
"""
Batched data generator, used for training.
The sequences are split into equal-length chunks and padded as necessary.
Arguments:
batch_size -- the batch size to use for training
cameras -- list of cameras, one element for each video (optional, used for semi-supervised training)
poses_3d -- list of ground-truth 3D poses, one element for each video (optional, used for supervised training)
poses_2d -- list of input 2D keypoints, one element for each video
chunk_length -- number of output frames to predict for each training example (usually 1)
pad -- 2D input padding to compensate for valid convolutions, per side (depends on the receptive field)
causal_shift -- asymmetric padding offset when causal convolutions are used (usually 0 or "pad")
shuffle -- randomly shuffle the dataset before each epoch
random_seed -- initial seed to use for the random generator
augment -- augment the dataset by flipping poses horizontally
kps_left and kps_right -- list of left/right 2D keypoints if flipping is enabled
joints_left and joints_right -- list of left/right 3D joints if flipping is enabled
"""
def __init__(self, batch_size, cameras, poses_3d, poses_2d,
chunk_length, pad=0, causal_shift=0,
shuffle=True, random_seed=1234,
augment=False, kps_left=None, kps_right=None, joints_left=None, joints_right=None,
endless=False):
assert poses_3d is None or len(poses_3d) == len(poses_2d), (len(poses_3d), len(poses_2d))
assert cameras is None or len(cameras) == len(poses_2d)
# Build lineage info
pairs = [] # (seq_idx, start_frame, end_frame, flip) tuples
for i in range(len(poses_2d)):
            assert poses_3d is None or poses_3d[i].shape[0] == poses_2d[i].shape[0]  # 2D/3D frame counts must match
n_chunks = (poses_2d[i].shape[0] + chunk_length - 1) // chunk_length
offset = (n_chunks * chunk_length - poses_2d[i].shape[0]) // 2
bounds = np.arange(n_chunks + 1) * chunk_length - offset
augment_vector = np.full(len(bounds - 1), False, dtype=bool)
pairs += zip(np.repeat(i, len(bounds - 1)), bounds[:-1], bounds[1:], augment_vector)
# Initialize buffers
if cameras is not None:
self.batch_cam = np.empty((batch_size, cameras[0].shape[-1]))
if poses_3d is not None:
self.batch_3d = np.empty((batch_size, chunk_length, poses_3d[0].shape[-2], poses_3d[0].shape[-1]))
if augment:
self.batch_2d_flip = np.empty(
(batch_size, chunk_length + 2 * pad, poses_2d[0].shape[-2], poses_2d[0].shape[-1]))
self.batch_2d = np.empty((batch_size, chunk_length + 2 * pad, poses_2d[0].shape[-2], poses_2d[0].shape[-1]))
else:
self.batch_2d = np.empty((batch_size, chunk_length + 2 * pad, poses_2d[0].shape[-2], poses_2d[0].shape[-1]))
self.num_batches = (len(pairs) + batch_size - 1) // batch_size
self.batch_size = batch_size
self.random = np.random.RandomState(random_seed)
self.pairs = pairs
self.shuffle = shuffle
self.pad = pad
self.causal_shift = causal_shift
self.endless = endless
self.state = None
self.cameras = cameras
self.poses_3d = poses_3d
self.poses_2d = poses_2d
self.augment = augment
self.kps_left = kps_left
self.kps_right = kps_right
self.joints_left = joints_left
self.joints_right = joints_right
def num_frames(self):
return self.num_batches * self.batch_size
def random_state(self):
return self.random
def set_random_state(self, random):
self.random = random
def augment_enabled(self):
return self.augment
def next_pairs(self):
if self.state is None:
if self.shuffle:
pairs = self.random.permutation(self.pairs)
else:
pairs = self.pairs
return 0, pairs
else:
return self.state
def next_epoch(self):
enabled = True
while enabled:
start_idx, pairs = self.next_pairs()
for b_i in range(start_idx, self.num_batches):
chunks = pairs[b_i * self.batch_size: (b_i + 1) * self.batch_size]
for i, (seq_i, start_3d, end_3d, flip) in enumerate(chunks):
start_2d = start_3d - self.pad - self.causal_shift
end_2d = end_3d + self.pad - self.causal_shift
# 2D poses
seq_2d = self.poses_2d[seq_i]
low_2d = max(start_2d, 0)
high_2d = min(end_2d, seq_2d.shape[0])
pad_left_2d = low_2d - start_2d
pad_right_2d = end_2d - high_2d
if pad_left_2d != 0 or pad_right_2d != 0:
self.batch_2d[i] = np.pad(seq_2d[low_2d:high_2d], ((pad_left_2d, pad_right_2d), (0, 0), (0, 0)),
'edge')
if self.augment:
self.batch_2d_flip[i] = np.pad(seq_2d[low_2d:high_2d],
((pad_left_2d, pad_right_2d), (0, 0), (0, 0)),
'edge')
else:
self.batch_2d[i] = seq_2d[low_2d:high_2d]
if self.augment:
self.batch_2d_flip[i] = seq_2d[low_2d:high_2d]
if self.augment:
self.batch_2d_flip[i, :, :, 0] *= -1
self.batch_2d_flip[i, :, self.kps_left + self.kps_right] = self.batch_2d_flip[i, :,
self.kps_right + self.kps_left]
# 3D poses
if self.poses_3d is not None:
seq_3d = self.poses_3d[seq_i]
low_3d = max(start_3d, 0)
high_3d = min(end_3d, seq_3d.shape[0])
pad_left_3d = low_3d - start_3d
pad_right_3d = end_3d - high_3d
if pad_left_3d != 0 or pad_right_3d != 0:
self.batch_3d[i] = np.pad(seq_3d[low_3d:high_3d],
((pad_left_3d, pad_right_3d), (0, 0), (0, 0)), 'edge')
else:
self.batch_3d[i] = seq_3d[low_3d:high_3d]
if flip:
self.batch_3d[i, :, :, 0] *= -1
self.batch_3d[i, :, self.joints_left + self.joints_right] = \
self.batch_3d[i, :, self.joints_right + self.joints_left]
# Cameras
if self.cameras is not None:
self.batch_cam[i] = self.cameras[seq_i]
if flip:
# Flip horizontal distortion coefficients
self.batch_cam[i, 2] *= -1
self.batch_cam[i, 7] *= -1
if self.endless:
self.state = (b_i + 1, pairs)
if self.augment:
if self.poses_3d is None and self.cameras is None:
yield None, None, self.batch_2d[:len(chunks)], self.batch_2d_flip[:len(chunks)]
elif self.poses_3d is not None and self.cameras is None:
yield None, self.batch_3d[:len(chunks)], self.batch_2d[:len(chunks)], self.batch_2d_flip[
:len(chunks)]
elif self.poses_3d is None:
yield self.batch_cam[:len(chunks)], None, self.batch_2d[:len(chunks)], self.batch_2d_flip[
:len(chunks)]
else:
yield self.batch_cam[:len(chunks)], self.batch_3d[:len(chunks)], self.batch_2d[:len(
chunks)], self.batch_2d_flip[:len(chunks)]
else:
if self.poses_3d is None and self.cameras is None:
yield None, None, self.batch_2d[:len(chunks)]
elif self.poses_3d is not None and self.cameras is None:
yield None, self.batch_3d[:len(chunks)], self.batch_2d[:len(chunks)]
elif self.poses_3d is None:
yield self.batch_cam[:len(chunks)], None, self.batch_2d[:len(chunks)]
else:
yield self.batch_cam[:len(chunks)], self.batch_3d[:len(chunks)], self.batch_2d[:len(chunks)]
if self.endless:
self.state = None
else:
enabled = False | 20,264 | 46.682353 | 132 | py |
P-STMO | P-STMO-main/in_the_wild/arguments.py | # Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='Training script')
# General arguments
parser.add_argument('-d', '--dataset', default='h36m', type=str, metavar='NAME', help='target dataset') # h36m or humaneva
parser.add_argument('-k', '--keypoints', default='cpn_ft_h36m_dbb', type=str, metavar='NAME', help='2D detections to use')
parser.add_argument('-str', '--subjects-train', default='S1,S5,S6,S7,S8', type=str, metavar='LIST',
help='training subjects separated by comma')
parser.add_argument('-ste', '--subjects-test', default='S9,S11', type=str, metavar='LIST', help='test subjects separated by comma')
parser.add_argument('-sun', '--subjects-unlabeled', default='', type=str, metavar='LIST',
help='unlabeled subjects separated by comma for self-supervision')
parser.add_argument('-a', '--actions', default='*', type=str, metavar='LIST',
help='actions to train/test on, separated by comma, or * for all')
parser.add_argument('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
help='checkpoint directory')
parser.add_argument('--checkpoint-frequency', default=10, type=int, metavar='N',
help='create a checkpoint every N epochs')
parser.add_argument('-r', '--resume', default='', type=str, metavar='FILENAME',
help='checkpoint to resume (file name)')
parser.add_argument('--evaluate', default='pretrained_h36m_detectron_coco.bin', type=str, metavar='FILENAME', help='checkpoint to evaluate (file name)')
parser.add_argument('--render', action='store_true', help='visualize a particular video')
parser.add_argument('--by-subject', action='store_true', help='break down error by subject (on evaluation)')
parser.add_argument('--export-training-curves', action='store_true', help='save training curves as .png images')
# Model arguments
parser.add_argument('-s', '--stride', default=1, type=int, metavar='N', help='chunk size to use during training')
parser.add_argument('-e', '--epochs', default=60, type=int, metavar='N', help='number of training epochs')
parser.add_argument('-b', '--batch-size', default=1024, type=int, metavar='N', help='batch size in terms of predicted frames')
parser.add_argument('-drop', '--dropout', default=0.25, type=float, metavar='P', help='dropout probability')
parser.add_argument('-lr', '--learning-rate', default=0.001, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('-lrd', '--lr-decay', default=0.95, type=float, metavar='LR', help='learning rate decay per epoch')
parser.add_argument('-no-da', '--no-data-augmentation', dest='data_augmentation', action='store_false',
help='disable train-time flipping')
parser.add_argument('-no-tta', '--no-test-time-augmentation', dest='test_time_augmentation', action='store_false',
help='disable test-time flipping')
parser.add_argument('-arc', '--architecture', default='3,3,3,3,3', type=str, metavar='LAYERS', help='filter widths separated by comma')
parser.add_argument('--causal', action='store_true', help='use causal convolutions for real-time processing')
parser.add_argument('-ch', '--channels', default=1024, type=int, metavar='N', help='number of channels in convolution layers')
# Experimental
parser.add_argument('--subset', default=1, type=float, metavar='FRACTION', help='reduce dataset size by fraction')
parser.add_argument('--downsample', default=1, type=int, metavar='FACTOR', help='downsample frame rate by factor (semi-supervised)')
parser.add_argument('--warmup', default=1, type=int, metavar='N', help='warm-up epochs for semi-supervision')
parser.add_argument('--no-eval', action='store_true', help='disable epoch evaluation while training (small speed-up)')
parser.add_argument('--dense', action='store_true', help='use dense convolutions instead of dilated convolutions')
parser.add_argument('--disable-optimizations', action='store_true', help='disable optimized model for single-frame predictions')
parser.add_argument('--linear-projection', action='store_true', help='use only linear coefficients for semi-supervised projection')
parser.add_argument('--no-bone-length', action='store_false', dest='bone_length_term',
help='disable bone length term in semi-supervised settings')
parser.add_argument('--no-proj', action='store_true', help='disable projection for semi-supervised setting')
# Visualization
parser.add_argument('--viz-subject', type=str, metavar='STR', help='subject to render')
parser.add_argument('--viz-action', type=str, metavar='STR', help='action to render')
parser.add_argument('--viz-camera', type=int, default=0, metavar='N', help='camera to render')
parser.add_argument('--viz-video', type=str, metavar='PATH', help='path to input video')
parser.add_argument('--viz-skip', type=int, default=0, metavar='N', help='skip first N frames of input video')
parser.add_argument('--viz-output', type=str, metavar='PATH', help='output file name (.gif or .mp4)')
parser.add_argument('--viz-bitrate', type=int, default=30000, metavar='N', help='bitrate for mp4 videos')
parser.add_argument('--viz-no-ground-truth', action='store_true', help='do not show ground-truth poses')
parser.add_argument('--viz-limit', type=int, default=-1, metavar='N', help='only render first N frames')
parser.add_argument('--viz-downsample', type=int, default=1, metavar='N', help='downsample FPS by a factor N')
parser.add_argument('--viz-size', type=int, default=5, metavar='N', help='image size')
# self add
parser.add_argument('-in2d','--input_npz', type=str, default='', help='input 2d numpy file')
parser.add_argument('--video', dest='input_video', type=str, default='', help='input video name')
parser.add_argument('--layers', default=3, type=int)
parser.add_argument('--channel', default=256, type=int)
parser.add_argument('--d_hid', default=512, type=int)
parser.add_argument('-f', '--frames', type=int, default=243)
parser.add_argument('--n_joints', type=int, default=17)
parser.add_argument('--out_joints', type=int, default=17)
parser.add_argument('--in_channels', type=int, default=2)
parser.add_argument('--out_channels', type=int, default=3)
parser.add_argument('--stride_num', type=list, default=[3, 3, 3, 3, 3])
parser.set_defaults(bone_length_term=True)
parser.set_defaults(data_augmentation=True)
parser.set_defaults(test_time_augmentation=True)
args = parser.parse_args()
# Check invalid configuration
if args.resume and args.evaluate:
print('Invalid flags: --resume and --evaluate cannot be set at the same time')
exit()
if args.export_training_curves and args.no_eval:
print('Invalid flags: --export-training-curves and --no-eval cannot be set at the same time')
exit()
return args
| 7,306 | 69.941748 | 156 | py |