repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
DEAT | DEAT-main/models/pnasnet.py | '''PNASNet in PyTorch.
Paper: Progressive Neural Architecture Search
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class SepConv(nn.Module):
    '''Depthwise-separable convolution: a grouped (depthwise) conv followed
    by batch normalization. No activation is applied here.'''
    def __init__(self, in_planes, out_planes, kernel_size, stride):
        super(SepConv, self).__init__()
        # groups=in_planes makes this a depthwise convolution; the padding
        # keeps the spatial size unchanged when stride == 1.
        self.conv1 = nn.Conv2d(
            in_planes, out_planes, kernel_size, stride,
            padding=(kernel_size - 1) // 2, groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        out = self.conv1(x)
        return self.bn1(out)
class CellA(nn.Module):
    # PNASNet cell with two branches: a 7x7 separable conv and a 3x3 max-pool.
    # The branches are summed and passed through a single ReLU.
    def __init__(self, in_planes, out_planes, stride=1):
        super(CellA, self).__init__()
        self.stride = stride
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        if stride==2:
            # When downsampling, the pooled branch needs a 1x1 conv to match
            # the channel count of the separable-conv branch.
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        y1 = self.sep_conv1(x)
        y2 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride==2:
            y2 = self.bn1(self.conv1(y2))
        return F.relu(y1+y2)
class CellB(nn.Module):
    # PNASNet cell with four branches, combined pairwise:
    # (7x7 sep-conv + 3x3 sep-conv) and (3x3 max-pool + 5x5 sep-conv),
    # then concatenated and reduced back to out_planes channels.
    def __init__(self, in_planes, out_planes, stride=1):
        super(CellB, self).__init__()
        self.stride = stride
        # Left branch
        self.sep_conv1 = SepConv(in_planes, out_planes, kernel_size=7, stride=stride)
        self.sep_conv2 = SepConv(in_planes, out_planes, kernel_size=3, stride=stride)
        # Right branch
        self.sep_conv3 = SepConv(in_planes, out_planes, kernel_size=5, stride=stride)
        if stride==2:
            # Channel-matching 1x1 conv for the pooled branch when downsampling.
            self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
            self.bn1 = nn.BatchNorm2d(out_planes)
        # Reduce channels: 2*out_planes (after concat) back to out_planes.
        self.conv2 = nn.Conv2d(2*out_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        # Left branch
        y1 = self.sep_conv1(x)
        y2 = self.sep_conv2(x)
        # Right branch
        y3 = F.max_pool2d(x, kernel_size=3, stride=self.stride, padding=1)
        if self.stride==2:
            y3 = self.bn1(self.conv1(y3))
        y4 = self.sep_conv3(x)
        # Concat & reduce channels
        b1 = F.relu(y1+y2)
        b2 = F.relu(y3+y4)
        y = torch.cat([b1,b2], 1)
        return F.relu(self.bn2(self.conv2(y)))
class PNASNet(nn.Module):
    '''PNASNet backbone for CIFAR-sized (32x32) inputs.

    Three stages of stride-1 cells separated by single stride-2
    downsampling cells; channel width doubles at each downsample.

    Args:
        cell_type: cell class (e.g. CellA or CellB) constructed as
            cell_type(in_planes, out_planes, stride).
        num_cells: number of stride-1 cells per stage.
        num_planes: channel width of the first stage.
    '''
    def __init__(self, cell_type, num_cells, num_planes):
        super(PNASNet, self).__init__()
        self.in_planes = num_planes
        self.cell_type = cell_type

        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_planes)

        # BUGFIX: num_cells was previously ignored (hard-coded to 6 for every
        # stage); honor the constructor argument instead.
        self.layer1 = self._make_layer(num_planes, num_cells=num_cells)
        self.layer2 = self._downsample(num_planes*2)
        self.layer3 = self._make_layer(num_planes*2, num_cells=num_cells)
        self.layer4 = self._downsample(num_planes*4)
        self.layer5 = self._make_layer(num_planes*4, num_cells=num_cells)

        self.linear = nn.Linear(num_planes*4, 10)

    def _make_layer(self, planes, num_cells):
        # Stack stride-1 cells; the width stays constant within a stage.
        layers = []
        for _ in range(num_cells):
            layers.append(self.cell_type(self.in_planes, planes, stride=1))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def _downsample(self, planes):
        # A single stride-2 cell halves the spatial size and widens channels.
        layer = self.cell_type(self.in_planes, planes, stride=2)
        self.in_planes = planes
        return layer

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        # 32x32 input -> two downsamples -> 8x8; global average pool to 1x1.
        out = F.avg_pool2d(out, 8)
        out = self.linear(out.view(out.size(0), -1))
        return out
def PNASNetA():
    # 6 cells per stage, 44 base channels, two-branch CellA.
    return PNASNet(CellA, num_cells=6, num_planes=44)


def PNASNetB():
    # 6 cells per stage, 32 base channels, four-branch CellB.
    return PNASNet(CellB, num_cells=6, num_planes=32)


def test():
    '''Smoke test: forward one random CIFAR-sized batch through PNASNetB.'''
    net = PNASNetB()
    x = torch.randn(1, 3, 32, 32)
    print(net(x))

# test()
| 4,258 | 32.801587 | 105 | py |
DEAT | DEAT-main/models/resnet.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    '''ResNet basic block: two 3x3 convs with an identity (or 1x1-projected)
    residual connection.'''
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        # The shortcut is projected only when the shape changes
        # (spatial stride or channel width).
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class Bottleneck(nn.Module):
    # ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (4x),
    # with a projected shortcut when the shape changes.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # The 3x3 conv carries the stride for downsampling.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion *
                               planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            # 1x1 projection to match shape for the residual sum.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    # CIFAR-style ResNet: 3x3 stem (no max-pool), four stages, then a
    # 4x4 average pool and a linear classifier.
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        # Stages double the width; stages 2-4 downsample by stride 2.
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage uses the given stride.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # 32x32 input -> 4x4 feature maps here; pool to 1x1.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNet18():
    # 18-layer ResNet: BasicBlock x [2, 2, 2, 2].
    return ResNet(BasicBlock, [2, 2, 2, 2])


def ResNet34():
    # 34-layer ResNet: BasicBlock x [3, 4, 6, 3].
    return ResNet(BasicBlock, [3, 4, 6, 3])


def ResNet50():
    # 50-layer ResNet: Bottleneck x [3, 4, 6, 3].
    return ResNet(Bottleneck, [3, 4, 6, 3])


def ResNet101():
    # 101-layer ResNet: Bottleneck x [3, 4, 23, 3].
    return ResNet(Bottleneck, [3, 4, 23, 3])


def ResNet152():
    # 152-layer ResNet: Bottleneck x [3, 8, 36, 3].
    return ResNet(Bottleneck, [3, 8, 36, 3])


def test():
    # Smoke test: forward one random CIFAR-sized image.
    net = ResNet18()
    y = net(torch.randn(1, 3, 32, 32))
    print(y.size())

# test()
| 4,218 | 30.721805 | 83 | py |
DEAT | DEAT-main/models/mobilenetv2.py | '''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    '''expand + depthwise + pointwise'''
    # MobileNetV2 inverted residual: 1x1 expand (by `expansion`),
    # 3x3 depthwise, 1x1 linear projection; residual add when stride == 1.
    def __init__(self, in_planes, out_planes, expansion, stride):
        super(Block, self).__init__()
        self.stride = stride

        planes = expansion * in_planes
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # groups=planes -> depthwise convolution.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        self.shortcut = nn.Sequential()
        if stride == 1 and in_planes != out_planes:
            # 1x1 projection so the residual channel counts match.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_planes),
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        # No activation after the projection (linear bottleneck).
        out = self.bn3(self.conv3(out))
        out = out + self.shortcut(x) if self.stride==1 else out
        return out
class MobileNetV2(nn.Module):
    # MobileNetV2 adapted for CIFAR10 (32x32 inputs): early strides reduced.
    # (expansion, out_planes, num_blocks, stride)
    cfg = [(1,  16, 1, 1),
           (6,  24, 2, 1),  # NOTE: change stride 2 -> 1 for CIFAR10
           (6,  32, 3, 2),
           (6,  64, 4, 2),
           (6,  96, 3, 1),
           (6, 160, 3, 2),
           (6, 320, 1, 1)]

    def __init__(self, num_classes=10):
        super(MobileNetV2, self).__init__()
        # NOTE: change conv1 stride 2 -> 1 for CIFAR10
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        # Final 1x1 conv widens to 1280 features before classification.
        self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(1280)
        self.linear = nn.Linear(1280, num_classes)

    def _make_layers(self, in_planes):
        # Build the inverted-residual stack from cfg; only the first block
        # of each group carries the configured stride.
        layers = []
        for expansion, out_planes, num_blocks, stride in self.cfg:
            strides = [stride] + [1]*(num_blocks-1)
            for stride in strides:
                layers.append(Block(in_planes, out_planes, expansion, stride))
                in_planes = out_planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.relu(self.bn2(self.conv2(out)))
        # NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def test():
    '''Smoke test: forward a random batch of two CIFAR-sized images.'''
    model = MobileNetV2()
    batch = torch.randn(2, 3, 32, 32)
    print(model(batch).size())

# test()
| 3,092 | 34.551724 | 114 | py |
DEAT | DEAT-main/models/vgg.py | '''VGG11/13/16/19 in Pytorch.'''
import torch
import torch.nn as nn
cfg = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
    # VGG for CIFAR10: conv stack built from a cfg list where 'M' denotes
    # a 2x2 max-pool and integers denote conv output widths.
    def __init__(self, vgg_name):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        # After five pools a 32x32 input is 1x1x512.
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        out = self.features(x)
        out = out.view(out.size(0), -1)
        out = self.classifier(out)
        return out

    def _make_layers(self, cfg):
        layers = []
        in_channels = 3
        for x in cfg:
            if x == 'M':
                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
            else:
                # Conv-BN-ReLU unit with "same" padding.
                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
                           nn.BatchNorm2d(x),
                           nn.ReLU(inplace=True)]
                in_channels = x
        # 1x1 average pool is a no-op kept for architectural symmetry.
        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
        return nn.Sequential(*layers)
def test():
    '''Smoke test: forward two random CIFAR-sized images through VGG11.'''
    model = VGG('VGG11')
    batch = torch.randn(2, 3, 32, 32)
    print(model(batch).size())

# test()
| 1,442 | 29.0625 | 117 | py |
DEAT | DEAT-main/models/densenet.py | '''DenseNet in PyTorch.'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    '''DenseNet bottleneck layer: BN-ReLU-1x1 conv then BN-ReLU-3x3 conv,
    concatenating the new growth_rate feature maps onto the input.'''
    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        inter_planes = 4 * growth_rate  # bottleneck width
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(inter_planes)
        self.conv2 = nn.Conv2d(inter_planes, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.conv2(F.relu(self.bn2(out)))
        # Dense connectivity: output has in_planes + growth_rate channels.
        return torch.cat([out, x], 1)
class Transition(nn.Module):
    '''Between dense blocks: 1x1 conv to compress channels, then a
    2x2 average pool to halve the spatial size.'''
    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        compressed = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(compressed, 2)
class DenseNet(nn.Module):
    # DenseNet for CIFAR: four dense blocks with transition (compress + pool)
    # layers in between; channel count grows by growth_rate per layer.
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate

        num_planes = 2*growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)

        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        # Transition compresses channels by `reduction`.
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes

        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes

        # No transition after the last dense block.
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate

        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)

    def _make_dense_layers(self, block, in_planes, nblock):
        # Each layer consumes all previous features and appends growth_rate more.
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        # Final BN-ReLU then global 4x4 average pool (32x32 input -> 4x4 here).
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DenseNet121():
    # Standard DenseNet-121 block layout with growth rate 32.
    return DenseNet(Bottleneck, [6,12,24,16], growth_rate=32)

def DenseNet169():
    return DenseNet(Bottleneck, [6,12,32,32], growth_rate=32)

def DenseNet201():
    return DenseNet(Bottleneck, [6,12,48,32], growth_rate=32)

def DenseNet161():
    return DenseNet(Bottleneck, [6,12,36,24], growth_rate=48)

def densenet_cifar():
    # Slimmer variant for CIFAR: DenseNet-121 layout, growth rate 12.
    return DenseNet(Bottleneck, [6,12,24,16], growth_rate=12)

def test():
    # Smoke test: forward one random CIFAR-sized image.
    net = densenet_cifar()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y)

# test()
| 3,542 | 31.805556 | 96 | py |
DEAT | DEAT-main/models/googlenet.py | '''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Inception(nn.Module):
    # GoogLeNet inception module with four parallel branches whose outputs
    # are concatenated along the channel dimension. Note: the "5x5" branch
    # is implemented as two stacked 3x3 convs (same receptive field).
    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()
        # 1x1 conv branch
        self.b1 = nn.Sequential(
            nn.Conv2d(in_planes, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(True),
        )

        # 1x1 conv -> 3x3 conv branch
        self.b2 = nn.Sequential(
            nn.Conv2d(in_planes, n3x3red, kernel_size=1),
            nn.BatchNorm2d(n3x3red),
            nn.ReLU(True),
            nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(True),
        )

        # 1x1 conv -> 5x5 conv branch
        self.b3 = nn.Sequential(
            nn.Conv2d(in_planes, n5x5red, kernel_size=1),
            nn.BatchNorm2d(n5x5red),
            nn.ReLU(True),
            nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(True),
        )

        # 3x3 pool -> 1x1 conv branch
        self.b4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(in_planes, pool_planes, kernel_size=1),
            nn.BatchNorm2d(pool_planes),
            nn.ReLU(True),
        )

    def forward(self, x):
        y1 = self.b1(x)
        y2 = self.b2(x)
        y3 = self.b3(x)
        y4 = self.b4(x)
        # Output channels = n1x1 + n3x3 + n5x5 + pool_planes.
        return torch.cat([y1,y2,y3,y4], 1)
class GoogLeNet(nn.Module):
    # GoogLeNet adapted for CIFAR: single 3x3 stem, then the standard
    # inception stages (3a/3b, 4a-4e, 5a/5b) without auxiliary classifiers.
    def __init__(self):
        super(GoogLeNet, self).__init__()
        self.pre_layers = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(True),
        )

        self.a3 = Inception(192,  64,  96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)

        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)

        self.a4 = Inception(480, 192,  96, 208, 16,  48,  64)
        self.b4 = Inception(512, 160, 112, 224, 24,  64,  64)
        self.c4 = Inception(512, 128, 128, 256, 24,  64,  64)
        self.d4 = Inception(512, 112, 144, 288, 32,  64,  64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)

        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)

        # 32x32 input -> two stride-2 pools -> 8x8 feature maps here.
        self.avgpool = nn.AvgPool2d(8, stride=1)
        self.linear = nn.Linear(1024, 10)

    def forward(self, x):
        out = self.pre_layers(x)
        out = self.a3(out)
        out = self.b3(out)
        out = self.maxpool(out)
        out = self.a4(out)
        out = self.b4(out)
        out = self.c4(out)
        out = self.d4(out)
        out = self.e4(out)
        out = self.maxpool(out)
        out = self.a5(out)
        out = self.b5(out)
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def test():
    '''Smoke test: forward one random CIFAR-sized image through GoogLeNet.'''
    model = GoogLeNet()
    batch = torch.randn(1, 3, 32, 32)
    print(model(batch).size())

# test()
| 3,221 | 28.833333 | 83 | py |
DEAT | DEAT-main/models/resnext.py | '''ResNeXt in PyTorch.
See the paper "Aggregated Residual Transformations for Deep Neural Networks" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    '''Grouped convolution block.'''
    # ResNeXt block: 1x1 -> grouped 3x3 (cardinality groups) -> 1x1 expand,
    # with a projected shortcut when the shape changes.
    expansion = 2

    def __init__(self, in_planes, cardinality=32, bottleneck_width=4, stride=1):
        super(Block, self).__init__()
        # Total width of the grouped bottleneck.
        group_width = cardinality * bottleneck_width
        self.conv1 = nn.Conv2d(in_planes, group_width, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(group_width)
        self.conv2 = nn.Conv2d(group_width, group_width, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
        self.bn2 = nn.BatchNorm2d(group_width)
        self.conv3 = nn.Conv2d(group_width, self.expansion*group_width, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*group_width)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*group_width:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*group_width, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*group_width)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNeXt(nn.Module):
    # ResNeXt-29-style model for CIFAR: three stages (layer4 disabled),
    # bottleneck width doubling after each stage.
    def __init__(self, num_blocks, cardinality, bottleneck_width, num_classes=10):
        super(ResNeXt, self).__init__()
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(num_blocks[0], 1)
        self.layer2 = self._make_layer(num_blocks[1], 2)
        self.layer3 = self._make_layer(num_blocks[2], 2)
        # self.layer4 = self._make_layer(num_blocks[3], 2)
        # Final width = expansion(2) * cardinality * width * 4 stages of doubling
        # = cardinality * bottleneck_width * 8.
        self.linear = nn.Linear(cardinality*bottleneck_width*8, num_classes)

    def _make_layer(self, num_blocks, stride):
        # First block of the stage carries the stride; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(Block(self.in_planes, self.cardinality, self.bottleneck_width, stride))
            self.in_planes = Block.expansion * self.cardinality * self.bottleneck_width
        # Increase bottleneck_width by 2 after each stage.
        self.bottleneck_width *= 2
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # out = self.layer4(out)
        # Two stride-2 stages: 32x32 -> 8x8; pool to 1x1.
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNeXt29_2x64d():
    # 29 layers, cardinality 2, base width 64.
    return ResNeXt(num_blocks=[3,3,3], cardinality=2, bottleneck_width=64)

def ResNeXt29_4x64d():
    return ResNeXt(num_blocks=[3,3,3], cardinality=4, bottleneck_width=64)

def ResNeXt29_8x64d():
    return ResNeXt(num_blocks=[3,3,3], cardinality=8, bottleneck_width=64)

def ResNeXt29_32x4d():
    return ResNeXt(num_blocks=[3,3,3], cardinality=32, bottleneck_width=4)

def test_resnext():
    # Smoke test: forward one random CIFAR-sized image.
    net = ResNeXt29_2x64d()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y.size())

# test_resnext()
| 3,478 | 35.239583 | 129 | py |
DEAT | DEAT-main/models/senet.py | '''SENet in PyTorch.
SENet is the winner of ImageNet-2017. The paper is not released yet.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    '''ResNet basic block augmented with a squeeze-and-excitation (SE) gate.'''
    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes)
            )

        # SE layers: 1x1 convs play the role of the two FC layers of the
        # SE module (reduction ratio 16). Use nn.Conv2d instead of nn.Linear.
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # Squeeze: global average pool each channel down to 1x1.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # FIX: F.sigmoid is deprecated in PyTorch; torch.sigmoid is the
        # drop-in replacement with identical behavior.
        w = torch.sigmoid(self.fc2(w))
        # Excitation: channel-wise rescaling (broadcast over H and W).
        out = out * w

        out += self.shortcut(x)
        out = F.relu(out)
        return out
class PreActBlock(nn.Module):
    '''Pre-activation ResNet block with a squeeze-and-excitation (SE) gate.'''
    def __init__(self, in_planes, planes, stride=1):
        super(PreActBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)

        # Shortcut is only created when the shape changes; its absence is
        # tested with hasattr() in forward().
        if stride != 1 or in_planes != planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False)
            )

        # SE layers (reduction ratio 16), implemented as 1x1 convs.
        self.fc1 = nn.Conv2d(planes, planes//16, kernel_size=1)
        self.fc2 = nn.Conv2d(planes//16, planes, kernel_size=1)

    def forward(self, x):
        out = F.relu(self.bn1(x))
        # Pre-activation: the shortcut taps the normalized/activated input.
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.conv2(F.relu(self.bn2(out)))

        # Squeeze: global average pool each channel down to 1x1.
        w = F.avg_pool2d(out, out.size(2))
        w = F.relu(self.fc1(w))
        # FIX: F.sigmoid is deprecated in PyTorch; torch.sigmoid is the
        # drop-in replacement with identical behavior.
        w = torch.sigmoid(self.fc2(w))
        # Excitation: channel-wise rescaling.
        out = out * w

        out += shortcut
        return out
class SENet(nn.Module):
    # SE-ResNet for CIFAR: 3x3 stem, four stages of SE blocks, 4x4 pool,
    # linear classifier. Blocks do not use an expansion factor here.
    def __init__(self, block, num_blocks, num_classes=10):
        super(SENet, self).__init__()
        self.in_planes = 64

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block,  64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # 32x32 input -> 4x4 feature maps here; pool to 1x1.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def SENet18():
    # SENet-18: pre-activation SE blocks, [2,2,2,2] per stage.
    return SENet(PreActBlock, [2,2,2,2])


def test():
    # Smoke test: forward one random CIFAR-sized image.
    net = SENet18()
    y = net(torch.randn(1,3,32,32))
    print(y.size())

# test()
| 4,027 | 32.016393 | 102 | py |
DEAT | DEAT-main/models/shufflenet.py | '''ShuffleNet in PyTorch.
See the paper "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class ShuffleBlock(nn.Module):
    '''Channel shuffle used between grouped convolutions.'''
    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        '''Channel shuffle: [N,C,H,W] -> [N,g,C/g,H,W] -> [N,C/g,g,H,w] -> [N,C,H,W]'''
        n, c, h, w = x.size()
        g = self.groups
        # Split channels into g groups, swap the group and per-group axes,
        # then flatten back so channels from different groups interleave.
        grouped = x.view(n, g, c // g, h, w)
        return grouped.permute(0, 2, 1, 3, 4).reshape(n, c, h, w)
class Bottleneck(nn.Module):
    '''ShuffleNet unit: grouped 1x1 -> channel shuffle -> depthwise 3x3 ->
    grouped 1x1. With stride 2, the average-pooled input is concatenated to
    the output (channels add up); with stride 1, it is added residually.
    '''
    def __init__(self, in_planes, out_planes, stride, groups):
        super(Bottleneck, self).__init__()
        self.stride = stride

        # BUGFIX: use integer division — `out_planes/4` yields a float in
        # Python 3 and nn.Conv2d rejects non-integer channel counts.
        mid_planes = out_planes // 4
        # The first unit takes the 24-channel stem: too narrow to group.
        g = 1 if in_planes == 24 else groups
        self.conv1 = nn.Conv2d(in_planes, mid_planes, kernel_size=1, groups=g, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_planes)
        self.shuffle1 = ShuffleBlock(groups=g)
        # groups=mid_planes makes this a depthwise 3x3 convolution.
        self.conv2 = nn.Conv2d(mid_planes, mid_planes, kernel_size=3, stride=stride, padding=1, groups=mid_planes, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_planes)
        self.conv3 = nn.Conv2d(mid_planes, out_planes, kernel_size=1, groups=groups, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes)

        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(nn.AvgPool2d(3, stride=2, padding=1))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.shuffle1(out)
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        res = self.shortcut(x)
        # Stride-2 units concatenate; stride-1 units sum (residual).
        out = F.relu(torch.cat([out,res], 1)) if self.stride==2 else F.relu(out+res)
        return out
class ShuffleNet(nn.Module):
    # ShuffleNet for CIFAR: 24-channel 1x1 stem, three stages of Bottleneck
    # units, 4x4 average pool, linear classifier. cfg supplies per-stage
    # output widths, unit counts and the group count for grouped convs.
    def __init__(self, cfg):
        super(ShuffleNet, self).__init__()
        out_planes = cfg['out_planes']
        num_blocks = cfg['num_blocks']
        groups = cfg['groups']

        self.conv1 = nn.Conv2d(3, 24, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(24)
        self.in_planes = 24
        self.layer1 = self._make_layer(out_planes[0], num_blocks[0], groups)
        self.layer2 = self._make_layer(out_planes[1], num_blocks[1], groups)
        self.layer3 = self._make_layer(out_planes[2], num_blocks[2], groups)
        self.linear = nn.Linear(out_planes[2], 10)

    def _make_layer(self, out_planes, num_blocks, groups):
        layers = []
        for i in range(num_blocks):
            # First unit of each stage downsamples (stride 2) and its output
            # is concatenated with the shortcut, so it only needs to produce
            # out_planes - in_planes new channels (cat_planes).
            stride = 2 if i == 0 else 1
            cat_planes = self.in_planes if i == 0 else 0
            layers.append(Bottleneck(self.in_planes, out_planes-cat_planes, stride=stride, groups=groups))
            self.in_planes = out_planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Three stride-2 stages: 32x32 -> 4x4; pool to 1x1.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ShuffleNetG2():
    # ShuffleNet with 2 groups in the grouped 1x1 convolutions.
    cfg = {
        'out_planes': [200,400,800],
        'num_blocks': [4,8,4],
        'groups': 2
    }
    return ShuffleNet(cfg)

def ShuffleNetG3():
    # ShuffleNet with 3 groups (wider stages to keep FLOPs comparable).
    cfg = {
        'out_planes': [240,480,960],
        'num_blocks': [4,8,4],
        'groups': 3
    }
    return ShuffleNet(cfg)


def test():
    # Smoke test: forward one random CIFAR-sized image.
    net = ShuffleNetG2()
    x = torch.randn(1,3,32,32)
    y = net(x)
    print(y)

# test()
| 3,542 | 31.209091 | 126 | py |
DEAT | DEAT-main/models/lenet.py | '''LeNet in PyTorch.'''
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
    '''LeNet-5-style CNN for 3-channel 32x32 inputs, 10 output classes.'''
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1   = nn.Linear(16*5*5, 120)
        self.fc2   = nn.Linear(120, 84)
        self.fc3   = nn.Linear(84, 10)

    def forward(self, x):
        # Two conv + 2x2 max-pool stages: 32x32 -> 14x14 -> 5x5 feature maps.
        features = F.max_pool2d(F.relu(self.conv1(x)), 2)
        features = F.max_pool2d(F.relu(self.conv2(features)), 2)
        flat = features.view(features.size(0), -1)
        # Three fully connected layers; the last produces class logits.
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
| 699 | 28.166667 | 43 | py |
DEAT | DEAT-main/models/__init__.py | from .vgg import *
from .dpn import *
from .lenet import *
from .senet import *
from .pnasnet import *
from .densenet import *
from .googlenet import *
from .shufflenet import *
from .shufflenetv2 import *
from .resnet import *
from .resnext import *
from .mobilenet import *
from .mobilenetv2 import *
from .efficientnet import *
from .regnet import *
| 353 | 21.125 | 27 | py |
DEAT | DEAT-main/models/mobilenet.py | '''MobileNet in PyTorch.
See the paper "MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications"
for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Block(nn.Module):
    '''Depthwise conv + Pointwise conv'''
    def __init__(self, in_planes, out_planes, stride=1):
        super(Block, self).__init__()
        # Depthwise 3x3 (groups equal to channels), then pointwise 1x1.
        self.conv1 = nn.Conv2d(in_planes, in_planes, kernel_size=3,
                               stride=stride, padding=1, groups=in_planes, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv2 = nn.Conv2d(in_planes, out_planes, kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)

    def forward(self, x):
        depthwise = F.relu(self.bn1(self.conv1(x)))
        pointwise = F.relu(self.bn2(self.conv2(depthwise)))
        return pointwise
class MobileNet(nn.Module):
    # MobileNet v1 for CIFAR: stride-1 stem, depthwise-separable stack from
    # cfg, 2x2 average pool, linear classifier.
    # (128,2) means conv planes=128, conv stride=2, by default conv stride=1
    cfg = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]

    def __init__(self, num_classes=10):
        super(MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)

    def _make_layers(self, in_planes):
        # cfg entries are either an int (width, stride 1) or a
        # (width, stride) tuple.
        layers = []
        for x in self.cfg:
            out_planes = x if isinstance(x, int) else x[0]
            stride = 1 if isinstance(x, int) else x[1]
            layers.append(Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        # Four stride-2 blocks: 32x32 -> 2x2; pool to 1x1.
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def test():
    '''Smoke test: forward one random CIFAR-sized image through MobileNet.'''
    model = MobileNet()
    batch = torch.randn(1, 3, 32, 32)
    print(model(batch).size())

# test()
| 2,025 | 31.677419 | 123 | py |
DEAT | DEAT-main/models/dpn.py | '''Dual Path Networks in PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    # Dual Path Network block: 1x1 -> grouped 3x3 -> 1x1 producing
    # out_planes residual channels plus dense_depth densely-connected
    # channels. The first layer of a stage projects the shortcut.
    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth

        self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        # Grouped 3x3 conv (32 groups) carries the stride.
        self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        self.conv3 = nn.Conv2d(in_planes, out_planes+dense_depth, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes+dense_depth)

        self.shortcut = nn.Sequential()
        if first_layer:
            self.shortcut = nn.Sequential(
                nn.Conv2d(last_planes, out_planes+dense_depth, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes+dense_depth)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        x = self.shortcut(x)
        d = self.out_planes
        # First d channels: residual sum (ResNet path);
        # remaining channels: concatenation (DenseNet path).
        out = torch.cat([x[:,:d,:,:]+out[:,:d,:,:], x[:,d:,:,:], out[:,d:,:,:]], 1)
        out = F.relu(out)
        return out
class DPN(nn.Module):
    # Dual Path Network for CIFAR: 3x3 stem, four stages of Bottleneck
    # blocks whose widths/growth come from cfg, 4x4 pool, linear classifier.
    def __init__(self, cfg):
        super(DPN, self).__init__()
        in_planes, out_planes = cfg['in_planes'], cfg['out_planes']
        num_blocks, dense_depth = cfg['num_blocks'], cfg['dense_depth']

        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.last_planes = 64
        self.layer1 = self._make_layer(in_planes[0], out_planes[0], num_blocks[0], dense_depth[0], stride=1)
        self.layer2 = self._make_layer(in_planes[1], out_planes[1], num_blocks[1], dense_depth[1], stride=2)
        self.layer3 = self._make_layer(in_planes[2], out_planes[2], num_blocks[2], dense_depth[2], stride=2)
        self.layer4 = self._make_layer(in_planes[3], out_planes[3], num_blocks[3], dense_depth[3], stride=2)
        # Final width = residual channels + dense channels accumulated over
        # the last stage's blocks.
        self.linear = nn.Linear(out_planes[3]+(num_blocks[3]+1)*dense_depth[3], 10)

    def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
        # Only the stage's first block downsamples and projects the shortcut.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for i,stride in enumerate(strides):
            layers.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, stride, i==0))
            # Dense path grows the channel count by dense_depth per block.
            self.last_planes = out_planes + (i+2) * dense_depth
        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # 32x32 input -> 4x4 feature maps here; pool to 1x1.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DPN26():
cfg = {
'in_planes': (96,192,384,768),
'out_planes': (256,512,1024,2048),
'num_blocks': (2,2,2,2),
'dense_depth': (16,32,24,128)
}
return DPN(cfg)
def DPN92():
    """Build the 92-layer Dual Path Network configuration."""
    config = dict(
        in_planes=(96, 192, 384, 768),
        out_planes=(256, 512, 1024, 2048),
        num_blocks=(3, 4, 20, 3),
        dense_depth=(16, 32, 24, 128),
    )
    return DPN(config)
def test():
    """Smoke-test DPN92 on one random CIFAR-sized input and print the logits."""
    model = DPN92()
    dummy = torch.randn(1, 3, 32, 32)
    logits = model(dummy)
    print(logits)
# test()
| 3,562 | 34.989899 | 116 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/pnm_optim/pnm.py |
import math
import torch
from torch.optim.optimizer import Optimizer, required
class PNM(Optimizer):
    r"""Implements Positive-Negative Momentum (PNM).

    It has been proposed in
    `Positive-Negative Momentum: Manipulating Stochastic Gradient Noise to Improve
    Generalization`__.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        betas (Tuple[float, float], optional): inertia coefficients used for computing
            pn momentum (default: (0.9, 1.))
        weight_decay (float, optional): weight decay (default: 0)
        decoupled (bool, optional): decoupled weight decay or L2 regularization (default: True)
    """
    def __init__(self, params, lr=required, betas=(0.9, 1.), weight_decay=0, decoupled=True):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0. <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if weight_decay < 0.0:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay, decoupled=decoupled)
        super(PNM, self).__init__(params, defaults)
    def __setstate__(self, state):
        # Backfill 'decoupled' for optimizers restored from older checkpoints.
        super(PNM, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('decoupled', True)
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            beta1, beta2 = group['betas']
            for p in group['params']:
                if p.grad is None:
                    continue
                d_p = p.grad
                # Perform decoupled weight decay or L2 Regularization
                if group['decoupled']:
                    p.mul_(1 - group['lr'] * group['weight_decay'])
                else:
                    d_p.add_(p.data, alpha=group['weight_decay'])
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                state['step'] += 1
                # NOTE: `param_state` aliases `state` (same self.state[p] dict).
                param_state = self.state[p]
                if state['step'] == 1:
                    pos_momentum = param_state['pos_momentum'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    neg_momentum = param_state['neg_momentum'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                elif state['step'] % 2 == 1:
                    pos_momentum = param_state['pos_momentum']
                    neg_momentum = param_state['neg_momentum']
                else:
                    # On even steps, swap the roles of the two buffers so each
                    # momentum accumulates gradients from alternating steps.
                    neg_momentum = param_state['pos_momentum']
                    pos_momentum = param_state['neg_momentum']
                # EMA update of the "positive" buffer with inertia beta1**2.
                pos_momentum.mul_(beta1**2).add_(d_p, alpha=1-beta1**2)
                # Normalizer keeping the update's noise magnitude comparable
                # to plain momentum: sqrt((1+beta2)^2 + beta2^2).
                noise_norm = math.sqrt((1+beta2) ** 2 + beta2 ** 2)
                # Update direction: (1+beta2)*m+ - beta2*m-, rescaled.
                delta_p = pos_momentum.mul(1+beta2).add(neg_momentum, alpha=-beta2).mul(1/noise_norm)
                p.add_(delta_p, alpha=-group['lr'])
        return loss
| 3,616 | 39.188889 | 121 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/pnm_optim/__init__.py |
# Re-export the optimizer classes at package level.
from .pnm import *
from .adapnm import *
# Drop the submodule names so only the optimizer classes remain visible.
del pnm
del adapnm
| 62 | 8 | 21 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/pnm_optim/adapnm.py |
import math
import torch
from torch.optim.optimizer import Optimizer, required
class AdaPNM(Optimizer):
    r"""Implements Adaptive Positive-Negative Momentum.

    It has been proposed in
    `Positive-Negative Momentum: Manipulating Stochastic Gradient Noise to Improve
    Generalization`__.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float, float], optional): coefficients used for computing
            running averages of gradient, its square, and pn momentum (default: (0.9, 0.999, 1.))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): decoupled weight decay (default: 0.)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: True)
        decoupled (bool, optional): decoupled weight decay or L2 regularization (default: True)
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999, 1.), eps=1e-8,
                 weight_decay=0., amsgrad=True, decoupled=True):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0. <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad, decoupled=decoupled)
        super(AdaPNM, self).__init__(params, defaults)
    def __setstate__(self, state):
        # Backfill options for optimizers restored from older checkpoints.
        super(AdaPNM, self).__setstate__(state)
        for group in self.param_groups:
            group.setdefault('amsgrad', True)
            group.setdefault('decoupled', True)
    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('This optimizer does not support sparse gradients.')
                # Perform decoupled weight decay or L2 Regularization
                if group['decoupled']:
                    p.mul_(1 - group['lr'] * group['weight_decay'])
                else:
                    grad.add_(p.data, alpha=group['weight_decay'])
                amsgrad = group['amsgrad']
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Second (negative-phase) moving average of gradient values
                    state['neg_exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2, beta3 = group['betas']
                state['step'] += 1
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                exp_avg_sq = state['exp_avg_sq']
                # Alternate which buffer plays the "positive" role each step,
                # so the two momenta accumulate gradients from alternating steps.
                if state['step'] % 2 == 1:
                    exp_avg, neg_exp_avg = state['exp_avg'], state['neg_exp_avg']
                else:
                    exp_avg, neg_exp_avg = state['neg_exp_avg'], state['exp_avg']
                # EMA update of the active momentum with inertia beta1**2.
                exp_avg.mul_(beta1**2).add_(grad, alpha=1 - beta1**2)
                # Normalizer keeping update noise comparable to plain Adam.
                noise_norm = math.sqrt((1+beta3) ** 2 + beta3 ** 2)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = group['lr'] / bias_correction1
                # Update direction: (1+beta3)*m+ - beta3*m-, rescaled.
                pnmomentum = exp_avg.mul(1+beta3).add(neg_exp_avg,alpha=-beta3).mul(1/noise_norm)
                p.addcdiv_(pnmomentum, denom, value=-step_size)
        return loss
| 5,725 | 45.177419 | 106 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/model/resnet.py | '''ResNet in PyTorch.
The source code is adopted from:
https://github.com/kuangliu/pytorch-cifar
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Two 3x3 conv-BN layers with a residual connection (ResNet-18/34 block)."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        # Main path: conv-bn, conv-bn; the ReLUs are applied in forward().
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless spatial size or channel count changes, in
        # which case a 1x1 conv-BN projection matches the dimensions.
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return F.relu(out + residual)
class Bottleneck(nn.Module):
    """Bottleneck residual block (ResNet-50/101/152): 1x1 reduce,
    3x3 (carries the stride), 1x1 expand by `expansion`."""
    expansion = 4
    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)
        self.shortcut = nn.Sequential()
        # Project the shortcut when spatial size or channel count changes.
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """ResNet for 32x32 inputs (CIFAR variant: 3x3 stem, no max-pool).

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        num_blocks: blocks per stage, e.g. [2, 2, 2, 2] for ResNet-18.
        num_classes: size of the final linear classifier.
    """
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512*block.expansion, num_classes)
    def _make_layer(self, block, planes, num_blocks, stride):
        """Build one stage; only the first block downsamples (given stride)."""
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        # 4x4 average pool collapses the final 4x4 feature map to 1x1.
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def ResNet18(num_classes=10):
    """ResNet-18: BasicBlock with [2, 2, 2, 2] blocks per stage."""
    return ResNet(BasicBlock, [2, 2, 2, 2], num_classes=num_classes)


def ResNet34(num_classes=10):
    """ResNet-34: BasicBlock with [3, 4, 6, 3] blocks per stage."""
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)


def ResNet50(num_classes=10):
    """ResNet-50: Bottleneck with [3, 4, 6, 3] blocks per stage."""
    return ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes)


def ResNet101(num_classes=10):
    """ResNet-101: Bottleneck with [3, 4, 23, 3] blocks per stage."""
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes)


def ResNet152(num_classes=10):
    """ResNet-152: Bottleneck with [3, 8, 36, 3] blocks per stage."""
    return ResNet(Bottleneck, [3, 8, 36, 3], num_classes=num_classes)
| 4,138 | 34.076271 | 102 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/model/vgg.py | '''VGG11/13/16/19 in Pytorch.
The source code is adopted from:
https://github.com/kuangliu/pytorch-cifar
'''
import torch
import torch.nn as nn
# Layer specifications: integers are conv output widths, 'M' is a max-pool.
cfg = {
    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}


class VGG(nn.Module):
    """VGG backbone for 32x32 inputs, built from a `cfg` layer spec."""

    def __init__(self, vgg_name, num_classes=10):
        super(VGG, self).__init__()
        self.features = self._make_layers(cfg[vgg_name])
        # After five 2x2 max-pools a 32x32 input is 1x1x512.
        self.classifier = nn.Linear(512, num_classes)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, spec):
        """Translate a spec list into a conv-BN-ReLU / max-pool Sequential."""
        layers = []
        in_channels = 3
        for entry in spec:
            if entry == 'M':
                layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                layers.extend([
                    nn.Conv2d(in_channels, entry, kernel_size=3, padding=1),
                    nn.BatchNorm2d(entry),
                    nn.ReLU(inplace=True),
                ])
                in_channels = entry
        # 1x1 average pool with stride 1 is an identity kept for parity
        # with the reference implementation.
        layers.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*layers)
| 1,432 | 31.568182 | 117 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/model/densenet.py | '''DenseNet in PyTorch.
The source code is adopted from:
https://github.com/kuangliu/pytorch-cifar
'''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """DenseNet bottleneck: BN-ReLU-1x1 conv then BN-ReLU-3x3 conv, with the
    new `growth_rate` feature maps concatenated onto the input."""

    def __init__(self, in_planes, growth_rate):
        super(Bottleneck, self).__init__()
        inter_planes = 4 * growth_rate  # standard DenseNet-B bottleneck width
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, inter_planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(inter_planes)
        self.conv2 = nn.Conv2d(inter_planes, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        new_features = self.conv1(F.relu(self.bn1(x)))
        new_features = self.conv2(F.relu(self.bn2(new_features)))
        return torch.cat([new_features, x], 1)
class Transition(nn.Module):
    """DenseNet transition: compress channels with a 1x1 conv, then halve the
    spatial resolution with 2x2 average pooling."""

    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)

    def forward(self, x):
        compressed = self.conv(F.relu(self.bn(x)))
        return F.avg_pool2d(compressed, 2)
class DenseNet(nn.Module):
    """DenseNet for 32x32 inputs: four dense blocks separated by transitions.

    Args:
        block: dense layer class (Bottleneck).
        nblocks: number of dense layers in each of the four blocks.
        growth_rate: channels added by each dense layer.
        reduction: channel compression factor at each transition.
        num_classes: size of the final linear classifier.
    """
    def __init__(self, block, nblocks, growth_rate=12, reduction=0.5, num_classes=10):
        super(DenseNet, self).__init__()
        self.growth_rate = growth_rate
        num_planes = 2*growth_rate
        self.conv1 = nn.Conv2d(3, num_planes, kernel_size=3, padding=1, bias=False)
        # Each dense block grows channels by nblocks[i]*growth_rate; each
        # transition then compresses them by `reduction`.
        self.dense1 = self._make_dense_layers(block, num_planes, nblocks[0])
        num_planes += nblocks[0]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans1 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense2 = self._make_dense_layers(block, num_planes, nblocks[1])
        num_planes += nblocks[1]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans2 = Transition(num_planes, out_planes)
        num_planes = out_planes
        self.dense3 = self._make_dense_layers(block, num_planes, nblocks[2])
        num_planes += nblocks[2]*growth_rate
        out_planes = int(math.floor(num_planes*reduction))
        self.trans3 = Transition(num_planes, out_planes)
        num_planes = out_planes
        # The last dense block has no transition after it.
        self.dense4 = self._make_dense_layers(block, num_planes, nblocks[3])
        num_planes += nblocks[3]*growth_rate
        self.bn = nn.BatchNorm2d(num_planes)
        self.linear = nn.Linear(num_planes, num_classes)
    def _make_dense_layers(self, block, in_planes, nblock):
        """Stack `nblock` dense layers, growing the input width each time."""
        layers = []
        for i in range(nblock):
            layers.append(block(in_planes, self.growth_rate))
            in_planes += self.growth_rate
        return nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv1(x)
        out = self.trans1(self.dense1(out))
        out = self.trans2(self.dense2(out))
        out = self.trans3(self.dense3(out))
        out = self.dense4(out)
        out = F.avg_pool2d(F.relu(self.bn(out)), 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out
def DenseNet121(num_classes=10):
    """DenseNet-121: [6, 12, 24, 16] layers, growth rate 32."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=32, num_classes=num_classes)


def DenseNet169(num_classes=10):
    """DenseNet-169: [6, 12, 32, 32] layers, growth rate 32."""
    return DenseNet(Bottleneck, [6, 12, 32, 32], growth_rate=32, num_classes=num_classes)


def DenseNet201(num_classes=10):
    """DenseNet-201: [6, 12, 48, 32] layers, growth rate 32."""
    return DenseNet(Bottleneck, [6, 12, 48, 32], growth_rate=32, num_classes=num_classes)


def DenseNet161(num_classes=10):
    """DenseNet-161: [6, 12, 36, 24] layers, growth rate 48."""
    return DenseNet(Bottleneck, [6, 12, 36, 24], growth_rate=48, num_classes=num_classes)


def densenet_cifar(num_classes=10):
    """Small CIFAR DenseNet: [6, 12, 24, 16] layers, growth rate 12."""
    return DenseNet(Bottleneck, [6, 12, 24, 16], growth_rate=12, num_classes=num_classes)
| 3,707 | 33.981132 | 96 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/model/googlenet.py | """google net in pytorch
The source code is adopted from:
https://github.com/weiaicunzai/pytorch-cifar100/
[1] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
Going Deeper with Convolutions
https://arxiv.org/abs/1409.4842v1
"""
import torch
import torch.nn as nn
class Inception(nn.Module):
    """GoogLeNet Inception block: four parallel branches whose outputs are
    concatenated along the channel dimension.

    Args:
        input_channels: channels of the incoming feature map.
        n1x1: output channels of the 1x1 branch.
        n3x3_reduce, n3x3: 1x1 reduction and 3x3 branch output channels.
        n5x5_reduce, n5x5: 1x1 reduction and "5x5" branch output channels.
        pool_proj: output channels of the pool-projection branch.
    """
    def __init__(self, input_channels, n1x1, n3x3_reduce, n3x3, n5x5_reduce, n5x5, pool_proj):
        super().__init__()
        #1x1conv branch
        self.b1 = nn.Sequential(
            nn.Conv2d(input_channels, n1x1, kernel_size=1),
            nn.BatchNorm2d(n1x1),
            nn.ReLU(inplace=True)
        )
        #1x1conv -> 3x3conv branch
        self.b2 = nn.Sequential(
            nn.Conv2d(input_channels, n3x3_reduce, kernel_size=1),
            nn.BatchNorm2d(n3x3_reduce),
            nn.ReLU(inplace=True),
            nn.Conv2d(n3x3_reduce, n3x3, kernel_size=3, padding=1),
            nn.BatchNorm2d(n3x3),
            nn.ReLU(inplace=True)
        )
        # 1x1conv -> "5x5" branch, realised as two stacked 3x3 convs to get
        # the same receptive field with fewer parameters.
        self.b3 = nn.Sequential(
            nn.Conv2d(input_channels, n5x5_reduce, kernel_size=1),
            nn.BatchNorm2d(n5x5_reduce),
            nn.ReLU(inplace=True),
            nn.Conv2d(n5x5_reduce, n5x5, kernel_size=3, padding=1),
            # BUG FIX: was nn.BatchNorm2d(n5x5, n5x5), which passed n5x5 as
            # the `eps` argument and effectively disabled this normalization.
            nn.BatchNorm2d(n5x5),
            nn.ReLU(inplace=True),
            nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
            nn.BatchNorm2d(n5x5),
            nn.ReLU(inplace=True)
        )
        #3x3pooling -> 1x1conv branch ("same" pooling: stride 1, padding 1)
        self.b4 = nn.Sequential(
            nn.MaxPool2d(3, stride=1, padding=1),
            nn.Conv2d(input_channels, pool_proj, kernel_size=1),
            nn.BatchNorm2d(pool_proj),
            nn.ReLU(inplace=True)
        )
    def forward(self, x):
        # Concatenate the four branch outputs along the channel axis.
        return torch.cat([self.b1(x), self.b2(x), self.b3(x), self.b4(x)], dim=1)
class GoogleNet(nn.Module):
    """GoogLeNet (Inception v1) adapted for small inputs: a single conv
    pre-layer replaces the original stem, and the auxiliary classifiers
    are omitted."""
    def __init__(self, num_class=100):
        super().__init__()
        self.prelayer = nn.Sequential(
            nn.Conv2d(3, 192, kernel_size=3, padding=1),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True)
        )
        #although we only use 1 conv layer as prelayer,
        #we still use name a3, b3.......
        self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
        #"""In general, an Inception network is a network consisting of
        #modules of the above type stacked upon each other, with occasional
        #max-pooling layers with stride 2 to halve the resolution of the
        #grid"""
        self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
        self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
        #input feature size: 8*8*1024
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout2d(p=0.4)
        self.linear = nn.Linear(1024, num_class)
    def forward(self, x):
        output = self.prelayer(x)
        output = self.a3(output)
        output = self.b3(output)
        output = self.maxpool(output)
        output = self.a4(output)
        output = self.b4(output)
        output = self.c4(output)
        output = self.d4(output)
        output = self.e4(output)
        output = self.maxpool(output)
        output = self.a5(output)
        output = self.b5(output)
        #"""It was found that a move from fully connected layers to
        #average pooling improved the top-1 accuracy by about 0.6%,
        #however the use of dropout remained essential even after
        #removing the fully connected layers."""
        output = self.avgpool(output)
        output = self.dropout(output)
        output = output.view(output.size()[0], -1)
        output = self.linear(output)
        return output
def googlenet():
    """Factory returning a GoogleNet with the default number of classes."""
    return GoogleNet()
| 4,443 | 32.164179 | 94 | py |
DEAT | DEAT-main/Positive_Negative_Momentum/model/__init__.py |
# Re-export the model constructors at package level.
from .resnet import *
from .vgg import *
from .densenet import *
from .googlenet import *
# Remove the submodule names so the package namespace only exposes models.
del resnet
del vgg
del densenet
del googlenet
| 139 | 10.666667 | 24 | py |
beta-tcvae | beta-tcvae-master/disentanglement_metrics.py | import math
import os
import torch
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.autograd import Variable
import lib.utils as utils
from metric_helpers.loader import load_model_and_dataset
from metric_helpers.mi_metric import compute_metric_shapes, compute_metric_faces
def estimate_entropies(qz_samples, qz_params, q_dist, n_samples=10000, weights=None):
    """Computes the term:
        E_{p(x)} E_{q(z|x)} [-log q(z)]
    and
        E_{p(x)} E_{q(z_j|x)} [-log q(z_j)]
    where q(z) = 1/N sum_n=1^N q(z|x_n).

    Assumes samples are from q(z|x) for *all* x in the dataset.
    Assumes that q(z|x) is factorial ie. q(z|x) = prod_j q(z_j|x).

    Computes numerically stable NLL:
        - log q(z) = log N - logsumexp_n=1^N log q(z|x_n)

    Inputs:
    -------
        qz_samples (K, N) Variable
        qz_params  (N, K, nparams) Variable
        weights    (N) Variable, optional sampling weights over data points

    Requires CUDA (all intermediates are placed on the GPU).
    """
    # Only take a sample subset of the samples
    if weights is None:
        qz_samples = qz_samples.index_select(1, Variable(torch.randperm(qz_samples.size(1))[:n_samples].cuda()))
    else:
        sample_inds = torch.multinomial(weights, n_samples, replacement=True)
        qz_samples = qz_samples.index_select(1, sample_inds)
    K, S = qz_samples.size()
    N, _, nparams = qz_params.size()
    assert(nparams == q_dist.nparams)
    assert(K == qz_params.size(1))
    # log-weights of each data point in the mixture q(z) (uniform if None).
    if weights is None:
        weights = -math.log(N)
    else:
        weights = torch.log(weights.view(N, 1, 1) / weights.sum())
    entropies = torch.zeros(K).cuda()
    pbar = tqdm(total=S)
    k = 0
    # Process the S samples in chunks of 10 to bound the (N, K, chunk) buffer.
    while k < S:
        batch_size = min(10, S - k)
        logqz_i = q_dist.log_density(
            qz_samples.view(1, K, S).expand(N, K, S)[:, :, k:k + batch_size],
            qz_params.view(N, K, 1, nparams).expand(N, K, S, nparams)[:, :, k:k + batch_size])
        k += batch_size
        # computes - log q(z_i) summed over minibatch
        entropies += - utils.logsumexp(logqz_i + weights, dim=0, keepdim=False).data.sum(1)
        pbar.update(batch_size)
    pbar.close()
    # Monte Carlo average over the S latent samples.
    entropies /= S
    return entropies
def mutual_info_metric_shapes(vae, shapes_dataset):
    """Compute the mutual-information-gap (MIG) metric on dSprites.

    Encodes the whole dataset, reshapes q(z|x) parameters onto the known
    factor grid (3 shapes x 6 scales x 40 orientations x 32 x 32 positions),
    and estimates the marginal latent entropies plus the entropies
    conditioned on each ground-truth factor.

    Returns:
        (metric, marginal_entropies, cond_entropies)

    Requires CUDA.
    """
    dataset_loader = DataLoader(shapes_dataset, batch_size=1000, num_workers=1, shuffle=False)
    N = len(dataset_loader.dataset)  # number of data samples
    K = vae.z_dim                    # number of latent variables
    nparams = vae.q_dist.nparams
    vae.eval()
    print('Computing q(z|x) distributions.')
    qz_params = torch.Tensor(N, K, nparams)
    n = 0
    for xs in dataset_loader:
        batch_size = xs.size(0)
        xs = Variable(xs.view(batch_size, 1, 64, 64).cuda(), volatile=True)
        qz_params[n:n + batch_size] = vae.encoder.forward(xs).view(batch_size, vae.z_dim, nparams).data
        n += batch_size
    # Factor grid: shape (3) x scale (6) x orientation (40) x pos-x (32) x pos-y (32).
    qz_params = Variable(qz_params.view(3, 6, 40, 32, 32, K, nparams).cuda())
    qz_samples = vae.q_dist.sample(params=qz_params)
    print('Estimating marginal entropies.')
    # marginal entropies
    marginal_entropies = estimate_entropies(
        qz_samples.view(N, K).transpose(0, 1),
        qz_params.view(N, K, nparams),
        vae.q_dist)
    marginal_entropies = marginal_entropies.cpu()
    cond_entropies = torch.zeros(4, K)
    print('Estimating conditional entropies for scale.')
    for i in range(6):
        qz_samples_scale = qz_samples[:, i, :, :, :, :].contiguous()
        qz_params_scale = qz_params[:, i, :, :, :, :].contiguous()
        cond_entropies_i = estimate_entropies(
            qz_samples_scale.view(N // 6, K).transpose(0, 1),
            qz_params_scale.view(N // 6, K, nparams),
            vae.q_dist)
        cond_entropies[0] += cond_entropies_i.cpu() / 6
    print('Estimating conditional entropies for orientation.')
    for i in range(40):
        qz_samples_scale = qz_samples[:, :, i, :, :, :].contiguous()
        qz_params_scale = qz_params[:, :, i, :, :, :].contiguous()
        cond_entropies_i = estimate_entropies(
            qz_samples_scale.view(N // 40, K).transpose(0, 1),
            qz_params_scale.view(N // 40, K, nparams),
            vae.q_dist)
        cond_entropies[1] += cond_entropies_i.cpu() / 40
    print('Estimating conditional entropies for pos x.')
    for i in range(32):
        qz_samples_scale = qz_samples[:, :, :, i, :, :].contiguous()
        qz_params_scale = qz_params[:, :, :, i, :, :].contiguous()
        cond_entropies_i = estimate_entropies(
            qz_samples_scale.view(N // 32, K).transpose(0, 1),
            qz_params_scale.view(N // 32, K, nparams),
            vae.q_dist)
        cond_entropies[2] += cond_entropies_i.cpu() / 32
    # Fixed typo in the progress message: was 'pox y.'
    print('Estimating conditional entropies for pos y.')
    for i in range(32):
        qz_samples_scale = qz_samples[:, :, :, :, i, :].contiguous()
        qz_params_scale = qz_params[:, :, :, :, i, :].contiguous()
        cond_entropies_i = estimate_entropies(
            qz_samples_scale.view(N // 32, K).transpose(0, 1),
            qz_params_scale.view(N // 32, K, nparams),
            vae.q_dist)
        cond_entropies[3] += cond_entropies_i.cpu() / 32
    metric = compute_metric_shapes(marginal_entropies, cond_entropies)
    return metric, marginal_entropies, cond_entropies
def mutual_info_metric_faces(vae, shapes_dataset):
    """Compute the mutual-information-gap (MIG) metric on the 3D faces data.

    Encodes the whole dataset, reshapes q(z|x) parameters onto the factor
    grid (50 identities x 21 azimuths x 11 elevations x 11 lightings), and
    estimates marginal latent entropies plus entropies conditioned on each
    pose/lighting factor.

    Returns:
        (metric, marginal_entropies, cond_entropies)

    Requires CUDA.
    """
    dataset_loader = DataLoader(shapes_dataset, batch_size=1000, num_workers=1, shuffle=False)
    N = len(dataset_loader.dataset)  # number of data samples
    K = vae.z_dim                    # number of latent variables
    nparams = vae.q_dist.nparams
    vae.eval()
    print('Computing q(z|x) distributions.')
    qz_params = torch.Tensor(N, K, nparams)
    n = 0
    for xs in dataset_loader:
        batch_size = xs.size(0)
        xs = Variable(xs.view(batch_size, 1, 64, 64).cuda(), volatile=True)
        qz_params[n:n + batch_size] = vae.encoder.forward(xs).view(batch_size, vae.z_dim, nparams).data
        n += batch_size
    # Factor grid: identity (50) x azimuth (21) x elevation (11) x lighting (11).
    qz_params = Variable(qz_params.view(50, 21, 11, 11, K, nparams).cuda())
    qz_samples = vae.q_dist.sample(params=qz_params)
    print('Estimating marginal entropies.')
    # marginal entropies
    marginal_entropies = estimate_entropies(
        qz_samples.view(N, K).transpose(0, 1),
        qz_params.view(N, K, nparams),
        vae.q_dist)
    marginal_entropies = marginal_entropies.cpu()
    cond_entropies = torch.zeros(3, K)
    print('Estimating conditional entropies for azimuth.')
    for i in range(21):
        qz_samples_pose_az = qz_samples[:, i, :, :, :].contiguous()
        qz_params_pose_az = qz_params[:, i, :, :, :].contiguous()
        cond_entropies_i = estimate_entropies(
            qz_samples_pose_az.view(N // 21, K).transpose(0, 1),
            qz_params_pose_az.view(N // 21, K, nparams),
            vae.q_dist)
        cond_entropies[0] += cond_entropies_i.cpu() / 21
    print('Estimating conditional entropies for elevation.')
    for i in range(11):
        qz_samples_pose_el = qz_samples[:, :, i, :, :].contiguous()
        qz_params_pose_el = qz_params[:, :, i, :, :].contiguous()
        cond_entropies_i = estimate_entropies(
            qz_samples_pose_el.view(N // 11, K).transpose(0, 1),
            qz_params_pose_el.view(N // 11, K, nparams),
            vae.q_dist)
        cond_entropies[1] += cond_entropies_i.cpu() / 11
    print('Estimating conditional entropies for lighting.')
    for i in range(11):
        qz_samples_lighting = qz_samples[:, :, :, i, :].contiguous()
        qz_params_lighting = qz_params[:, :, :, i, :].contiguous()
        cond_entropies_i = estimate_entropies(
            qz_samples_lighting.view(N // 11, K).transpose(0, 1),
            qz_params_lighting.view(N // 11, K, nparams),
            vae.q_dist)
        cond_entropies[2] += cond_entropies_i.cpu() / 11
    metric = compute_metric_faces(marginal_entropies, cond_entropies)
    return metric, marginal_entropies, cond_entropies
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpt', required=True)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--save', type=str, default='.')
    args = parser.parse_args()
    if args.gpu != 0:
        torch.cuda.set_device(args.gpu)
    vae, dataset, cpargs = load_model_and_dataset(args.checkpt)
    # Dispatch to mutual_info_metric_shapes / mutual_info_metric_faces based
    # on the dataset name stored in the checkpoint args.
    metric, marginal_entropies, cond_entropies = eval('mutual_info_metric_' + cpargs.dataset)(vae, dataset)
    torch.save({
        'metric': metric,
        'marginal_entropies': marginal_entropies,
        'cond_entropies': cond_entropies,
    }, os.path.join(args.save, 'disentanglement_metric.pth'))
    print('MIG: {:.2f}'.format(metric))
| 8,678 | 34.863636 | 112 | py |
beta-tcvae | beta-tcvae-master/vae_quant.py | import os
import time
import math
from numbers import Number
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import visdom
from torch.autograd import Variable
from torch.utils.data import DataLoader
import lib.dist as dist
import lib.utils as utils
import lib.datasets as dset
from lib.flows import FactorialNormalizingFlow
from elbo_decomposition import elbo_decomposition
from plot_latent_vs_true import plot_vs_gt_shapes, plot_vs_gt_faces # noqa: F401
class MLPEncoder(nn.Module):
    """MLP encoder mapping a 64x64 image to `output_dim` values (the
    flattened parameters of q(z|x))."""
    def __init__(self, output_dim):
        super(MLPEncoder, self).__init__()
        self.output_dim = output_dim
        self.fc1 = nn.Linear(4096, 1200)
        self.fc2 = nn.Linear(1200, 1200)
        self.fc3 = nn.Linear(1200, output_dim)
        # NOTE(review): conv_z is never used in forward(); it looks like a
        # leftover from the conv encoder. Kept so checkpoint state dicts match.
        self.conv_z = nn.Conv2d(64, output_dim, 4, 1, 0)
        # setup the non-linearity
        self.act = nn.ReLU(inplace=True)
    def forward(self, x):
        h = x.view(-1, 64 * 64)
        h = self.act(self.fc1(h))
        h = self.act(self.fc2(h))
        h = self.fc3(h)
        z = h.view(x.size(0), self.output_dim)
        return z
class MLPDecoder(nn.Module):
    """Fully-connected decoder mapping a latent code to 64x64 image logits."""

    def __init__(self, input_dim):
        super(MLPDecoder, self).__init__()
        # Three tanh hidden layers of width 1200, then a linear readout
        # producing one value per pixel of a 64x64 image.
        self.net = nn.Sequential(
            nn.Linear(input_dim, 1200),
            nn.Tanh(),
            nn.Linear(1200, 1200),
            nn.Tanh(),
            nn.Linear(1200, 1200),
            nn.Tanh(),
            nn.Linear(1200, 4096)
        )

    def forward(self, z):
        flat = z.view(z.size(0), -1)
        pixels = self.net(flat)
        return pixels.view(z.size(0), 1, 64, 64)
class ConvEncoder(nn.Module):
    """Convolutional encoder mapping a 1x64x64 image to `output_dim` values
    (the flattened parameters of q(z|x)). Trailing comments give the
    spatial size after each stride-2 convolution."""
    def __init__(self, output_dim):
        super(ConvEncoder, self).__init__()
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(1, 32, 4, 2, 1)  # 32 x 32
        self.bn1 = nn.BatchNorm2d(32)
        self.conv2 = nn.Conv2d(32, 32, 4, 2, 1)  # 16 x 16
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 64, 4, 2, 1)  # 8 x 8
        self.bn3 = nn.BatchNorm2d(64)
        self.conv4 = nn.Conv2d(64, 64, 4, 2, 1)  # 4 x 4
        self.bn4 = nn.BatchNorm2d(64)
        # 4x4 conv with no padding collapses the 4x4 map to 1x1.
        self.conv5 = nn.Conv2d(64, 512, 4)
        self.bn5 = nn.BatchNorm2d(512)
        self.conv_z = nn.Conv2d(512, output_dim, 1)
        # setup the non-linearity
        self.act = nn.ReLU(inplace=True)
    def forward(self, x):
        h = x.view(-1, 1, 64, 64)
        h = self.act(self.bn1(self.conv1(h)))
        h = self.act(self.bn2(self.conv2(h)))
        h = self.act(self.bn3(self.conv3(h)))
        h = self.act(self.bn4(self.conv4(h)))
        h = self.act(self.bn5(self.conv5(h)))
        z = self.conv_z(h).view(x.size(0), self.output_dim)
        return z
class ConvDecoder(nn.Module):
    """Transposed-convolution decoder mapping a latent code to 1x64x64 image
    logits. Trailing comments give the spatial size after each layer."""
    def __init__(self, input_dim):
        super(ConvDecoder, self).__init__()
        self.conv1 = nn.ConvTranspose2d(input_dim, 512, 1, 1, 0)  # 1 x 1
        self.bn1 = nn.BatchNorm2d(512)
        self.conv2 = nn.ConvTranspose2d(512, 64, 4, 1, 0)  # 4 x 4
        self.bn2 = nn.BatchNorm2d(64)
        self.conv3 = nn.ConvTranspose2d(64, 64, 4, 2, 1)  # 8 x 8
        self.bn3 = nn.BatchNorm2d(64)
        self.conv4 = nn.ConvTranspose2d(64, 32, 4, 2, 1)  # 16 x 16
        self.bn4 = nn.BatchNorm2d(32)
        self.conv5 = nn.ConvTranspose2d(32, 32, 4, 2, 1)  # 32 x 32
        self.bn5 = nn.BatchNorm2d(32)
        # Final upsample to 64 x 64 with a single output channel (no BN/ReLU:
        # raw logits are returned).
        self.conv_final = nn.ConvTranspose2d(32, 1, 4, 2, 1)
        # setup the non-linearity
        self.act = nn.ReLU(inplace=True)
    def forward(self, z):
        h = z.view(z.size(0), z.size(1), 1, 1)
        h = self.act(self.bn1(self.conv1(h)))
        h = self.act(self.bn2(self.conv2(h)))
        h = self.act(self.bn3(self.conv3(h)))
        h = self.act(self.bn4(self.conv4(h)))
        h = self.act(self.bn5(self.conv5(h)))
        mu_img = self.conv_final(h)
        return mu_img
class VAE(nn.Module):
    """Beta-(TC-)VAE over 1x64x64 binary images.

    NOTE(review): the `prior_dist=dist.Normal()` / `q_dist=dist.Normal()`
    defaults are mutable default arguments — the same distribution objects
    are shared by every VAE constructed with defaults. Harmless if the
    dist objects are stateless, but worth confirming.
    """
    def __init__(self, z_dim, use_cuda=False, prior_dist=dist.Normal(), q_dist=dist.Normal(),
                 include_mutinfo=True, tcvae=False, conv=False, mss=False):
        super(VAE, self).__init__()
        self.use_cuda = use_cuda
        self.z_dim = z_dim
        self.include_mutinfo = include_mutinfo
        self.tcvae = tcvae
        # lamb / beta are ELBO re-weighting knobs, set externally by training code.
        self.lamb = 0
        self.beta = 1
        self.mss = mss
        self.x_dist = dist.Bernoulli()
        # Model-specific
        # distribution family of p(z)
        self.prior_dist = prior_dist
        self.q_dist = q_dist
        # hyperparameters for prior p(z)
        self.register_buffer('prior_params', torch.zeros(self.z_dim, 2))
        # create the encoder and decoder networks
        if conv:
            self.encoder = ConvEncoder(z_dim * self.q_dist.nparams)
            self.decoder = ConvDecoder(z_dim)
        else:
            self.encoder = MLPEncoder(z_dim * self.q_dist.nparams)
            self.decoder = MLPDecoder(z_dim)
        if use_cuda:
            # calling cuda() here will put all the parameters of
            # the encoder and decoder networks into gpu memory
            self.cuda()
    # return prior parameters wrapped in a suitable Variable
    def _get_prior_params(self, batch_size=1):
        """Expand the (z_dim, 2) prior buffer to (batch_size, z_dim, 2)."""
        expanded_size = (batch_size,) + self.prior_params.size()
        prior_params = Variable(self.prior_params.expand(expanded_size))
        return prior_params
    # samples from the model p(x|z)p(z)
    def model_sample(self, batch_size=1):
        """Draw z ~ p(z) and return the decoder's image parameters p(x|z)."""
        # sample from prior (value will be sampled by guide when computing the ELBO)
        prior_params = self._get_prior_params(batch_size)
        zs = self.prior_dist.sample(params=prior_params)
        # decode the latent code z
        x_params = self.decoder.forward(zs)
        return x_params
    # define the guide (i.e. variational distribution) q(z|x)
    def encode(self, x):
        """Return (z samples, q(z|x) parameters) for a batch of images."""
        x = x.view(x.size(0), 1, 64, 64)
        # use the encoder to get the parameters used to define q(z|x)
        z_params = self.encoder.forward(x).view(x.size(0), self.z_dim, self.q_dist.nparams)
        # sample the latent code z
        zs = self.q_dist.sample(params=z_params)
        return zs, z_params
def decode(self, z):
x_params = self.decoder.forward(z).view(z.size(0), 1, 64, 64)
xs = self.x_dist.sample(params=x_params)
return xs, x_params
# define a helper function for reconstructing images
def reconstruct_img(self, x):
zs, z_params = self.encode(x)
xs, x_params = self.decode(zs)
return xs, x_params, zs, z_params
def _log_importance_weight_matrix(self, batch_size, dataset_size):
N = dataset_size
M = batch_size - 1
strat_weight = (N - M) / (N * M)
W = torch.Tensor(batch_size, batch_size).fill_(1 / M)
W.view(-1)[::M+1] = 1 / N
W.view(-1)[1::M+1] = strat_weight
W[M-1, 0] = strat_weight
return W.log()
def elbo(self, x, dataset_size):
# log p(x|z) + log p(z) - log q(z|x)
batch_size = x.size(0)
x = x.view(batch_size, 1, 64, 64)
prior_params = self._get_prior_params(batch_size)
x_recon, x_params, zs, z_params = self.reconstruct_img(x)
logpx = self.x_dist.log_density(x, params=x_params).view(batch_size, -1).sum(1)
logpz = self.prior_dist.log_density(zs, params=prior_params).view(batch_size, -1).sum(1)
logqz_condx = self.q_dist.log_density(zs, params=z_params).view(batch_size, -1).sum(1)
elbo = logpx + logpz - logqz_condx
if self.beta == 1 and self.include_mutinfo and self.lamb == 0:
return elbo, elbo.detach()
# compute log q(z) ~= log 1/(NM) sum_m=1^M q(z|x_m) = - log(MN) + logsumexp_m(q(z|x_m))
_logqz = self.q_dist.log_density(
zs.view(batch_size, 1, self.z_dim),
z_params.view(1, batch_size, self.z_dim, self.q_dist.nparams)
)
if not self.mss:
# minibatch weighted sampling
logqz_prodmarginals = (logsumexp(_logqz, dim=1, keepdim=False) - math.log(batch_size * dataset_size)).sum(1)
logqz = (logsumexp(_logqz.sum(2), dim=1, keepdim=False) - math.log(batch_size * dataset_size))
else:
# minibatch stratified sampling
logiw_matrix = Variable(self._log_importance_weight_matrix(batch_size, dataset_size).type_as(_logqz.data))
logqz = logsumexp(logiw_matrix + _logqz.sum(2), dim=1, keepdim=False)
logqz_prodmarginals = logsumexp(
logiw_matrix.view(batch_size, batch_size, 1) + _logqz, dim=1, keepdim=False).sum(1)
if not self.tcvae:
if self.include_mutinfo:
modified_elbo = logpx - self.beta * (
(logqz_condx - logpz) -
self.lamb * (logqz_prodmarginals - logpz)
)
else:
modified_elbo = logpx - self.beta * (
(logqz - logqz_prodmarginals) +
(1 - self.lamb) * (logqz_prodmarginals - logpz)
)
else:
if self.include_mutinfo:
modified_elbo = logpx - \
(logqz_condx - logqz) - \
self.beta * (logqz - logqz_prodmarginals) - \
(1 - self.lamb) * (logqz_prodmarginals - logpz)
else:
modified_elbo = logpx - \
self.beta * (logqz - logqz_prodmarginals) - \
(1 - self.lamb) * (logqz_prodmarginals - logpz)
return modified_elbo, elbo.detach()
def logsumexp(value, dim=None, keepdim=False):
    """Numerically stable log(sum(exp(value))) along `dim`.

    Equivalent to value.exp().sum(dim, keepdim).log(), but subtracts the
    per-slice max first so exp() cannot overflow. With dim=None, reduces
    over all elements.
    """
    if dim is None:
        # Global reduction over every element.
        m = torch.max(value)
        total = torch.sum(torch.exp(value - m))
        # Older PyTorch could return a plain Python number here.
        if isinstance(total, Number):
            return m + math.log(total)
        return m + torch.log(total)
    m, _ = torch.max(value, dim=dim, keepdim=True)
    shifted = value - m
    if keepdim is False:
        m = m.squeeze(dim)
    return m + torch.log(torch.sum(torch.exp(shifted), dim=dim, keepdim=keepdim))
# for loading and batching datasets
def setup_data_loaders(args, use_cuda=False):
    """Build a shuffled training DataLoader for args.dataset ('shapes' or 'faces').

    Raises ValueError for an unrecognized dataset name. pin_memory is
    enabled only when use_cuda is set.
    """
    if args.dataset == 'shapes':
        train_set = dset.Shapes()
    elif args.dataset == 'faces':
        train_set = dset.Faces()
    else:
        raise ValueError('Unknown dataset ' + str(args.dataset))

    loader_kwargs = {'num_workers': 4, 'pin_memory': use_cuda}
    return DataLoader(dataset=train_set, batch_size=args.batch_size,
                      shuffle=True, **loader_kwargs)
# Visdom window handles, reused across calls so each plot updates in place
# instead of opening a new window every logging step.
win_samples = None
win_test_reco = None
win_latent_walk = None
win_train_elbo = None
def display_samples(model, x, vis):
    """Render training diagnostics to visdom: prior samples, reconstructions,
    and latent-traversal walks.

    model: VAE (provides model_sample, reconstruct_img, decoder).
    x:     minibatch of images reshapeable to (N, 1, 64, 64).
    vis:   visdom.Visdom connection.

    Updates the module-level window handles in place.
    (Fix: removed a redundant no-op self-assignment of sample_mu.)
    """
    global win_samples, win_test_reco, win_latent_walk

    # plot 100 random samples from the prior; decoder outputs are squashed
    # through sigmoid for display.
    sample_mu = model.model_sample(batch_size=100).sigmoid()
    images = list(sample_mu.view(-1, 1, 64, 64).data.cpu())
    win_samples = vis.images(images, 10, 2, opts={'caption': 'samples'}, win=win_samples)

    # plot the reconstructed distribution for the first 50 test images,
    # interleaving each input with its reconstruction.
    test_imgs = x[:50, :]
    _, reco_imgs, zs, _ = model.reconstruct_img(test_imgs)
    reco_imgs = reco_imgs.sigmoid()
    test_reco_imgs = torch.cat([
        test_imgs.view(1, -1, 64, 64), reco_imgs.view(1, -1, 64, 64)], 0).transpose(0, 1)
    win_test_reco = vis.images(
        list(test_reco_imgs.contiguous().view(-1, 1, 64, 64).data.cpu()), 10, 2,
        opts={'caption': 'test reconstruction image'}, win=win_test_reco)

    # plot latent walks (change one variable while all others stay the same):
    # for each latent dim, zero that coordinate of 3 base codes and sweep it
    # over [-2, 2] in 7 steps.
    zs = zs[0:3]
    batch_size, z_dim = zs.size()
    xs = []
    delta = torch.autograd.Variable(torch.linspace(-2, 2, 7), volatile=True).type_as(zs)
    for i in range(z_dim):
        vec = Variable(torch.zeros(z_dim)).view(1, z_dim).expand(7, z_dim).contiguous().type_as(zs)
        vec[:, i] = 1
        vec = vec * delta[:, None]
        zs_delta = zs.clone().view(batch_size, 1, z_dim)
        zs_delta[:, :, i] = 0
        zs_walk = zs_delta + vec[None]
        xs_walk = model.decoder.forward(zs_walk.view(-1, z_dim)).sigmoid()
        xs.append(xs_walk)

    xs = list(torch.cat(xs, 0).data.cpu())
    win_latent_walk = vis.images(xs, 7, 2, opts={'caption': 'latent walk'}, win=win_latent_walk)
def plot_elbo(train_elbo, vis):
    # Line plot of the running-average ELBO history, reusing one visdom window.
    global win_train_elbo
    win_train_elbo = vis.line(torch.Tensor(train_elbo), opts={'markers': True}, win=win_train_elbo)
def anneal_kl(args, vae, iteration):
    """Set the VAE's `lamb` and `beta` penalty weights for this iteration.

    With annealing enabled, lamb decays 0.95 -> 0 and beta ramps 0 -> args.beta
    over a dataset-dependent warmup period; otherwise they are fixed at
    0 and args.beta respectively.

    Raises ValueError for an unknown dataset (previously `warmup_iter` was
    simply left unbound, producing a NameError only when annealing was on).
    """
    if args.dataset == 'shapes':
        warmup_iter = 7000
    elif args.dataset == 'faces':
        warmup_iter = 2500
    else:
        # Fail loudly instead of leaving warmup_iter unbound below.
        raise ValueError('Unknown dataset ' + str(args.dataset))

    if args.lambda_anneal:
        vae.lamb = max(0, 0.95 - 1 / warmup_iter * iteration)  # 1 --> 0
    else:
        vae.lamb = 0
    if args.beta_anneal:
        vae.beta = min(args.beta, args.beta / warmup_iter * iteration)  # 0 --> 1
    else:
        vae.beta = args.beta
def main():
    """Parse CLI args, train a (beta-TC-)VAE, report diagnostics, return the model."""
    # parse command line arguments
    parser = argparse.ArgumentParser(description="parse args")
    parser.add_argument('-d', '--dataset', default='shapes', type=str, help='dataset name',
                        choices=['shapes', 'faces'])
    parser.add_argument('-dist', default='normal', type=str, choices=['normal', 'laplace', 'flow'])
    parser.add_argument('-n', '--num-epochs', default=50, type=int, help='number of training epochs')
    parser.add_argument('-b', '--batch-size', default=2048, type=int, help='batch size')
    parser.add_argument('-l', '--learning-rate', default=1e-3, type=float, help='learning rate')
    parser.add_argument('-z', '--latent-dim', default=10, type=int, help='size of latent dimension')
    parser.add_argument('--beta', default=1, type=float, help='ELBO penalty term')
    parser.add_argument('--tcvae', action='store_true')
    parser.add_argument('--exclude-mutinfo', action='store_true')
    parser.add_argument('--beta-anneal', action='store_true')
    parser.add_argument('--lambda-anneal', action='store_true')
    parser.add_argument('--mss', action='store_true', help='use the improved minibatch estimator')
    parser.add_argument('--conv', action='store_true')
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--visdom', action='store_true', help='whether plotting in visdom is desired')
    parser.add_argument('--save', default='test1')
    parser.add_argument('--log_freq', default=200, type=int, help='num iterations per log')
    args = parser.parse_args()

    torch.cuda.set_device(args.gpu)

    # data loader
    train_loader = setup_data_loaders(args, use_cuda=True)

    # setup the VAE: pick the prior/posterior families from the CLI choice
    if args.dist == 'normal':
        prior_dist = dist.Normal()
        q_dist = dist.Normal()
    elif args.dist == 'laplace':
        prior_dist = dist.Laplace()
        q_dist = dist.Laplace()
    elif args.dist == 'flow':
        # flow prior paired with a Normal posterior
        prior_dist = FactorialNormalizingFlow(dim=args.latent_dim, nsteps=32)
        q_dist = dist.Normal()

    vae = VAE(z_dim=args.latent_dim, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist,
              include_mutinfo=not args.exclude_mutinfo, tcvae=args.tcvae, conv=args.conv, mss=args.mss)

    # setup the optimizer
    optimizer = optim.Adam(vae.parameters(), lr=args.learning_rate)

    # setup visdom for visualization
    if args.visdom:
        vis = visdom.Visdom(env=args.save, port=4500)

    train_elbo = []

    # training loop
    dataset_size = len(train_loader.dataset)
    num_iterations = len(train_loader) * args.num_epochs
    iteration = 0
    # initialize loss accumulator
    elbo_running_mean = utils.RunningAverageMeter()
    while iteration < num_iterations:
        for i, x in enumerate(train_loader):
            iteration += 1
            batch_time = time.time()
            vae.train()
            # update the lamb/beta penalty weights for this iteration
            anneal_kl(args, vae, iteration)
            optimizer.zero_grad()
            # transfer to GPU
            # NOTE(review): `async` is a reserved keyword in Python 3.7+, so
            # this line is a SyntaxError there; newer torch uses
            # non_blocking=True — confirm the targeted versions before changing.
            x = x.cuda(async=True)
            # wrap the mini-batch in a PyTorch Variable
            x = Variable(x)
            # do ELBO gradient and accumulate loss
            obj, elbo = vae.elbo(x, dataset_size)
            if utils.isnan(obj).any():
                raise ValueError('NaN spotted in objective.')
            # maximizing the objective == minimizing its negation
            obj.mean().mul(-1).backward()
            elbo_running_mean.update(elbo.mean().data[0])
            optimizer.step()

            # report training diagnostics
            if iteration % args.log_freq == 0:
                train_elbo.append(elbo_running_mean.avg)
                print('[iteration %03d] time: %.2f \tbeta %.2f \tlambda %.2f training ELBO: %.4f (%.4f)' % (
                    iteration, time.time() - batch_time, vae.beta, vae.lamb,
                    elbo_running_mean.val, elbo_running_mean.avg))

                vae.eval()

                # plot training and test ELBOs
                if args.visdom:
                    display_samples(vae, x, vis)
                    plot_elbo(train_elbo, vis)

                utils.save_checkpoint({
                    'state_dict': vae.state_dict(),
                    'args': args}, args.save, 0)
                # dispatches to plot_vs_gt_shapes / plot_vs_gt_faces by name
                eval('plot_vs_gt_' + args.dataset)(vae, train_loader.dataset,
                    os.path.join(args.save, 'gt_vs_latent_{:05d}.png'.format(iteration)))

    # Report statistics after training
    vae.eval()
    utils.save_checkpoint({
        'state_dict': vae.state_dict(),
        'args': args}, args.save, 0)
    dataset_loader = DataLoader(train_loader.dataset, batch_size=1000, num_workers=1, shuffle=False)
    logpx, dependence, information, dimwise_kl, analytical_cond_kl, marginal_entropies, joint_entropy = \
        elbo_decomposition(vae, dataset_loader)
    torch.save({
        'logpx': logpx,
        'dependence': dependence,
        'information': information,
        'dimwise_kl': dimwise_kl,
        'analytical_cond_kl': analytical_cond_kl,
        'marginal_entropies': marginal_entropies,
        'joint_entropy': joint_entropy
    }, os.path.join(args.save, 'elbo_decomposition.pth'))
    eval('plot_vs_gt_' + args.dataset)(vae, dataset_loader.dataset, os.path.join(args.save, 'gt_vs_latent.png'))
    return vae
# Script entry point: train and keep the final model for interactive use.
if __name__ == '__main__':
    model = main()
| 18,265 | 36.975052 | 120 | py |
beta-tcvae | beta-tcvae-master/plot_latent_vs_true.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import brewer2mpl
bmap = brewer2mpl.get_map('Set1', 'qualitative', 3)
colors = bmap.mpl_colors
plt.style.use('ggplot')
VAR_THRESHOLD = 1e-2
def plot_vs_gt_shapes(vae, shapes_dataset, save, z_inds=None):
    """Plot mean latent responses against the dSprites ground-truth factors.

    vae:            trained VAE; the encoder is run on the GPU.
    shapes_dataset: dataset whose factor grid is (shape 3, scale 6,
                    rotation 40, pos-x 32, pos-y 32) — see the reshape below.
    save:           output path for the figure.
    z_inds:         latent indices to plot; defaults to the active units.
    """
    dataset_loader = DataLoader(shapes_dataset, batch_size=1000, num_workers=1, shuffle=False)

    N = len(dataset_loader.dataset)  # number of data samples
    K = vae.z_dim                    # number of latent variables
    nparams = vae.q_dist.nparams
    vae.eval()

    # print('Computing q(z|x) distributions.')
    qz_params = torch.Tensor(N, K, nparams)

    n = 0
    for xs in dataset_loader:
        batch_size = xs.size(0)
        xs = Variable(xs.view(batch_size, 1, 64, 64).cuda(), volatile=True)
        qz_params[n:n + batch_size] = vae.encoder.forward(xs).view(batch_size, vae.z_dim, nparams).data
        n += batch_size

    # reshape to the ground-truth factor grid
    qz_params = qz_params.view(3, 6, 40, 32, 32, K, nparams)

    # z_j is inactive if Var_x(E[z_j|x]) < eps.
    qz_means = qz_params[:, :, :, :, :, :, 0]
    var = torch.std(qz_means.contiguous().view(N, K), dim=0).pow(2)
    active_units = torch.arange(0, K)[var > VAR_THRESHOLD].long()
    print('Active units: ' + ','.join(map(str, active_units.tolist())))
    n_active = len(active_units)
    print('Number of active units: {}/{}'.format(n_active, vae.z_dim))

    if z_inds is None:
        z_inds = active_units

    # subplots where subplot[i, j] is gt_i vs. z_j
    mean_scale = qz_means.mean(2).mean(2).mean(2)  # (shape, scale, latent)
    mean_rotation = qz_means.mean(1).mean(2).mean(2)  # (shape, rotation, latent)
    mean_pos = qz_means.mean(0).mean(0).mean(0)  # (pos_x, pos_y, latent)

    fig = plt.figure(figsize=(3, len(z_inds)))  # default is (8,6)
    gs = gridspec.GridSpec(len(z_inds), 3)
    gs.update(wspace=0, hspace=0)  # set the spacing between axes.

    # position heatmaps share one color scale across latents
    vmin_pos = torch.min(mean_pos)
    vmax_pos = torch.max(mean_pos)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[i * 3])
        ax.imshow(mean_pos[:, :, j].numpy(), cmap=plt.get_cmap('coolwarm'), vmin=vmin_pos, vmax=vmax_pos)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_ylabel(r'$z_' + str(j) + r'$')
        if i == len(z_inds) - 1:
            ax.set_xlabel(r'pos')

    # one curve per shape for the scale factor
    vmin_scale = torch.min(mean_scale)
    vmax_scale = torch.max(mean_scale)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[1 + i * 3])
        ax.plot(mean_scale[0, :, j].numpy(), color=colors[2])
        ax.plot(mean_scale[1, :, j].numpy(), color=colors[0])
        ax.plot(mean_scale[2, :, j].numpy(), color=colors[1])
        ax.set_ylim([vmin_scale, vmax_scale])
        ax.set_xticks([])
        ax.set_yticks([])
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
        if i == len(z_inds) - 1:
            ax.set_xlabel(r'scale')

    # one curve per shape for the rotation factor
    vmin_rotation = torch.min(mean_rotation)
    vmax_rotation = torch.max(mean_rotation)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[2 + i * 3])
        ax.plot(mean_rotation[0, :, j].numpy(), color=colors[2])
        ax.plot(mean_rotation[1, :, j].numpy(), color=colors[0])
        ax.plot(mean_rotation[2, :, j].numpy(), color=colors[1])
        ax.set_ylim([vmin_rotation, vmax_rotation])
        ax.set_xticks([])
        ax.set_yticks([])
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
        if i == len(z_inds) - 1:
            ax.set_xlabel(r'rotation')

    fig.text(0.5, 0.03, 'Ground Truth', ha='center')
    fig.text(0.01, 0.5, 'Learned Latent Variables ', va='center', rotation='vertical')
    plt.savefig(save)
    plt.close()
def plot_vs_gt_faces(vae, faces_dataset, save, z_inds=None):
    """Plot mean latent responses against the 3D-faces ground-truth factors.

    faces_dataset factor grid is (50, 21, 11, 11); from the axis labels
    below the last three are azimuth, elevation and lighting (the first is
    presumably face identity — confirm against the dataset).
    """
    dataset_loader = DataLoader(faces_dataset, batch_size=1000, num_workers=1, shuffle=False)

    N = len(dataset_loader.dataset)  # number of data samples
    K = vae.z_dim                    # number of latent variables
    nparams = vae.q_dist.nparams
    vae.eval()

    # print('Computing q(z|x) distributions.')
    qz_params = torch.Tensor(N, K, nparams)

    n = 0
    for xs in dataset_loader:
        batch_size = xs.size(0)
        xs = Variable(xs.view(batch_size, 1, 64, 64).cuda(), volatile=True)
        qz_params[n:n + batch_size] = vae.encoder.forward(xs).view(batch_size, vae.z_dim, nparams).data
        n += batch_size

    # reshape to the ground-truth factor grid
    qz_params = qz_params.view(50, 21, 11, 11, K, nparams)

    # z_j is inactive if Var_x(E[z_j|x]) < eps.
    qz_means = qz_params[:, :, :, :, :, 0]
    var = torch.std(qz_means.contiguous().view(N, K), dim=0).pow(2)
    active_units = torch.arange(0, K)[var > VAR_THRESHOLD].long()
    print('Active units: ' + ','.join(map(str, active_units.tolist())))
    n_active = len(active_units)
    print('Number of active units: {}/{}'.format(n_active, vae.z_dim))

    if z_inds is None:
        z_inds = active_units

    # subplots where subplot[i, j] is gt_i vs. z_j
    mean_pose_az = qz_means.mean(3).mean(2).mean(0)  # (pose_az, latent)
    mean_pose_el = qz_means.mean(3).mean(1).mean(0)  # (pose_el, latent)
    mean_light_az = qz_means.mean(2).mean(1).mean(0)  # (light_az, latent)

    fig = plt.figure(figsize=(len(z_inds), 3))  # default is (8,6)
    gs = gridspec.GridSpec(3, len(z_inds))
    gs.update(wspace=0, hspace=0)  # set the spacing between axes.

    # row 1: pose azimuth response per latent, shared y-scale
    vmin_scale = torch.min(mean_pose_az)
    vmax_scale = torch.max(mean_pose_az)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[i])
        ax.plot(mean_pose_az[:, j].numpy())
        ax.set_ylim([vmin_scale, vmax_scale])
        ax.set_xticks([])
        ax.set_yticks([])
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
        if i == 0:
            ax.set_ylabel(r'azimuth')

    # row 2: pose elevation
    vmin_scale = torch.min(mean_pose_el)
    vmax_scale = torch.max(mean_pose_el)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[len(z_inds) + i])
        ax.plot(mean_pose_el[:, j].numpy())
        ax.set_ylim([vmin_scale, vmax_scale])
        ax.set_xticks([])
        ax.set_yticks([])
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
        if i == 0:
            ax.set_ylabel(r'elevation')

    # row 3: lighting azimuth
    vmin_scale = torch.min(mean_light_az)
    vmax_scale = torch.max(mean_light_az)
    for i, j in enumerate(z_inds):
        ax = fig.add_subplot(gs[2 * len(z_inds) + i])
        ax.plot(mean_light_az[:, j].numpy())
        ax.set_ylim([vmin_scale, vmax_scale])
        ax.set_xticks([])
        ax.set_yticks([])
        x0, x1 = ax.get_xlim()
        y0, y1 = ax.get_ylim()
        ax.set_aspect(abs(x1 - x0) / abs(y1 - y0))
        if i == 0:
            ax.set_ylabel(r'lighting')

    plt.suptitle('GT Factors vs. Latent Variables')
    plt.savefig(save)
    plt.close()
# Script entry: load a trained checkpoint and plot latents vs. ground truth.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-checkpt', required=True)
    parser.add_argument('-zs', type=str, default=None)
    parser.add_argument('-gpu', type=int, default=0)
    parser.add_argument('-save', type=str, default='latent_vs_gt.pdf')
    parser.add_argument('-elbo_decomp', action='store_true')
    args = parser.parse_args()

    # imported here, at use time — presumably to avoid a circular import
    # with vae_quant (which calls into this plotting module); confirm.
    from elbo_decomposition import elbo_decomposition
    import lib.dist as dist
    import lib.flows as flows
    from vae_quant import VAE, setup_data_loaders

    def load_model_and_dataset(checkpt_filename):
        """Rebuild the VAE and its data loader from a saved checkpoint."""
        print('Loading model and dataset.')
        checkpt = torch.load(checkpt_filename, map_location=lambda storage, loc: storage)
        args = checkpt['args']
        state_dict = checkpt['state_dict']

        # model: distribution family stored in the checkpoint args
        if not hasattr(args, 'dist') or args.dist == 'normal':
            prior_dist = dist.Normal()
            q_dist = dist.Normal()
        elif args.dist == 'laplace':
            prior_dist = dist.Laplace()
            q_dist = dist.Laplace()
        elif args.dist == 'flow':
            # NOTE(review): nsteps=4 here vs. nsteps=32 elsewhere in the
            # project — confirm which matches the trained checkpoint.
            prior_dist = flows.FactorialNormalizingFlow(dim=args.latent_dim, nsteps=4)
            q_dist = dist.Normal()
        vae = VAE(z_dim=args.latent_dim, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
        vae.load_state_dict(state_dict, strict=False)

        # dataset loader
        loader = setup_data_loaders(args)
        return vae, loader, args

    # optional comma-separated list of latent indices to plot
    z_inds = list(map(int, args.zs.split(','))) if args.zs is not None else None
    torch.cuda.set_device(args.gpu)
    vae, dataset_loader, cpargs = load_model_and_dataset(args.checkpt)
    if args.elbo_decomp:
        elbo_decomposition(vae, dataset_loader)
    # dispatches to plot_vs_gt_shapes / plot_vs_gt_faces by dataset name
    eval('plot_vs_gt_' + cpargs.dataset)(vae, dataset_loader.dataset, args.save, z_inds)
| 8,973 | 36.864979 | 109 | py |
beta-tcvae | beta-tcvae-master/elbo_decomposition.py | import os
import math
from numbers import Number
from tqdm import tqdm
import torch
from torch.autograd import Variable
import lib.dist as dist
import lib.flows as flows
def estimate_entropies(qz_samples, qz_params, q_dist):
    """Computes the term:
        E_{p(x)} E_{q(z|x)} [-log q(z)]
    and
        E_{p(x)} E_{q(z_j|x)} [-log q(z_j)]
    where q(z) = 1/N sum_n=1^N q(z|x_n).
    Assumes samples are from q(z|x) for *all* x in the dataset.
    Assumes that q(z|x) is factorial ie. q(z|x) = prod_j q(z_j|x).
    Computes numerically stable NLL:
        - log q(z) = log N - logsumexp_n=1^N log q(z|x_n)
    Inputs:
    -------
        qz_samples (K, S) Variable
        qz_params (N, K, nparams) Variable

    Returns:
    --------
        marginal_entropies (K,) cuda tensor of H(z_j) estimates
        joint_entropy      (1,) cuda tensor of H(z) estimate
    """
    # Only take a sample subset of the samples (capped at 10000 for tractability)
    qz_samples = qz_samples.index_select(1, Variable(torch.randperm(qz_samples.size(1))[:10000].cuda()))

    K, S = qz_samples.size()
    N, _, nparams = qz_params.size()
    assert(nparams == q_dist.nparams)
    assert(K == qz_params.size(1))

    marginal_entropies = torch.zeros(K).cuda()
    joint_entropy = torch.zeros(1).cuda()

    pbar = tqdm(total=S)
    k = 0
    while k < S:
        # process samples in chunks of 10 to bound the (N, K, chunk) tensor size
        batch_size = min(10, S - k)
        # logqz_i[n, j, s] = log q(z_j of sample s | x_n)
        logqz_i = q_dist.log_density(
            qz_samples.view(1, K, S).expand(N, K, S)[:, :, k:k + batch_size],
            qz_params.view(N, K, 1, nparams).expand(N, K, S, nparams)[:, :, k:k + batch_size])
        k += batch_size

        # computes - log q(z_i) summed over minibatch
        marginal_entropies += (math.log(N) - logsumexp(logqz_i, dim=0, keepdim=False).data).sum(1)
        # computes - log q(z) summed over minibatch
        logqz = logqz_i.sum(1)  # (N, S)
        joint_entropy += (math.log(N) - logsumexp(logqz, dim=0, keepdim=False).data).sum(0)
        pbar.update(batch_size)
    pbar.close()

    # Monte Carlo average over the S retained samples
    marginal_entropies /= S
    joint_entropy /= S
    return marginal_entropies, joint_entropy
def logsumexp(value, dim=None, keepdim=False):
    """Numerically stable log(sum(exp(value))) along `dim`.

    Subtracts the per-slice maximum before exponentiating so exp() cannot
    overflow; with dim=None the reduction runs over all elements.
    """
    if dim is None:
        # Global reduction over every element.
        m = torch.max(value)
        total = torch.sum(torch.exp(value - m))
        # Older PyTorch could return a plain Python number here.
        if isinstance(total, Number):
            return m + math.log(total)
        return m + torch.log(total)
    m, _ = torch.max(value, dim=dim, keepdim=True)
    shifted = value - m
    if keepdim is False:
        m = m.squeeze(dim)
    return m + torch.log(torch.sum(torch.exp(shifted), dim=dim, keepdim=keepdim))
def analytical_NLL(qz_params, q_dist, prior_dist, qz_samples=None):
    """Computes the quantities
        1/N sum_n=1^N E_{q(z|x)} [ - log q(z|x) ]
    and
        1/N sum_n=1^N E_{q(z_j|x)} [ - log p(z_j) ]
    Inputs:
    -------
        qz_params (N, K, nparams) Variable
    Returns:
    --------
        nlogqz_condx (K,) Variable
        nlogpz (K,) Variable

    NOTE(review): the qz_samples argument is accepted but unused.
    """
    # standard prior parameters (zeros) broadcast to qz_params' shape
    pz_params = Variable(torch.zeros(1).type_as(qz_params.data).expand(qz_params.size()), volatile=True)

    nlogqz_condx = q_dist.NLL(qz_params).mean(0)
    nlogpz = prior_dist.NLL(pz_params, qz_params).mean(0)
    return nlogqz_condx, nlogpz
def elbo_decomposition(vae, dataset_loader):
    """Decompose the ELBO over the full dataset.

    Returns (logpx, dependence, information, dimwise_kl, analytical_cond_kl,
    marginal_entropies, joint_entropy), where per the formulas below:
      dependence  = KL(q(z) || prod_j q(z_j))   (total correlation)
      information = E_x KL(q(z|x) || q(z))      (index-code mutual info)
      dimwise_kl  = sum_j KL(q(z_j) || p(z_j))
    """
    N = len(dataset_loader.dataset)  # number of data samples
    K = vae.z_dim                    # number of latent variables
    S = 1                            # number of latent variable samples
    nparams = vae.q_dist.nparams

    print('Computing q(z|x) distributions.')
    # compute the marginal q(z_j|x_n) distributions
    qz_params = torch.Tensor(N, K, nparams)
    n = 0
    logpx = 0
    for xs in dataset_loader:
        batch_size = xs.size(0)
        xs = Variable(xs.view(batch_size, -1, 64, 64).cuda(), volatile=True)
        z_params = vae.encoder.forward(xs).view(batch_size, K, nparams)
        qz_params[n:n + batch_size] = z_params.data
        n += batch_size

        # estimate reconstruction term
        for _ in range(S):
            z = vae.q_dist.sample(params=z_params)
            x_params = vae.decoder.forward(z)
            logpx += vae.x_dist.log_density(xs, params=x_params).view(batch_size, -1).data.sum()
    # Reconstruction term
    logpx = logpx / (N * S)

    qz_params = Variable(qz_params.cuda(), volatile=True)

    print('Sampling from q(z).')
    # sample S times from each marginal q(z_j|x_n)
    qz_params_expanded = qz_params.view(N, K, 1, nparams).expand(N, K, S, nparams)
    qz_samples = vae.q_dist.sample(params=qz_params_expanded)
    qz_samples = qz_samples.transpose(0, 1).contiguous().view(K, N * S)

    print('Estimating entropies.')
    marginal_entropies, joint_entropy = estimate_entropies(qz_samples, qz_params, vae.q_dist)

    # use the analytic NLL when the family provides one, otherwise fall back
    # to a Monte Carlo estimate from the drawn samples
    if hasattr(vae.q_dist, 'NLL'):
        nlogqz_condx = vae.q_dist.NLL(qz_params).mean(0)
    else:
        nlogqz_condx = - vae.q_dist.log_density(qz_samples,
            qz_params_expanded.transpose(0, 1).contiguous().view(K, N * S)).mean(1)

    if hasattr(vae.prior_dist, 'NLL'):
        pz_params = vae._get_prior_params(N * K).contiguous().view(N, K, -1)
        nlogpz = vae.prior_dist.NLL(pz_params, qz_params).mean(0)
    else:
        nlogpz = - vae.prior_dist.log_density(qz_samples.transpose(0, 1)).mean(0)

    # nlogqz_condx, nlogpz = analytical_NLL(qz_params, vae.q_dist, vae.prior_dist)
    nlogqz_condx = nlogqz_condx.data
    nlogpz = nlogpz.data

    # Independence term
    # KL(q(z)||prod_j q(z_j)) = log q(z) - sum_j log q(z_j)
    dependence = (- joint_entropy + marginal_entropies.sum())[0]

    # Information term
    # KL(q(z|x)||q(z)) = log q(z|x) - log q(z)
    information = (- nlogqz_condx.sum() + joint_entropy)[0]

    # Dimension-wise KL term
    # sum_j KL(q(z_j)||p(z_j)) = sum_j (log q(z_j) - log p(z_j))
    dimwise_kl = (- marginal_entropies + nlogpz).sum()

    # Compute sum of terms analytically
    # KL(q(z|x)||p(z)) = log q(z|x) - log p(z)
    analytical_cond_kl = (- nlogqz_condx + nlogpz).sum()

    print('Dependence: {}'.format(dependence))
    print('Information: {}'.format(information))
    print('Dimension-wise KL: {}'.format(dimwise_kl))
    print('Analytical E_p(x)[ KL(q(z|x)||p(z)) ]: {}'.format(analytical_cond_kl))
    print('Estimated ELBO: {}'.format(logpx - analytical_cond_kl))

    return logpx, dependence, information, dimwise_kl, analytical_cond_kl, marginal_entropies, joint_entropy
# Script entry: load a checkpoint, run the ELBO decomposition, save the terms.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-checkpt', required=True)
    parser.add_argument('-save', type=str, default='.')
    parser.add_argument('-gpu', type=int, default=0)
    args = parser.parse_args()

    def load_model_and_dataset(checkpt_filename):
        """Reconstruct the VAE and its data loader from a checkpoint file."""
        checkpt = torch.load(checkpt_filename)
        args = checkpt['args']
        state_dict = checkpt['state_dict']

        # backwards compatibility
        if not hasattr(args, 'conv'):
            args.conv = False

        # imported at use time — presumably to avoid a circular import with
        # vae_quant (which calls elbo_decomposition); confirm.
        from vae_quant import VAE, setup_data_loaders

        # model
        if args.dist == 'normal':
            prior_dist = dist.Normal()
            q_dist = dist.Normal()
        elif args.dist == 'laplace':
            prior_dist = dist.Laplace()
            q_dist = dist.Laplace()
        elif args.dist == 'flow':
            prior_dist = flows.FactorialNormalizingFlow(dim=args.latent_dim, nsteps=32)
            q_dist = dist.Normal()
        vae = VAE(z_dim=args.latent_dim, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
        vae.load_state_dict(state_dict, strict=False)
        vae.eval()

        # dataset loader
        loader = setup_data_loaders(args, use_cuda=True)
        return vae, loader

    torch.cuda.set_device(args.gpu)
    vae, dataset_loader = load_model_and_dataset(args.checkpt)
    logpx, dependence, information, dimwise_kl, analytical_cond_kl, marginal_entropies, joint_entropy = \
        elbo_decomposition(vae, dataset_loader)
    torch.save({
        'logpx': logpx,
        'dependence': dependence,
        'information': information,
        'dimwise_kl': dimwise_kl,
        'analytical_cond_kl': analytical_cond_kl,
        'marginal_entropies': marginal_entropies,
        'joint_entropy': joint_entropy
    }, os.path.join(args.save, 'elbo_decomposition.pth'))
| 8,303 | 34.33617 | 109 | py |
beta-tcvae | beta-tcvae-master/metric_helpers/mi_metric.py | import torch
metric_name = 'MIG'
def MIG(mi_normed):
    """Mutual Information Gap: mean gap between columns 0 and 1 of the
    normalized MI matrix (callers pass rows sorted in descending order)."""
    gap = mi_normed[:, 0] - mi_normed[:, 1]
    return gap.mean()
def compute_metric_shapes(marginal_entropies, cond_entropies):
    """Compute the disentanglement metric (selected by `metric_name`) for the
    shapes/dSprites factors.

    marginal_entropies: (K,) tensor of H(z_j).
    cond_entropies:     (num_factors, K) tensor of H(z_j | v_k).
    """
    # Ground-truth factor cardinalities — presumably scale/rotation/pos-x/pos-y
    # given the 6/40/32/32 grid used elsewhere in the project; confirm.
    factor_entropies = [6, 40, 32, 32]
    # I(z_j; v_k) = H(z_j) - H(z_j | v_k); sort per factor and clamp away
    # small negative estimation noise.
    mutual_infos = marginal_entropies[None] - cond_entropies
    mutual_infos = torch.sort(mutual_infos, dim=1, descending=True)[0].clamp(min=0)
    # Normalize each factor's row by its entropy, log(cardinality).
    mi_normed = mutual_infos / torch.Tensor(factor_entropies).log()[:, None]
    # Look the metric function up by name instead of eval() on a string.
    metric = globals()[metric_name](mi_normed)
    return metric
def compute_metric_faces(marginal_entropies, cond_entropies):
    """Compute the disentanglement metric (selected by `metric_name`) for the
    3D-faces factors.

    marginal_entropies: (K,) tensor of H(z_j).
    cond_entropies:     (num_factors, K) tensor of H(z_j | v_k).
    """
    # Ground-truth factor cardinalities — presumably azimuth/elevation/lighting
    # given the 21/11/11 grid used elsewhere in the project; confirm.
    factor_entropies = [21, 11, 11]
    # I(z_j; v_k) = H(z_j) - H(z_j | v_k); sort per factor and clamp away
    # small negative estimation noise.
    mutual_infos = marginal_entropies[None] - cond_entropies
    mutual_infos = torch.sort(mutual_infos, dim=1, descending=True)[0].clamp(min=0)
    # Normalize each factor's row by its entropy, log(cardinality).
    mi_normed = mutual_infos / torch.Tensor(factor_entropies).log()[:, None]
    # Look the metric function up by name instead of eval() on a string.
    metric = globals()[metric_name](mi_normed)
    return metric
| 882 | 31.703704 | 83 | py |
beta-tcvae | beta-tcvae-master/metric_helpers/loader.py | import torch
import lib.dist as dist
import lib.flows as flows
import vae_quant
def load_model_and_dataset(checkpt_filename):
    """Rebuild a trained VAE and its training dataset from a checkpoint.

    Returns (vae, dataset, args) where args are the training-time arguments
    stored inside the checkpoint.
    """
    print('Loading model and dataset.')
    checkpt = torch.load(checkpt_filename, map_location=lambda storage, loc: storage)
    args = checkpt['args']
    state_dict = checkpt['state_dict']

    # backwards compatibility
    if not hasattr(args, 'conv'):
        args.conv = False

    # distribution families for the prior p(z) and posterior q(z|x)
    if not hasattr(args, 'dist') or args.dist == 'normal':
        prior_dist = dist.Normal()
        q_dist = dist.Normal()
    elif args.dist == 'laplace':
        prior_dist = dist.Laplace()
        q_dist = dist.Laplace()
    elif args.dist == 'flow':
        prior_dist = flows.FactorialNormalizingFlow(dim=args.latent_dim, nsteps=32)
        q_dist = dist.Normal()

    # model
    if hasattr(args, 'ncon'):
        # InfoGAN
        # NOTE(review): `infogan` is never imported in this module, so this
        # branch raises NameError as written — confirm where the symbol is
        # expected to come from before relying on this path.
        model = infogan.Model(
            args.latent_dim, n_con=args.ncon, n_cat=args.ncat, cat_dim=args.cat_dim, use_cuda=True, conv=args.conv)
        model.load_state_dict(state_dict, strict=False)
        # wrap the InfoGAN encoder/decoder in a VAE shell for downstream code
        vae = vae_quant.VAE(
            z_dim=args.ncon, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
        vae.encoder = model.encoder
        vae.decoder = model.decoder
    else:
        vae = vae_quant.VAE(
            z_dim=args.latent_dim, use_cuda=True, prior_dist=prior_dist, q_dist=q_dist, conv=args.conv)
        vae.load_state_dict(state_dict, strict=False)

    # dataset loader
    loader = vae_quant.setup_data_loaders(args)
    return vae, loader.dataset, args
| 1,550 | 33.466667 | 115 | py |
beta-tcvae | beta-tcvae-master/lib/functions.py | import torch
from torch.autograd import Function
class STHeaviside(Function):
    """Straight-through Heaviside step.

    Forward hard-thresholds at zero (1 where x >= 0, else 0); backward
    passes the incoming gradient through unchanged.
    """

    @staticmethod
    def forward(ctx, x):
        # Boolean mask cast back to x's dtype/device.
        return (x >= 0).type_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: identity gradient.
        return grad_output
beta-tcvae | beta-tcvae-master/lib/utils.py | from numbers import Number
import math
import torch
import os
def save_checkpoint(state, save, epoch):
    """Serialize `state` to <save>/checkpt-<epoch>.pth, creating the directory.

    state: picklable object (typically a dict holding state_dict + args).
    save:  target directory, created (with parents) when missing.
    epoch: integer used in the zero-padded filename.
    """
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists + os.makedirs pair.
    os.makedirs(save, exist_ok=True)
    filename = os.path.join(save, 'checkpt-%04d.pth' % epoch)
    torch.save(state, filename)
class AverageMeter(object):
    """Tracks the most recent value and a count-weighted running mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record `val`, counted as `n` observations."""
        self.count += n
        self.sum += val * n
        self.val = val
        self.avg = self.sum / self.count
class RunningAverageMeter(object):
    """Exponential moving average of a scalar series."""

    def __init__(self, momentum=0.97):
        self.momentum = momentum
        self.reset()

    def reset(self):
        """Forget all history; the next update seeds the average."""
        self.val = None
        self.avg = 0

    def update(self, val):
        # First observation seeds the average; afterwards blend with EMA.
        if self.val is None:
            self.avg = val
        else:
            self.avg = self.momentum * self.avg + (1 - self.momentum) * val
        self.val = val
def isnan(tensor):
    """Elementwise NaN mask; relies on NaN != NaN."""
    return tensor.ne(tensor)
def logsumexp(value, dim=None, keepdim=False):
    """Numerically stable log(sum(exp(value))) along `dim`.

    Subtracts the per-slice maximum before exponentiating so exp() cannot
    overflow; with dim=None the reduction runs over all elements.
    """
    if dim is None:
        # Global reduction over every element.
        m = torch.max(value)
        total = torch.sum(torch.exp(value - m))
        # Older PyTorch could return a plain Python number here.
        if isinstance(total, Number):
            return m + math.log(total)
        return m + torch.log(total)
    m, _ = torch.max(value, dim=dim, keepdim=True)
    shifted = value - m
    if keepdim is False:
        m = m.squeeze(dim)
    return m + torch.log(torch.sum(torch.exp(shifted), dim=dim, keepdim=keepdim))
beta-tcvae | beta-tcvae-master/lib/datasets.py | import numpy as np
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
class Shapes(object):
    """dSprites images exposed as single-channel 64x64 float tensors."""

    def __init__(self, dataset_zip=None):
        # Load the packaged archive unless a preloaded one is supplied
        # (latin1 encoding for pickled-metadata compatibility).
        loc = 'data/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz'
        self.dataset_zip = (dataset_zip if dataset_zip is not None
                            else np.load(loc, encoding='latin1'))
        self.imgs = torch.from_numpy(self.dataset_zip['imgs']).float()

    def __len__(self):
        return self.imgs.size(0)

    def __getitem__(self, index):
        # One (1, 64, 64) image per sample.
        return self.imgs[index].view(1, 64, 64)
class Dataset(object):
    """Wraps a saved image tensor, normalized to [0, 1] single-channel 64x64."""

    def __init__(self, loc):
        raw = torch.load(loc)
        # scale byte pixel values into [0, 1] and enforce (N, 1, 64, 64)
        self.dataset = raw.float().div(255).view(-1, 1, 64, 64)

    def __len__(self):
        return self.dataset.size(0)

    @property
    def ndim(self):
        # channel count of each image
        return self.dataset.size(1)

    def __getitem__(self, index):
        return self.dataset[index]
class Faces(Dataset):
    """Basel face renders, loaded from the fixed LOC path."""
    LOC = 'data/basel_face_renders.pth'

    def __init__(self):
        # Plain super call: __init__ should not return a value
        # (the previous `return super(...).__init__(...)` was a no-op None).
        super(Faces, self).__init__(self.LOC)
| 1,102 | 22.978261 | 75 | py |
beta-tcvae | beta-tcvae-master/lib/dist.py | import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from lib.functions import STHeaviside
eps = 1e-8
class Normal(nn.Module):
    """Samples from a Normal distribution using the reparameterization trick.

    Distribution parameters are passed as a tensor whose last dimension
    holds (mu, logsigma); see `nparams`.
    """
    def __init__(self, mu=0, sigma=1):
        super(Normal, self).__init__()
        # log(2*pi): constant part of the Gaussian log-density normalizer
        self.normalization = Variable(torch.Tensor([np.log(2 * np.pi)]))
        self.mu = Variable(torch.Tensor([mu]))
        self.logsigma = Variable(torch.Tensor([math.log(sigma)]))

    def _check_inputs(self, size, mu_logsigma):
        # Resolve (mu, logsigma) from an explicit size (using this module's
        # own parameters), a params tensor, or params expanded to size.
        if size is None and mu_logsigma is None:
            raise ValueError(
                'Either one of size or params should be provided.')
        elif size is not None and mu_logsigma is not None:
            mu = mu_logsigma.select(-1, 0).expand(size)
            logsigma = mu_logsigma.select(-1, 1).expand(size)
            return mu, logsigma
        elif size is not None:
            mu = self.mu.expand(size)
            logsigma = self.logsigma.expand(size)
            return mu, logsigma
        elif mu_logsigma is not None:
            mu = mu_logsigma.select(-1, 0)
            logsigma = mu_logsigma.select(-1, 1)
            return mu, logsigma
        else:
            raise ValueError(
                'Given invalid inputs: size={}, mu_logsigma={})'.format(
                    size, mu_logsigma))

    def sample(self, size=None, params=None):
        # Reparameterized draw: mu + sigma * eps with eps ~ N(0, I).
        mu, logsigma = self._check_inputs(size, params)
        std_z = Variable(torch.randn(mu.size()).type_as(mu.data))
        sample = std_z * torch.exp(logsigma) + mu
        return sample

    def log_density(self, sample, params=None):
        # Elementwise Gaussian log-density of `sample` under (mu, logsigma).
        if params is not None:
            mu, logsigma = self._check_inputs(None, params)
        else:
            mu, logsigma = self._check_inputs(sample.size(), None)
            mu = mu.type_as(sample)
            logsigma = logsigma.type_as(sample)

        c = self.normalization.type_as(sample.data)
        inv_sigma = torch.exp(-logsigma)
        tmp = (sample - mu) * inv_sigma
        return -0.5 * (tmp * tmp + 2 * logsigma + c)

    def NLL(self, params, sample_params=None):
        """Analytically computes
            E_N(mu_2,sigma_2^2) [ - log N(mu_1, sigma_1^2) ]
        If mu_2, and sigma_2^2 are not provided, defaults to entropy.
        """
        mu, logsigma = self._check_inputs(None, params)
        if sample_params is not None:
            sample_mu, sample_logsigma = self._check_inputs(None, sample_params)
        else:
            sample_mu, sample_logsigma = mu, logsigma

        c = self.normalization.type_as(sample_mu.data)
        nll = logsigma.mul(-2).exp() * (sample_mu - mu).pow(2) \
            + torch.exp(sample_logsigma.mul(2) - logsigma.mul(2)) + 2 * logsigma + c
        return nll.mul(0.5)

    def kld(self, params):
        """Computes KL(q||p) where q is the given distribution and p
        is the standard Normal distribution.
        """
        mu, logsigma = self._check_inputs(None, params)
        # see Appendix B from VAE paper:
        # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
        # https://arxiv.org/abs/1312.6114
        # 0.5 * sum(1 + log(sigma^2) - mean^2 - sigma^2)
        kld = logsigma.mul(2).add(1) - mu.pow(2) - logsigma.exp().pow(2)
        kld.mul_(-0.5)
        return kld

    def get_params(self):
        # concatenated (mu, logsigma) of this module's own distribution
        return torch.cat([self.mu, self.logsigma])

    @property
    def nparams(self):
        # parameters per dimension: (mu, logsigma)
        return 2

    @property
    def ndim(self):
        return 1

    @property
    def is_reparameterizable(self):
        return True

    def __repr__(self):
        tmpstr = self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(
            self.mu.data[0], self.logsigma.exp().data[0])
        return tmpstr
class Laplace(nn.Module):
    """Samples from a Laplace distribution using the reparameterization trick.
    """

    def __init__(self, mu=0, scale=1):
        super(Laplace, self).__init__()
        # -log(2): normalization constant of the Laplace density.
        self.normalization = Variable(torch.Tensor([-math.log(2)]))
        self.mu = Variable(torch.Tensor([mu]))
        # Stored as log(scale) for an unconstrained parameterization.
        self.logscale = Variable(torch.Tensor([math.log(scale)]))

    def _check_inputs(self, size, mu_logscale):
        # Resolve (mu, logscale) from `size`, from a parameter tensor whose
        # last dimension stacks [mu, logscale], or from both.
        if size is None and mu_logscale is None:
            raise ValueError(
                'Either one of size or params should be provided.')
        if mu_logscale is not None:
            loc = mu_logscale.select(-1, 0)
            log_b = mu_logscale.select(-1, 1)
            if size is not None:
                loc = loc.expand(size)
                log_b = log_b.expand(size)
            return loc, log_b
        if size is not None:
            # No external params: broadcast this module's own parameters.
            return self.mu.expand(size), self.logscale.expand(size)
        raise ValueError(
            'Given invalid inputs: size={}, mu_logscale={})'.format(
                size, mu_logscale))

    def sample(self, size=None, params=None):
        # Inverse-CDF transform of uniform noise gives a Laplace sample.
        loc, log_b = self._check_inputs(size, params)
        b = torch.exp(log_b)
        # Unif(-0.5, 0.5)
        noise = Variable(torch.rand(loc.size()).type_as(loc.data)) - 0.5
        return loc - b * torch.sign(noise) * torch.log(1 - 2 * torch.abs(noise) + eps)

    def log_density(self, sample, params=None):
        # Elementwise log p(x) = -|x - mu| / b - log(2 * b).
        if params is None:
            loc, log_b = self._check_inputs(sample.size(), None)
        else:
            loc, log_b = self._check_inputs(None, params)
        loc = loc.type_as(sample)
        log_b = log_b.type_as(sample)
        norm_const = self.normalization.type_as(sample.data)
        return -torch.abs(sample - loc) * torch.exp(-log_b) + norm_const - log_b

    def get_params(self):
        # Concatenated [mu, logscale] of this module's own parameters.
        return torch.cat([self.mu, self.logscale])

    @property
    def nparams(self):
        return 2

    @property
    def ndim(self):
        return 1

    @property
    def is_reparameterizable(self):
        return True

    def __repr__(self):
        return self.__class__.__name__ + ' ({:.3f}, {:.3f})'.format(
            self.mu.data[0], self.logscale.exp().data[0])
class Bernoulli(nn.Module):
    """Samples from a Bernoulli distribution where the probability is given
    by the sigmoid of the given parameter.
    """

    def __init__(self, p=0.5, stgradient=False):
        super(Bernoulli, self).__init__()
        p = torch.Tensor([p])
        # Store the logit of p; eps guards against log(0).
        self.p = Variable(torch.log(p / (1 - p) + eps))
        # If True, samples carry a straight-through gradient estimate.
        self.stgradient = stgradient

    def _check_inputs(self, size, ps):
        # Resolve the pre-sigmoid parameter tensor from `size`, `ps`, or both.
        if size is None and ps is None:
            raise ValueError(
                'Either one of size or params should be provided.')
        elif size is not None and ps is not None:
            if ps.ndimension() > len(size):
                return ps.squeeze(-1).expand(size)
            else:
                return ps.expand(size)
        elif size is not None:
            return self.p.expand(size)
        elif ps is not None:
            return ps
        else:
            raise ValueError(
                'Given invalid inputs: size={}, ps={})'.format(size, ps))

    def _sample_logistic(self, size):
        # Logistic noise via the inverse CDF of Unif(0, 1).
        u = Variable(torch.rand(size))
        l = torch.log(u + eps) - torch.log(1 - u + eps)
        return l

    def sample(self, size=None, params=None):
        # Binary sample via the logistic reparameterization + hard threshold.
        presigm_ps = self._check_inputs(size, params)
        logp = F.logsigmoid(presigm_ps)
        logq = F.logsigmoid(-presigm_ps)
        l = self._sample_logistic(logp.size()).type_as(presigm_ps)
        z = logp - logq + l
        b = STHeaviside.apply(z)
        return b if self.stgradient else b.detach()

    def log_density(self, sample, params=None):
        # Bernoulli log-likelihood; p is squashed away from exactly 0 or 1.
        presigm_ps = self._check_inputs(sample.size(), params).type_as(sample)
        # torch.sigmoid replaces the deprecated F.sigmoid and matches the
        # usage already present in __repr__.
        p = (torch.sigmoid(presigm_ps) + eps) * (1 - 2 * eps)
        logp = sample * torch.log(p + eps) + (1 - sample) * torch.log(1 - p + eps)
        return logp

    def get_params(self):
        return self.p

    @property
    def nparams(self):
        return 1

    @property
    def ndim(self):
        return 1

    @property
    def is_reparameterizable(self):
        # Gradients only flow through samples with the straight-through trick.
        return self.stgradient

    def __repr__(self):
        tmpstr = self.__class__.__name__ + ' ({:.3f})'.format(
            torch.sigmoid(self.p.data)[0])
        return tmpstr
| 8,542 | 32.501961 | 84 | py |
beta-tcvae | beta-tcvae-master/lib/flows.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from lib.dist import Normal
eps = 1e-8
class FactorialNormalizingFlow(nn.Module):
    """Applies `nsteps` elementwise planar-flow transforms independently to
    each of `dim` dimensions; densities follow by the change-of-variables
    formula against a standard Normal base distribution.
    """

    def __init__(self, dim, nsteps):
        super(FactorialNormalizingFlow, self).__init__()
        self.dim = dim
        self.nsteps = nsteps
        # Base distribution evaluated after the flow transform.
        self.x_dist = Normal()
        # Per-step, per-dimension planar flow parameters (u, w, b).
        self.scale = nn.Parameter(torch.Tensor(self.nsteps, self.dim))
        self.weight = nn.Parameter(torch.Tensor(self.nsteps, self.dim))
        self.bias = nn.Parameter(torch.Tensor(self.nsteps, self.dim))
        self.reset_parameters()

    def reset_parameters(self):
        # Small random init keeps the initial flow close to the identity.
        self.scale.data.normal_(0, 0.02)
        self.weight.data.normal_(0, 0.02)
        self.bias.data.normal_(0, 0.02)

    def sample(self, batch_size):
        raise NotImplementedError

    def log_density(self, y, params=None):
        """Transforms y through the planar steps, accumulating the
        elementwise log |1 + u * (1 - tanh^2) * w| Jacobian terms, and adds
        the base log-density of the transformed point.
        """
        assert(y.size(1) == self.dim)
        x = y
        logdetgrad = Variable(torch.zeros(y.size()).type_as(y.data))
        for i in range(self.nsteps):
            u = self.scale[i][None]
            w = self.weight[i][None]
            b = self.bias[i][None]
            # torch.tanh replaces the deprecated F.tanh.
            act = torch.tanh(x * w + b)
            x = x + u * act
            logdetgrad = logdetgrad + torch.log(torch.abs(1 + u * (1 - act.pow(2)) * w) + eps)
        logpx = self.x_dist.log_density(x)
        logpy = logpx + logdetgrad
        return logpy
| 1,405 | 30.244444 | 94 | py |
ewN2HDECAY | ewN2HDECAY-master/CommonFunctions.py | #!/usr/bin/env python
#Filename: CommonFunctions.py
###############################################################################################################
# #
# CommonFunctions #
# #
# Purpose: Function library for ewN2HDECAY. Contains often used functions. #
# Copyright: Copyright (C) 2019, Marcel Krause and Milada Margarete Muehlleitner #
# License: GNU General Public License (GNU GPL-3.0-or-later) #
# #
# ewN2HDECAY is released under GNU General Public License (GNU GPL-3.0-or-later). #
# This program is free software: you can redistribute it and/or modify it under the terms of the #
# GNU General Public License as published by the Free Software Foundation, either version 3 of #
# the License, or any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; #
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU General Public License for more details. #
# #
# You have received a copy (LICENSE.md) of the GNU General Public License along with this program #
# in the ewN2HDECAY root directory. #
# #
###############################################################################################################
#------------------------------#
# Import Modules #
#------------------------------#
import sys
import os
import errno
#-------------------------#
# Functions #
#-------------------------#
def queryBoolean(question):
    """Prompt the user with a yes/no question until a recognized answer is
    typed, then return the matching Boolean value.
    """
    affirmative = {"yes": True, "y": True, "ye": True, "j": True, "ja": True, "1": True}
    negative = {"no": False, "n": False, "nein": False, "0": False}
    while True:
        sys.stdout.write(question + " [y/n] ")
        # Compatibility for Python 2 and 3
        if sys.version_info > (3, 0):
            answer = input().lower()
        else:
            answer = raw_input().lower()
        if answer in affirmative:
            return True
        if answer in negative:
            return False
        sys.stdout.write('Error: invalid input. Enter "y" or "n".\n\n')
| 3,333 | 55.508475 | 113 | py |
ewN2HDECAY | ewN2HDECAY-master/Config.py | #!/usr/bin/env python
#Filename: Config.py
##################################################################################################################
# #
# Configuration #
# #
# Purpose: Main configuration file of ewN2HDECAY. Contains all configuration #
# settings needed to change the program. #
# Copyright: Copyright (C) 2019, Marcel Krause and Milada Margarete Muehlleitner #
# License: GNU General Public License (GNU GPL-3.0-or-later) #
# #
# ewN2HDECAY is released under GNU General Public License (GNU GPL-3.0-or-later). #
# This program is free software: you can redistribute it and/or modify it under the terms of the #
# GNU General Public License as published by the Free Software Foundation, either version 3 of #
# the License, or any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; #
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU General Public License for more details. #
# #
# You have received a copy (LICENSE.md) of the GNU General Public License along with this program #
# in the ewN2HDECAY root directory. #
# #
##################################################################################################################
#---------------------#
# Shared #
#---------------------#
# --- LoopTools / toolchain configuration (read by the setup and run scripts) ---
useRelativeLoopToolsPath = True # Set True if you want to set the path to LoopTools relative to the ewN2HDECAY installation path (useful if you installed LoopTools e.g. in a subdirectory of the ewN2HDECAY folder) or False if you want to use an absolute path to LoopTools
pathLoopTools = 'LoopTools-2.14/i686-CYGWIN_NT-10.0-WOW' # Specify the path to the LoopTools root folder (IMPORTANT: the path must never *end* with '/' and if useRelativePath is True, it must not *start* with '/' either! If useRelativePath is False, it depends on the OS if the full absolute path starts with '/' or not: on Windows, it typically does not, on Linux, it typically does)
pathLoopToolsLibs = 'lib' # Specify the LoopTools subfolder (relative to pathLoopTools) where the LoopTools libraries are contained (NOTE: this depends on the OS and chip architecture; on Windows, this is normally 'lib', on Linux and macOS, it is normally 'lib64')
pathLoopToolsExecs = 'bin' # Specify the LoopTools subfolder (relative to pathLoopTools) where the LoopTools executables are contained (NOTE: this depends on the OS and chip architecture; on Windows, this is normally 'bin')
pathToCygwin = 'C:\\cygwin\\bin\\bash.exe' # Specify the path to the Cygwin bash executable (for Windows only)
loopToolsVersion = 'LoopTools-2.14' # Specify the LoopTools version that shall be downloaded (recommended, as checked for compatibility: LoopTools-2.14)
renScaleDefinitions = ['MIN', 'MIN*DSQRT(2D0)**(-1)'] # Give definitions for possible renormalization scales in terms of the decaying Higgs mass MIN (NOTE: use FORTRAN functions and formats!) | 4,403 | 114.894737 | 384 | py |
ewN2HDECAY | ewN2HDECAY-master/setup.py | #!/usr/bin/env python
#Filename: setup.py
###############################################################################################################
# #
# setup.py #
# #
# Purpose: Calls the LoopTools installer #
# Makes the N2HDECAY sub-program #
# Sets up ewN2HDECAY #
# Creates the makefile and electroweakCorrections.F90 #
# Makes ewN2HDECAY #
# Copyright: Copyright (C) 2019, Marcel Krause and Milada Margarete Muehlleitner #
# License: GNU General Public License (GNU GPL-3.0-or-later) #
# #
# ewN2HDECAY is released under GNU General Public License (GNU GPL-3.0-or-later). #
# This program is free software: you can redistribute it and/or modify it under the terms of the #
# GNU General Public License as published by the Free Software Foundation, either version 3 of #
# the License, or any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; #
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #
# See the GNU General Public License for more details. #
# #
# You have received a copy (LICENSE.md) of the GNU General Public License along with this program #
# in the ewN2HDECAY root directory. #
# #
###############################################################################################################
#------------------------------#
# Import Modules #
#------------------------------#
import os
from shutil import rmtree
import subprocess
import sys
from fnmatch import fnmatch
import CommonFunctions # Provides common, often used functions for different scripts of ewN2HDECAY
import Config # Provides paths
# Compatibility for Python 2 and 3
if(sys.version_info > (3,0)):
from urllib.request import urlopen
else:
from urllib import urlopen
#-------------------------#
# Functions #
#-------------------------#
def createMakefile(pathToMakefile, relativePathToLoopTools, relativePathToLoopToolsLibs, relativePathToLoopToolsExecs, useRelativePath, chosenCompiler):
# Get a list of all processes
pathToProcesses = 'BuildingBlocks' + os.sep + 'Processes'
processDirList = os.listdir(pathToProcesses)
# Get ewN2HDECAY's working directory and escape all whitespaces
prompt = ['pwd']
workingDirectory = subprocess.check_output(prompt, stdin=None, stderr=None, shell=False).decode('utf-8').replace(' ', '\ ')
# Check whether the OS is Windows or not for giving the decayWidth application the correct file ending
applicationEnding = ''
if os.name == 'nt':
applicationEnding = '.exe'
makefile = open(pathToMakefile, 'w')
makefile.truncate()
makefile.write("###################################\n")
makefile.write("# Variables and Paths #\n")
makefile.write("###################################\n\n")
makefile.write("# Specify the path to the LoopTools library:\n")
if useRelativePath:
makefile.write("PWD=" + workingDirectory + "\n")
makefile.write("LT=$(PWD)/" + relativePathToLoopTools + "\n")
else:
makefile.write("LT=" + relativePathToLoopTools + "\n")
makefile.write("LTCOMP = $(LT)/" + relativePathToLoopToolsExecs + "/fcc\n")
makefile.write("IFlags = -I$(LT)/include\n")
makefile.write("LFlags = -L$(LT)/" + relativePathToLoopToolsLibs + " -looptools\n\n")
makefile.write("# Choose your compiler:\n")
makefile.write("FCOMP = " + chosenCompiler + "\n\n")
makefile.write("# Do NOT change anything below this line by hand!\n")
makefile.write("SELFENERGIESUSU = BuildingBlocks/SelfEnergies/Usual\n")
makefile.write("SELFENERGIESALT = BuildingBlocks/SelfEnergies/Alternative\n")
makefile.write("SELFENERGIESDERIV = BuildingBlocks/SelfEnergiesDerivatives\n")
# makefile.write("PROCESSDEPENDENTSCHEME = BuildingBlocks/ProcessDependentScheme\n")
makefile.write("TADPOLES = BuildingBlocks/Tadpoles\n")
for singleProcess in processDirList:
makefile.write("PROCESS" + singleProcess.upper() + " = BuildingBlocks/Processes/" + singleProcess + "\n")
makefile.write("\nconstants.o: constants.F90\n")
makefile.write("\t$(FCOMP) constants.F90 -c -o constants.o $(IFlags)\n")
makefile.write("\ncounterterms.o: counterterms.F90\n")
makefile.write("\t$(FCOMP) counterterms.F90 -c -o counterterms.o $(IFlags)\n")
makefile.write("\ngetParameters.o: getParameters.F90\n")
makefile.write("\t$(FCOMP) getParameters.F90 -c -o getParameters.o $(IFlags)\n")
makefile.write("\nelectroweakCorrections.o: electroweakCorrections.F90\n")
makefile.write("\t$(FCOMP) electroweakCorrections.F90 -c -o electroweakCorrections.o $(IFlags)\n\n")
makefile.write("$(SELFENERGIESALT)/%.o: $(SELFENERGIESALT)/%.F90 constants.o\n")
makefile.write("\t$(FCOMP) -c -o $@ $< $(IFlags)\n\n")
makefile.write("$(SELFENERGIESUSU)/%.o: $(SELFENERGIESUSU)/%.F90 constants.o\n")
makefile.write("\t$(FCOMP) -c -o $@ $< $(IFlags)\n\n")
makefile.write("$(SELFENERGIESDERIV)/%.o: $(SELFENERGIESDERIV)/%.F90 constants.o\n")
makefile.write("\t$(FCOMP) -c -o $@ $< $(IFlags)\n\n")
# makefile.write("$(PROCESSDEPENDENTSCHEME)/%.o: $(PROCESSDEPENDENTSCHEME)/%.F90 constants.o\n")
# makefile.write("\t$(FCOMP) -c -o $@ $< $(IFlags)\n\n")
makefile.write("$(TADPOLES)/%.o: $(TADPOLES)/%.F90 constants.o\n")
makefile.write("\t$(FCOMP) -c -o $@ $< $(IFlags)\n\n")
for singleProcess in processDirList:
makefile.write("$(PROCESS" + singleProcess.upper() + ")/%.o: $(PROCESS" + singleProcess.upper() + ")/%.F90 constants.o counterterms.o\n")
makefile.write("\t$(FCOMP) -c -o $@ $< $(IFlags)\n")
makefile.write("\nelectroweakCorrections: constants.o $(SELFENERGIESUSU)/SelfAA.o $(SELFENERGIESUSU)/SelfAALight.o $(SELFENERGIESUSU)/SelfWpWp.o $(SELFENERGIESUSU)/SelfZ0Z0.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfAZ0.o $(SELFENERGIESUSU)/SelfAZ0ZeroMom.o $(SELFENERGIESALT)/SelfAA.o $(SELFENERGIESALT)/SelfWpWp.o $(SELFENERGIESALT)/SelfZ0Z0.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfAZ0.o $(SELFENERGIESALT)/SelfAZ0ZeroMom.o $(SELFENERGIESUSU)/SelfA0A0.o $(SELFENERGIESUSU)/SelfG0A0.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfG0G0.o $(SELFENERGIESUSU)/SelfGpGp.o $(SELFENERGIESUSU)/SelfGpHp.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfH1H1.o $(SELFENERGIESUSU)/SelfH1H2.o $(SELFENERGIESUSU)/SelfH1H3.o $(SELFENERGIESUSU)/SelfH2H2.o $(SELFENERGIESUSU)/SelfH2H3.o $(SELFENERGIESUSU)/SelfH3H3.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfHpHp.o $(SELFENERGIESALT)/SelfA0A0.o $(SELFENERGIESALT)/SelfG0A0.o $(SELFENERGIESALT)/SelfG0G0.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfGpGp.o $(SELFENERGIESALT)/SelfGpHp.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfH1H1.o $(SELFENERGIESALT)/SelfH1H2.o $(SELFENERGIESALT)/SelfH1H3.o $(SELFENERGIESALT)/SelfH2H2.o $(SELFENERGIESALT)/SelfH2H3.o $(SELFENERGIESALT)/SelfH3H3.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfHpHp.o $(SELFENERGIESDERIV)/DSelfAA.o $(SELFENERGIESDERIV)/DSelfAALight.o $(SELFENERGIESDERIV)/DSelfWpWp.o $(SELFENERGIESDERIV)/DSelfZ0Z0.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfAZ0.o $(SELFENERGIESDERIV)/DSelfA0A0.o $(SELFENERGIESDERIV)/DSelfG0A0.o $(SELFENERGIESDERIV)/DSelfG0G0.o $(SELFENERGIESDERIV)/DSelfGpGp.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfGpHp.o $(SELFENERGIESDERIV)/DSelfHpHp.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfH1H1.o $(SELFENERGIESDERIV)/DSelfH1H2.o $(SELFENERGIESDERIV)/DSelfH1H3.o $(SELFENERGIESDERIV)/DSelfH2H2.o $(SELFENERGIESDERIV)/DSelfH2H3.o $(SELFENERGIESDERIV)/DSelfH3H3.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfH1H2Add.o $(SELFENERGIESALT)/SelfH1H3Add.o $(SELFENERGIESALT)/SelfH2H3Add.o $(SELFENERGIESALT)/SelfG0A0Add.o $(SELFENERGIESALT)/SelfGpHpAdd.o \\\n")
makefile.write("\t$(TADPOLES)/TadH1.o $(TADPOLES)/TadH2.o $(TADPOLES)/TadH3.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfNeuENeuELeft.o $(SELFENERGIESUSU)/SelfNeuENeuERight.o $(SELFENERGIESUSU)/SelfNeuENeuEScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfNeuMNeuMLeft.o $(SELFENERGIESUSU)/SelfNeuMNeuMRight.o $(SELFENERGIESUSU)/SelfNeuMNeuMScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfNeuTNeuTLeft.o $(SELFENERGIESUSU)/SelfNeuTNeuTRight.o $(SELFENERGIESUSU)/SelfNeuTNeuTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfElElLeft.o $(SELFENERGIESUSU)/SelfElElRight.o $(SELFENERGIESUSU)/SelfElElScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfMuMuLeft.o $(SELFENERGIESUSU)/SelfMuMuRight.o $(SELFENERGIESUSU)/SelfMuMuScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfTauTauLeft.o $(SELFENERGIESUSU)/SelfTauTauRight.o $(SELFENERGIESUSU)/SelfTauTauScalar.o \\\n")
# makefile.write("\t$(SELFENERGIESUSU)/SelfTauTauLeftQED.o $(SELFENERGIESUSU)/SelfTauTauRightQED.o $(SELFENERGIESUSU)/SelfTauTauScalarQED.o \\\n")
# makefile.write("\t$(SELFENERGIESUSU)/SelfTauTauLeftWeak.o $(SELFENERGIESUSU)/SelfTauTauRightWeak.o $(SELFENERGIESUSU)/SelfTauTauScalarWeak.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfDDLeft.o $(SELFENERGIESUSU)/SelfDDRight.o $(SELFENERGIESUSU)/SelfDDScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfUULeft.o $(SELFENERGIESUSU)/SelfUURight.o $(SELFENERGIESUSU)/SelfUUScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfSSLeft.o $(SELFENERGIESUSU)/SelfSSRight.o $(SELFENERGIESUSU)/SelfSSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfCCLeft.o $(SELFENERGIESUSU)/SelfCCRight.o $(SELFENERGIESUSU)/SelfCCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfBBLeft.o $(SELFENERGIESUSU)/SelfBBRight.o $(SELFENERGIESUSU)/SelfBBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfTTLeft.o $(SELFENERGIESUSU)/SelfTTRight.o $(SELFENERGIESUSU)/SelfTTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfUCLeft.o $(SELFENERGIESUSU)/SelfUCRight.o $(SELFENERGIESUSU)/SelfUCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfUTLeft.o $(SELFENERGIESUSU)/SelfUTRight.o $(SELFENERGIESUSU)/SelfUTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfCTLeft.o $(SELFENERGIESUSU)/SelfCTRight.o $(SELFENERGIESUSU)/SelfCTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfDSLeft.o $(SELFENERGIESUSU)/SelfDSRight.o $(SELFENERGIESUSU)/SelfDSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfDBLeft.o $(SELFENERGIESUSU)/SelfDBRight.o $(SELFENERGIESUSU)/SelfDBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfSBLeft.o $(SELFENERGIESUSU)/SelfSBRight.o $(SELFENERGIESUSU)/SelfSBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfNeuENeuELeft.o $(SELFENERGIESALT)/SelfNeuENeuERight.o $(SELFENERGIESALT)/SelfNeuENeuEScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfNeuMNeuMLeft.o $(SELFENERGIESALT)/SelfNeuMNeuMRight.o $(SELFENERGIESALT)/SelfNeuMNeuMScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfNeuTNeuTLeft.o $(SELFENERGIESALT)/SelfNeuTNeuTRight.o $(SELFENERGIESALT)/SelfNeuTNeuTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfElElLeft.o $(SELFENERGIESALT)/SelfElElRight.o $(SELFENERGIESALT)/SelfElElScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfMuMuLeft.o $(SELFENERGIESALT)/SelfMuMuRight.o $(SELFENERGIESALT)/SelfMuMuScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfTauTauLeft.o $(SELFENERGIESALT)/SelfTauTauRight.o $(SELFENERGIESALT)/SelfTauTauScalar.o \\\n")
# makefile.write("\t$(SELFENERGIESALT)/SelfTauTauLeftQED.o $(SELFENERGIESALT)/SelfTauTauRightQED.o $(SELFENERGIESALT)/SelfTauTauScalarQED.o \\\n")
# makefile.write("\t$(SELFENERGIESALT)/SelfTauTauLeftWeak.o $(SELFENERGIESALT)/SelfTauTauRightWeak.o $(SELFENERGIESALT)/SelfTauTauScalarWeak.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfDDLeft.o $(SELFENERGIESALT)/SelfDDRight.o $(SELFENERGIESALT)/SelfDDScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfUULeft.o $(SELFENERGIESALT)/SelfUURight.o $(SELFENERGIESALT)/SelfUUScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfSSLeft.o $(SELFENERGIESALT)/SelfSSRight.o $(SELFENERGIESALT)/SelfSSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfCCLeft.o $(SELFENERGIESALT)/SelfCCRight.o $(SELFENERGIESALT)/SelfCCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfBBLeft.o $(SELFENERGIESALT)/SelfBBRight.o $(SELFENERGIESALT)/SelfBBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfTTLeft.o $(SELFENERGIESALT)/SelfTTRight.o $(SELFENERGIESALT)/SelfTTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfUCLeft.o $(SELFENERGIESALT)/SelfUCRight.o $(SELFENERGIESALT)/SelfUCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfUTLeft.o $(SELFENERGIESALT)/SelfUTRight.o $(SELFENERGIESALT)/SelfUTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfCTLeft.o $(SELFENERGIESALT)/SelfCTRight.o $(SELFENERGIESALT)/SelfCTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfDSLeft.o $(SELFENERGIESALT)/SelfDSRight.o $(SELFENERGIESALT)/SelfDSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfDBLeft.o $(SELFENERGIESALT)/SelfDBRight.o $(SELFENERGIESALT)/SelfDBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfSBLeft.o $(SELFENERGIESALT)/SelfSBRight.o $(SELFENERGIESALT)/SelfSBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfNeuENeuELeft.o $(SELFENERGIESDERIV)/DSelfNeuENeuERight.o $(SELFENERGIESDERIV)/DSelfNeuENeuEScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfNeuMNeuMLeft.o $(SELFENERGIESDERIV)/DSelfNeuMNeuMRight.o $(SELFENERGIESDERIV)/DSelfNeuMNeuMScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfNeuTNeuTLeft.o $(SELFENERGIESDERIV)/DSelfNeuTNeuTRight.o $(SELFENERGIESDERIV)/DSelfNeuTNeuTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfElElLeft.o $(SELFENERGIESDERIV)/DSelfElElRight.o $(SELFENERGIESDERIV)/DSelfElElScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfMuMuLeft.o $(SELFENERGIESDERIV)/DSelfMuMuRight.o $(SELFENERGIESDERIV)/DSelfMuMuScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfTauTauLeft.o $(SELFENERGIESDERIV)/DSelfTauTauRight.o $(SELFENERGIESDERIV)/DSelfTauTauScalar.o \\\n")
# makefile.write("\t$(SELFENERGIESDERIV)/DSelfTauTauLeftQED.o $(SELFENERGIESDERIV)/DSelfTauTauRightQED.o $(SELFENERGIESDERIV)/DSelfTauTauScalarQED.o \\\n")
# makefile.write("\t$(SELFENERGIESDERIV)/DSelfTauTauLeftWeak.o $(SELFENERGIESDERIV)/DSelfTauTauRightWeak.o $(SELFENERGIESDERIV)/DSelfTauTauScalarWeak.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfDDLeft.o $(SELFENERGIESDERIV)/DSelfDDRight.o $(SELFENERGIESDERIV)/DSelfDDScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfUULeft.o $(SELFENERGIESDERIV)/DSelfUURight.o $(SELFENERGIESDERIV)/DSelfUUScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfSSLeft.o $(SELFENERGIESDERIV)/DSelfSSRight.o $(SELFENERGIESDERIV)/DSelfSSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfCCLeft.o $(SELFENERGIESDERIV)/DSelfCCRight.o $(SELFENERGIESDERIV)/DSelfCCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfBBLeft.o $(SELFENERGIESDERIV)/DSelfBBRight.o $(SELFENERGIESDERIV)/DSelfBBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfTTLeft.o $(SELFENERGIESDERIV)/DSelfTTRight.o $(SELFENERGIESDERIV)/DSelfTTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfUCLeft.o $(SELFENERGIESDERIV)/DSelfUCRight.o $(SELFENERGIESDERIV)/DSelfUCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfUTLeft.o $(SELFENERGIESDERIV)/DSelfUTRight.o $(SELFENERGIESDERIV)/DSelfUTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfCTLeft.o $(SELFENERGIESDERIV)/DSelfCTRight.o $(SELFENERGIESDERIV)/DSelfCTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfDSLeft.o $(SELFENERGIESDERIV)/DSelfDSRight.o $(SELFENERGIESDERIV)/DSelfDSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfDBLeft.o $(SELFENERGIESDERIV)/DSelfDBRight.o $(SELFENERGIESDERIV)/DSelfDBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfSBLeft.o $(SELFENERGIESDERIV)/DSelfSBRight.o $(SELFENERGIESDERIV)/DSelfSBScalar.o \\\n")
# makefile.write("\t$(PROCESSDEPENDENTSCHEME)/A0toTauPTauMProcDepVC.o $(PROCESSDEPENDENTSCHEME)/HHtoTauPTauMProcDepVC.o $(PROCESSDEPENDENTSCHEME)/h0toTauPTauMProcDepVC.o \\\n")
# makefile.write("\t$(PROCESSDEPENDENTSCHEME)/A0toN1N1ProcDepRelVC.o $(PROCESSDEPENDENTSCHEME)/h0toN1N1ProcDepRelVC.o $(PROCESSDEPENDENTSCHEME)/HHtoN1N1ProcDepRelVC.o \\\n")
# makefile.write("\t$(PROCESSDEPENDENTSCHEME)/A0toN2N2ProcDepRelVC.o $(PROCESSDEPENDENTSCHEME)/h0toN2N2ProcDepRelVC.o $(PROCESSDEPENDENTSCHEME)/HHtoN2N2ProcDepRelVC.o \\\n")
for singleProcess in processDirList:
makefile.write("\t$(PROCESS" + singleProcess.upper() + ")/TreeLevelWidthRed.o $(PROCESS" + singleProcess.upper() + ")/NLOWidthRed.o $(PROCESS" + singleProcess.upper() + ")/NLOTadWidthRed.o $(PROCESS" + singleProcess.upper() + ")/Counterterm.o $(PROCESS" + singleProcess.upper() + ")/RealCorrections.o \\\n")
makefile.write("\tcounterterms.o getParameters.o electroweakCorrections.o\n")
makefile.write("\t$(FCOMP) $(IFlags) constants.o $(SELFENERGIESUSU)/SelfAA.o $(SELFENERGIESUSU)/SelfAALight.o $(SELFENERGIESUSU)/SelfWpWp.o $(SELFENERGIESUSU)/SelfZ0Z0.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfAZ0.o $(SELFENERGIESUSU)/SelfAZ0ZeroMom.o $(SELFENERGIESALT)/SelfAA.o $(SELFENERGIESALT)/SelfWpWp.o $(SELFENERGIESALT)/SelfZ0Z0.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfAZ0.o $(SELFENERGIESALT)/SelfAZ0ZeroMom.o $(SELFENERGIESUSU)/SelfA0A0.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfG0A0.o $(SELFENERGIESUSU)/SelfG0G0.o $(SELFENERGIESUSU)/SelfGpGp.o $(SELFENERGIESUSU)/SelfGpHp.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfH1H1.o $(SELFENERGIESUSU)/SelfH1H2.o $(SELFENERGIESUSU)/SelfH1H3.o $(SELFENERGIESUSU)/SelfH2H2.o $(SELFENERGIESUSU)/SelfH2H3.o $(SELFENERGIESUSU)/SelfH3H3.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfHpHp.o $(SELFENERGIESALT)/SelfA0A0.o $(SELFENERGIESALT)/SelfG0A0.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfG0G0.o $(SELFENERGIESALT)/SelfGpGp.o $(SELFENERGIESALT)/SelfGpHp.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfH1H1.o $(SELFENERGIESALT)/SelfH1H2.o $(SELFENERGIESALT)/SelfH1H3.o $(SELFENERGIESALT)/SelfH2H2.o $(SELFENERGIESALT)/SelfH2H3.o $(SELFENERGIESALT)/SelfH3H3.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfHpHp.o $(SELFENERGIESDERIV)/DSelfAA.o $(SELFENERGIESDERIV)/DSelfAALight.o $(SELFENERGIESDERIV)/DSelfWpWp.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfZ0Z0.o $(SELFENERGIESDERIV)/DSelfAZ0.o $(SELFENERGIESDERIV)/DSelfA0A0.o $(SELFENERGIESDERIV)/DSelfG0A0.o $(SELFENERGIESDERIV)/DSelfG0G0.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfGpGp.o $(SELFENERGIESDERIV)/DSelfGpHp.o $(SELFENERGIESDERIV)/DSelfHpHp.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfH1H1.o $(SELFENERGIESDERIV)/DSelfH1H2.o $(SELFENERGIESDERIV)/DSelfH1H3.o $(SELFENERGIESDERIV)/DSelfH2H2.o $(SELFENERGIESDERIV)/DSelfH2H3.o $(SELFENERGIESDERIV)/DSelfH3H3.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfH1H2Add.o $(SELFENERGIESALT)/SelfH1H3Add.o $(SELFENERGIESALT)/SelfH2H3Add.o $(SELFENERGIESALT)/SelfG0A0Add.o $(SELFENERGIESALT)/SelfGpHpAdd.o \\\n")
makefile.write("\t$(TADPOLES)/TadH1.o $(TADPOLES)/TadH2.o $(TADPOLES)/TadH3.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfNeuENeuELeft.o $(SELFENERGIESUSU)/SelfNeuENeuERight.o $(SELFENERGIESUSU)/SelfNeuENeuEScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfNeuMNeuMLeft.o $(SELFENERGIESUSU)/SelfNeuMNeuMRight.o $(SELFENERGIESUSU)/SelfNeuMNeuMScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfNeuTNeuTLeft.o $(SELFENERGIESUSU)/SelfNeuTNeuTRight.o $(SELFENERGIESUSU)/SelfNeuTNeuTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfElElLeft.o $(SELFENERGIESUSU)/SelfElElRight.o $(SELFENERGIESUSU)/SelfElElScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfMuMuLeft.o $(SELFENERGIESUSU)/SelfMuMuRight.o $(SELFENERGIESUSU)/SelfMuMuScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfTauTauLeft.o $(SELFENERGIESUSU)/SelfTauTauRight.o $(SELFENERGIESUSU)/SelfTauTauScalar.o \\\n")
# makefile.write("\t$(SELFENERGIESUSU)/SelfTauTauLeftQED.o $(SELFENERGIESUSU)/SelfTauTauRightQED.o $(SELFENERGIESUSU)/SelfTauTauScalarQED.o \\\n")
# makefile.write("\t$(SELFENERGIESUSU)/SelfTauTauLeftWeak.o $(SELFENERGIESUSU)/SelfTauTauRightWeak.o $(SELFENERGIESUSU)/SelfTauTauScalarWeak.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfDDLeft.o $(SELFENERGIESUSU)/SelfDDRight.o $(SELFENERGIESUSU)/SelfDDScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfUULeft.o $(SELFENERGIESUSU)/SelfUURight.o $(SELFENERGIESUSU)/SelfUUScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfSSLeft.o $(SELFENERGIESUSU)/SelfSSRight.o $(SELFENERGIESUSU)/SelfSSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfCCLeft.o $(SELFENERGIESUSU)/SelfCCRight.o $(SELFENERGIESUSU)/SelfCCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfBBLeft.o $(SELFENERGIESUSU)/SelfBBRight.o $(SELFENERGIESUSU)/SelfBBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfTTLeft.o $(SELFENERGIESUSU)/SelfTTRight.o $(SELFENERGIESUSU)/SelfTTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfUCLeft.o $(SELFENERGIESUSU)/SelfUCRight.o $(SELFENERGIESUSU)/SelfUCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfUTLeft.o $(SELFENERGIESUSU)/SelfUTRight.o $(SELFENERGIESUSU)/SelfUTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfCTLeft.o $(SELFENERGIESUSU)/SelfCTRight.o $(SELFENERGIESUSU)/SelfCTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfDSLeft.o $(SELFENERGIESUSU)/SelfDSRight.o $(SELFENERGIESUSU)/SelfDSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfDBLeft.o $(SELFENERGIESUSU)/SelfDBRight.o $(SELFENERGIESUSU)/SelfDBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESUSU)/SelfSBLeft.o $(SELFENERGIESUSU)/SelfSBRight.o $(SELFENERGIESUSU)/SelfSBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfNeuENeuELeft.o $(SELFENERGIESALT)/SelfNeuENeuERight.o $(SELFENERGIESALT)/SelfNeuENeuEScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfNeuMNeuMLeft.o $(SELFENERGIESALT)/SelfNeuMNeuMRight.o $(SELFENERGIESALT)/SelfNeuMNeuMScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfNeuTNeuTLeft.o $(SELFENERGIESALT)/SelfNeuTNeuTRight.o $(SELFENERGIESALT)/SelfNeuTNeuTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfElElLeft.o $(SELFENERGIESALT)/SelfElElRight.o $(SELFENERGIESALT)/SelfElElScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfMuMuLeft.o $(SELFENERGIESALT)/SelfMuMuRight.o $(SELFENERGIESALT)/SelfMuMuScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfTauTauLeft.o $(SELFENERGIESALT)/SelfTauTauRight.o $(SELFENERGIESALT)/SelfTauTauScalar.o \\\n")
# makefile.write("\t$(SELFENERGIESALT)/SelfTauTauLeftQED.o $(SELFENERGIESALT)/SelfTauTauRightQED.o $(SELFENERGIESALT)/SelfTauTauScalarQED.o \\\n")
# makefile.write("\t$(SELFENERGIESALT)/SelfTauTauLeftWeak.o $(SELFENERGIESALT)/SelfTauTauRightWeak.o $(SELFENERGIESALT)/SelfTauTauScalarWeak.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfDDLeft.o $(SELFENERGIESALT)/SelfDDRight.o $(SELFENERGIESALT)/SelfDDScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfUULeft.o $(SELFENERGIESALT)/SelfUURight.o $(SELFENERGIESALT)/SelfUUScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfSSLeft.o $(SELFENERGIESALT)/SelfSSRight.o $(SELFENERGIESALT)/SelfSSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfCCLeft.o $(SELFENERGIESALT)/SelfCCRight.o $(SELFENERGIESALT)/SelfCCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfBBLeft.o $(SELFENERGIESALT)/SelfBBRight.o $(SELFENERGIESALT)/SelfBBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfTTLeft.o $(SELFENERGIESALT)/SelfTTRight.o $(SELFENERGIESALT)/SelfTTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfUCLeft.o $(SELFENERGIESALT)/SelfUCRight.o $(SELFENERGIESALT)/SelfUCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfUTLeft.o $(SELFENERGIESALT)/SelfUTRight.o $(SELFENERGIESALT)/SelfUTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfCTLeft.o $(SELFENERGIESALT)/SelfCTRight.o $(SELFENERGIESALT)/SelfCTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfDSLeft.o $(SELFENERGIESALT)/SelfDSRight.o $(SELFENERGIESALT)/SelfDSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfDBLeft.o $(SELFENERGIESALT)/SelfDBRight.o $(SELFENERGIESALT)/SelfDBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESALT)/SelfSBLeft.o $(SELFENERGIESALT)/SelfSBRight.o $(SELFENERGIESALT)/SelfSBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfNeuENeuELeft.o $(SELFENERGIESDERIV)/DSelfNeuENeuERight.o $(SELFENERGIESDERIV)/DSelfNeuENeuEScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfNeuMNeuMLeft.o $(SELFENERGIESDERIV)/DSelfNeuMNeuMRight.o $(SELFENERGIESDERIV)/DSelfNeuMNeuMScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfNeuTNeuTLeft.o $(SELFENERGIESDERIV)/DSelfNeuTNeuTRight.o $(SELFENERGIESDERIV)/DSelfNeuTNeuTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfElElLeft.o $(SELFENERGIESDERIV)/DSelfElElRight.o $(SELFENERGIESDERIV)/DSelfElElScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfMuMuLeft.o $(SELFENERGIESDERIV)/DSelfMuMuRight.o $(SELFENERGIESDERIV)/DSelfMuMuScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfTauTauLeft.o $(SELFENERGIESDERIV)/DSelfTauTauRight.o $(SELFENERGIESDERIV)/DSelfTauTauScalar.o \\\n")
# makefile.write("\t$(SELFENERGIESDERIV)/DSelfTauTauLeftQED.o $(SELFENERGIESDERIV)/DSelfTauTauRightQED.o $(SELFENERGIESDERIV)/DSelfTauTauScalarQED.o \\\n")
# makefile.write("\t$(SELFENERGIESDERIV)/DSelfTauTauLeftWeak.o $(SELFENERGIESDERIV)/DSelfTauTauRightWeak.o $(SELFENERGIESDERIV)/DSelfTauTauScalarWeak.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfDDLeft.o $(SELFENERGIESDERIV)/DSelfDDRight.o $(SELFENERGIESDERIV)/DSelfDDScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfUULeft.o $(SELFENERGIESDERIV)/DSelfUURight.o $(SELFENERGIESDERIV)/DSelfUUScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfSSLeft.o $(SELFENERGIESDERIV)/DSelfSSRight.o $(SELFENERGIESDERIV)/DSelfSSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfCCLeft.o $(SELFENERGIESDERIV)/DSelfCCRight.o $(SELFENERGIESDERIV)/DSelfCCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfBBLeft.o $(SELFENERGIESDERIV)/DSelfBBRight.o $(SELFENERGIESDERIV)/DSelfBBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfTTLeft.o $(SELFENERGIESDERIV)/DSelfTTRight.o $(SELFENERGIESDERIV)/DSelfTTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfUCLeft.o $(SELFENERGIESDERIV)/DSelfUCRight.o $(SELFENERGIESDERIV)/DSelfUCScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfUTLeft.o $(SELFENERGIESDERIV)/DSelfUTRight.o $(SELFENERGIESDERIV)/DSelfUTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfCTLeft.o $(SELFENERGIESDERIV)/DSelfCTRight.o $(SELFENERGIESDERIV)/DSelfCTScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfDSLeft.o $(SELFENERGIESDERIV)/DSelfDSRight.o $(SELFENERGIESDERIV)/DSelfDSScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfDBLeft.o $(SELFENERGIESDERIV)/DSelfDBRight.o $(SELFENERGIESDERIV)/DSelfDBScalar.o \\\n")
makefile.write("\t$(SELFENERGIESDERIV)/DSelfSBLeft.o $(SELFENERGIESDERIV)/DSelfSBRight.o $(SELFENERGIESDERIV)/DSelfSBScalar.o \\\n")
# makefile.write("\t$(PROCESSDEPENDENTSCHEME)/A0toTauPTauMProcDepVC.o $(PROCESSDEPENDENTSCHEME)/HHtoTauPTauMProcDepVC.o $(PROCESSDEPENDENTSCHEME)/h0toTauPTauMProcDepVC.o \\\n")
# makefile.write("\t$(PROCESSDEPENDENTSCHEME)/A0toN1N1ProcDepRelVC.o $(PROCESSDEPENDENTSCHEME)/h0toN1N1ProcDepRelVC.o $(PROCESSDEPENDENTSCHEME)/HHtoN1N1ProcDepRelVC.o \\\n")
# makefile.write("\t$(PROCESSDEPENDENTSCHEME)/A0toN2N2ProcDepRelVC.o $(PROCESSDEPENDENTSCHEME)/h0toN2N2ProcDepRelVC.o $(PROCESSDEPENDENTSCHEME)/HHtoN2N2ProcDepRelVC.o \\\n")
for singleProcess in processDirList:
makefile.write("\t$(PROCESS" + singleProcess.upper() + ")/TreeLevelWidthRed.o $(PROCESS" + singleProcess.upper() + ")/NLOWidthRed.o $(PROCESS" + singleProcess.upper() + ")/NLOTadWidthRed.o $(PROCESS" + singleProcess.upper() + ")/Counterterm.o $(PROCESS" + singleProcess.upper() + ")/RealCorrections.o \\\n")
makefile.write("\tcounterterms.o getParameters.o electroweakCorrections.o $(LFlags) -o electroweakCorrections" + applicationEnding + "\n\n")
makefile.write("clean:\n")
makefile.write("\trm -f *.o\n")
makefile.write("\trm -f $(SELFENERGIESALT)/*.o\n")
makefile.write("\trm -f $(SELFENERGIESUSU)/*.o\n")
makefile.write("\trm -f $(SELFENERGIESDERIV)/*.o\n")
# makefile.write("\trm -f $(PROCESSDEPENDENTSCHEME)/*.o\n")
makefile.write("\trm -f $(TADPOLES)/*.o\n")
for singleProcess in processDirList:
makefile.write("\trm -f $(PROCESS" + singleProcess.upper() + ")/*.o\n")
makefile.close()
def createElectroweakCorrections():
# Get the correct OS separator for OS-independent support of the input read-in
currentOS = sys.platform
if currentOS == 'win32':
OStype = 0
pathToOutputSeparator = '\\\\'
else:
OStype = 1
pathToOutputSeparator = '/'
# Get the process list and the list of masses
processDir = "BuildingBlocks" + os.sep + "Processes"
processList = os.listdir(processDir)
processDescriptionList = [None] * len(processList)
longestName = 0
previousDecayparticle = ""
for pickProcess in processList:
if len(pickProcess) > longestName:
longestName = len(pickProcess)
processDescFilePath = processDir + os.sep + pickProcess + os.sep + "processDescription.txt"
if not os.path.isfile(processDescFilePath):
print('Error: process description file for process {} not found! Please add the processDescription.txt file first to the {} folder. Terminating the setup of ewN2HDECAY now. Setup incomplete.'.format(pickProcess, pickProcess))
sys.exit()
else:
lines = [line.rstrip('\n') for line in open(processDescFilePath)]
massString = lines[1]
massStringList = massString.split(',')
symmetryFactorString = (lines[4].split())[0]
processDesc = [pickProcess, lines[0], massStringList[0], massStringList[1], massStringList[2], symmetryFactorString]
isIncluded = (lines[2].split())[0]
wantedPosition = int((lines[3].split())[0]) - 1
if isIncluded == '1':
processDescriptionList[wantedPosition] = processDesc
processDescriptionList = [x for x in processDescriptionList if x is not None]
# Print the electroweakCorrections.F90 file
electroweakCorrectionsFile = open("electroweakCorrections.F90", 'w')
electroweakCorrectionsFile.truncate()
electroweakCorrectionsFile.write("program electroweakCorrections\n")
electroweakCorrectionsFile.write("\tuse constants\n")
electroweakCorrectionsFile.write("\tuse counterterms\n")
electroweakCorrectionsFile.write("\timplicit none\n")
electroweakCorrectionsFile.write('#include "looptools.h"\n')
electroweakCorrectionsFile.write("\tcharacter(len=26) :: tempVal\n")
electroweakCorrectionsFile.write("\tcharacter(len=2) :: tempVal2, tempVal3\n")
electroweakCorrectionsFile.write("\tcharacter(len=32) :: arg\n")
electroweakCorrectionsFile.write("\tcharacter(len=50) :: fileName, fileNameFilled, targetName\n")
electroweakCorrectionsFile.write("\tcharacter(len=600000) :: outputFileContent, outputFileContent2\n")
electroweakCorrectionsFile.write("\tcharacter(300), parameter :: pathToOutputFiles = 'N2HDECAY" + pathToOutputSeparator + "'\n")
electroweakCorrectionsFile.write("\tinteger arguments(7)\n")
electroweakCorrectionsFile.write("\tinteger, parameter :: maxNumberSchemes = 10\n")
electroweakCorrectionsFile.write("\tlogical :: debugModeOn = .false.\n")
electroweakCorrectionsFile.write("\tlogical :: resultsAreZero = .false.\n")
electroweakCorrectionsFile.write("\tdouble precision prefactor, treeLevelWidth, NLOWidth(maxNumberSchemes), fullamplitude(maxNumberSchemes)\n")
electroweakCorrectionsFile.write("\tdouble precision NLOVCwidth, NLOVCwoIRwidth, NLOIRonlywidth\n")
for x in range(0, len(processDescriptionList)):
electroweakCorrectionsFile.write("\tdouble precision " + processDescriptionList[x][0] + "Tree, " + processDescriptionList[x][0] + "CT, " + processDescriptionList[x][0] + "Real\n")
electroweakCorrectionsFile.write("\tdouble complex " + processDescriptionList[x][0] + "VC, " + processDescriptionList[x][0] + "Tad\n")
electroweakCorrectionsFile.write("\tdouble precision treeLevelTemp, realCorrectionsTemp\n")
electroweakCorrectionsFile.write("\tdouble complex vertexCorrectionsTemp, vertexTadpolesTemp\n")
electroweakCorrectionsFile.write("\tdouble precision m1, m2, m3, kinematicThreshold\n")
electroweakCorrectionsFile.write("\tdouble precision inScaleDebug\n")
electroweakCorrectionsFile.write("\tdouble precision betaScheme(maxNumberSchemes), betaDistr\n")
electroweakCorrectionsFile.write("\tdouble precision alpha1Scheme(maxNumberSchemes), alpha1Distr\n")
electroweakCorrectionsFile.write("\tdouble precision alpha2Scheme(maxNumberSchemes), alpha2Distr\n")
electroweakCorrectionsFile.write("\tdouble precision alpha3Scheme(maxNumberSchemes), alpha3Distr\n")
electroweakCorrectionsFile.write("\tdouble precision vSScheme(maxNumberSchemes), vSDistr\n")
electroweakCorrectionsFile.write("\tdouble precision alpha1Orig, alpha2Orig, alpha3Orig, betaOrig, m12squaredOrig, vSOrig\n")
electroweakCorrectionsFile.write("\tdouble precision dm122MSBarTemp, dm122MSBarDiff\n")
electroweakCorrectionsFile.write("\tdouble precision alpha1AtMT, alpha2AtMT, alpha3AtMT, betaAtMT\n")
electroweakCorrectionsFile.write("\tinteger m, n, o, p, q, r, fileNameLength, point, statWrite\n\n")
electroweakCorrectionsFile.write("\t! Copyright (C) 2019, Marcel Krause and Milada Margarete Muehlleitner\n\n")
electroweakCorrectionsFile.write("\t! License: GNU General Public License (GNU GPL-3.0-or-later)\n\n")
electroweakCorrectionsFile.write("\t! ewN2HDECAY is released under GNU General Public License (GNU GPL-3.0-or-later).\n")
electroweakCorrectionsFile.write("\t! This program is free software: you can redistribute it and/or modify it under the terms of the\n")
electroweakCorrectionsFile.write("\t! GNU General Public License as published by the Free Software Foundation, either version 3 of\n")
electroweakCorrectionsFile.write("\t! the License, or any later version.\n\n")
electroweakCorrectionsFile.write("\t! This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;\n")
electroweakCorrectionsFile.write("\t! without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n")
electroweakCorrectionsFile.write("\t! See the GNU General Public License for more details.\n\n")
electroweakCorrectionsFile.write("\t! You have received a copy (LICENSE.md) of the GNU General Public License along with this program\n")
electroweakCorrectionsFile.write("\t! in the ewN2HDECAY root directory.\n\n")
electroweakCorrectionsFile.write("\t! Get the command line arguments standing for the different running options\n")
electroweakCorrectionsFile.write("\t! Argument 1: perform UV divergence check (1: true, 0: false; default: 0)\n")
electroweakCorrectionsFile.write("\t! Argument 2: perform IR divergence check (1: true, 0: false; default: 0)\n")
electroweakCorrectionsFile.write("\t! Argument 3: perform gauge dependence check (2: true (prompt for continuation of program if gauge-dependence is detected), 1: true (no prompts for continuation), 0: false; default: 0)\n")
electroweakCorrectionsFile.write("\t! Argument 4: perform numerical evaluation (1: true, 0: false; default: 1)\n")
electroweakCorrectionsFile.write("\t! Argument 5: relative path to the 2HDM input parameter file, starting from the Parameters directory of ewN2HDECAY\n")
electroweakCorrectionsFile.write("\t! Argument 6: relative path to the target file containing the results of the calculation, starting from the Temp/Results directory of ewN2HDECAY\n")
electroweakCorrectionsFile.write("\t! Argument 7: indicates whether a fixed renormalization scale is used or a dynamic one for each process (0: fixed scale, 1: dynamic scale for each process)\n")
electroweakCorrectionsFile.write("\tdo o = 1, iargc()\n")
electroweakCorrectionsFile.write("\t\tcall getarg(o, arg)\n")
electroweakCorrectionsFile.write("\t\tif (arg == '1') then\n")
electroweakCorrectionsFile.write("\t\t\targuments(o) = 1\n")
electroweakCorrectionsFile.write("\t\telse if (arg == '2') then\n")
electroweakCorrectionsFile.write("\t\t\targuments(o) = 2\n")
electroweakCorrectionsFile.write("\t\telse if (arg == '0') then\n")
electroweakCorrectionsFile.write("\t\t\targuments(o) = 0\n")
electroweakCorrectionsFile.write("\t\telse\n")
electroweakCorrectionsFile.write("\t\t\tif (o == 5) then\n")
electroweakCorrectionsFile.write("\t\t\t\tfileName = arg\n")
electroweakCorrectionsFile.write("\t\t\telse if (o == 6) then\n")
electroweakCorrectionsFile.write("\t\t\t\ttargetName = arg\n")
electroweakCorrectionsFile.write("\t\t\tend if\n")
electroweakCorrectionsFile.write("\t\tend if\n")
electroweakCorrectionsFile.write("\tend do\n\n")
electroweakCorrectionsFile.write("\t! Perform the numerical evaluation\n\n")
electroweakCorrectionsFile.write("\t\t! Reset all values\n")
electroweakCorrectionsFile.write("\t\tGaugeXiA = 1D0\n")
electroweakCorrectionsFile.write("\t\tGaugeXiW = 1D0\n")
electroweakCorrectionsFile.write("\t\tGaugeXiZ = 1D0\n\n")
electroweakCorrectionsFile.write("\t\t! Calculate all values\n")
electroweakCorrectionsFile.write("\t\tcall ltini\n")
electroweakCorrectionsFile.write("\t\t\t! Set default values for the loop calculations\n")
electroweakCorrectionsFile.write("\t\t\tcall setlambda(1D0)\n")
electroweakCorrectionsFile.write("\t\t\tcall setdelta(0D0)\n")
electroweakCorrectionsFile.write("\t\t\tIRLambda = getlambda()\n\n")
electroweakCorrectionsFile.write('\t\t\t! Use this hack to "fill up" the string to the maximum length with whitespace characters so that it can be passed to the subroutine call\n')
electroweakCorrectionsFile.write("\t\t\tfileName = fileName // ' '\n")
electroweakCorrectionsFile.write("\t\t\ttargetName = targetName // ' '\n\n")
electroweakCorrectionsFile.write("\t\t\t! Get all parameters\n")
electroweakCorrectionsFile.write('\t\t\tcall getParameters(' + str(OStype) + ', 0)\n\n')
# electroweakCorrectionsFile.write("\t\t\t! Define different renormalization scale\n")
# electroweakCorrectionsFile.write("\t\t\tif (arguments(7) == 0) then\n")
# electroweakCorrectionsFile.write("\t\t\t\tread(OutputScaleReadIn, *) OutputScale\n")
# electroweakCorrectionsFile.write("\t\t\t\tcall setmudim(OutputScale**2)\n")
# electroweakCorrectionsFile.write("\t\t\tend if\n\n")
electroweakCorrectionsFile.write("\t\t\t! Prepare the output file header\n")
electroweakCorrectionsFile.write('\t\t\toutputFileContent = ""\n\n')
electroweakCorrectionsFile.write("\t\t\t! Copy the original values of alpha, beta and m12squared\n")
electroweakCorrectionsFile.write('\t\t\talpha1Orig = alpha1\n')
electroweakCorrectionsFile.write('\t\t\talpha2Orig = alpha2\n')
electroweakCorrectionsFile.write('\t\t\talpha3Orig = alpha3\n')
electroweakCorrectionsFile.write('\t\t\tbetaOrig = beta\n')
electroweakCorrectionsFile.write('\t\t\tm12squaredOrig = m12squared\n')
electroweakCorrectionsFile.write('\t\t\tvSOrig = vS\n\n')
electroweakCorrectionsFile.write('\t\t\t! Print out the current point in phase-space (debug mode only)\n')
electroweakCorrectionsFile.write('\t\t\tif (debugModeOn) then\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "omitELCorr: ", omitELCorr\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MW: ", MW\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MZ: ", MZ\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "SW: ", SW\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CW: ", CW\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "alphaAtMZ: ", alphaAtMZ\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "EL: ", EL\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "vev: ", (2D0*MW*SW/EL)\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "ME: ", ME\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MM: ", MM\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "ML: ", ML\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MU: ", MU\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MD: ", MD\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MS: ", MS\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MC: ", MC\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MB: ", MB\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MT: ", MT\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKM11: ", CKM11\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKM12: ", CKM12\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKM13: ", CKM13\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKM21: ", CKM21\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKM22: ", CKM22\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKM23: ", CKM23\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKM31: ", CKM31\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKM32: ", CKM32\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKM33: ", CKM33\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKMC11: ", CKMC11\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKMC12: ", CKMC12\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKMC13: ", CKMC13\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKMC21: ", CKMC21\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKMC22: ", CKMC22\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKMC23: ", CKMC23\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKMC31: ", CKMC31\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKMC32: ", CKMC32\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "CKMC33: ", CKMC33\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MH1: ", MH1\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MH2: ", MH2\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MH3: ", MH3\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MA0: ", MA0\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "MHp: ", MHp\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "R11: ", RR11\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "R12: ", RR12\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "R13: ", RR13\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "R21: ", RR21\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "R22: ", RR22\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "R23: ", RR23\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "R31: ", RR31\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "R32: ", RR32\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "R33: ", RR33\n')
# electroweakCorrectionsFile.write('\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
# electroweakCorrectionsFile.write('\t\t\t\t\twrite (*,*) "alpha(", m , "): ", alphaScheme(m)\n')
# electroweakCorrectionsFile.write('\t\t\t\tend do\n')
# electroweakCorrectionsFile.write('\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
# electroweakCorrectionsFile.write('\t\t\t\t\twrite (*,*) "beta(", m , "): ", betaScheme(m)\n')
# electroweakCorrectionsFile.write('\t\t\t\tend do\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "Yuk1: ", Yuk1\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "Yuk2: ", Yuk2\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "Yuk3: ", Yuk3\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "Yuk4: ", Yuk4\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "Yuk5: ", Yuk5\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "Yuk6: ", Yuk6\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "m12squared: ", m12squared\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "vS: ", vS\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "2HDM Type: ", TypeOf2HDM\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "InputScale: ", InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "OutputScale: ", OutputScaleReadIn\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "ParamType: ", parameterType\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "RenormScheme: ", RenormScheme\n')
electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "RefScheme: ", RefScheme\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "hdecayLam1: ", hdecayLam1\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "hdecayLam2: ", hdecayLam2\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "hdecayLam3: ", hdecayLam3\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "hdecayLam4: ", hdecayLam4\n')
# electroweakCorrectionsFile.write('\t\t\t\twrite (*,*) "hdecayLam5: ", hdecayLam5\n')
electroweakCorrectionsFile.write('\t\t\tend if\n\n')
electroweakCorrectionsFile.write('\t\t\t! Evolve alpha_i and beta to the top scale for the gamma_top calculation in HDECAY\n')
electroweakCorrectionsFile.write('\t\t\talpha1 = alpha1Orig\n')
electroweakCorrectionsFile.write('\t\t\talpha2 = alpha2Orig\n')
electroweakCorrectionsFile.write('\t\t\talpha3 = alpha3Orig\n')
electroweakCorrectionsFile.write('\t\t\tbeta = betaOrig\n')
electroweakCorrectionsFile.write('\t\t\tm12squared = m12squaredOrig\n')
electroweakCorrectionsFile.write('\t\t\tvS = vSOrig\n')
electroweakCorrectionsFile.write('\t\t\talpha1AtMT = alpha1\n')
electroweakCorrectionsFile.write('\t\t\talpha2AtMT = alpha2\n')
electroweakCorrectionsFile.write('\t\t\talpha3AtMT = alpha3\n')
electroweakCorrectionsFile.write('\t\t\tbetaAtMT = beta\n')
electroweakCorrectionsFile.write('\t\t\tif (RenormScheme .EQ. 9) then\n')
electroweakCorrectionsFile.write('\t\t\t\tEvalScale = InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\talpha1Distr = dAlpha1MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\talpha2Distr = dAlpha2MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\talpha3Distr = dAlpha3MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\tbetaDistr = dBetaMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\tEvalScale = MT\n')
electroweakCorrectionsFile.write('\t\t\t\talpha1AtMT = alpha1Orig + alpha1Distr - dAlpha1MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\talpha2AtMT = alpha2Orig + alpha2Distr - dAlpha2MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\talpha3AtMT = alpha3Orig + alpha3Distr - dAlpha3MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\tbetaAtMT = betaOrig + betaDistr - dBetaMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\tend if\n')
electroweakCorrectionsFile.write('\t\t\tif (RenormScheme .EQ. 10) then\n')
electroweakCorrectionsFile.write('\t\t\t\tEvalScale = InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\talpha1Distr = dAlpha1MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\talpha2Distr = dAlpha2MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\talpha3Distr = dAlpha3MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\tbetaDistr = dBetaMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\tEvalScale = MT\n')
electroweakCorrectionsFile.write('\t\t\t\talpha1AtMT = alpha1Orig + alpha1Distr - dAlpha1MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\talpha2AtMT = alpha2Orig + alpha2Distr - dAlpha2MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\talpha3AtMT = alpha3Orig + alpha3Distr - dAlpha3MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\tbetaAtMT = betaOrig + betaDistr - dBetaMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\tend if\n\n')
electroweakCorrectionsFile.write('\t\t\t! Write the results to the output file\n')
electroweakCorrectionsFile.write('\t\t\topen(unit=44, file=trim(pathToOutputFiles)//trim("alphaandbeta.dat"), status=\'replace\', &\n')
electroweakCorrectionsFile.write('\t\t\t&action=\'write\', iostat=statWrite)\n')
electroweakCorrectionsFile.write('\t\t\t\tif ( statWrite == 0) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = ""\n')
electroweakCorrectionsFile.write('\t\t\t\t\twrite( tempVal, \'(ES23.15E3)\' ) alpha1AtMT\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = trim(outputFileContent2) // "alpha1 ="\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = trim(outputFileContent2) // " " // tempVal\n')
electroweakCorrectionsFile.write('\t\t\t\t\twrite(44,*) trim(outputFileContent2)\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = ""\n')
electroweakCorrectionsFile.write('\t\t\t\t\twrite( tempVal, \'(ES23.15E3)\' ) alpha2AtMT\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = trim(outputFileContent2) // "alpha2 ="\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = trim(outputFileContent2) // " " // tempVal\n')
electroweakCorrectionsFile.write('\t\t\t\t\twrite(44,*) trim(outputFileContent2)\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = ""\n')
electroweakCorrectionsFile.write('\t\t\t\t\twrite( tempVal, \'(ES23.15E3)\' ) alpha3AtMT\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = trim(outputFileContent2) // "alpha3 ="\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = trim(outputFileContent2) // " " // tempVal\n')
electroweakCorrectionsFile.write('\t\t\t\t\twrite(44,*) trim(outputFileContent2)\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = ""\n')
electroweakCorrectionsFile.write('\t\t\t\t\twrite( tempVal, \'(ES23.15E3)\' ) betaAtMT\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = trim(outputFileContent2) // "beta ="\n')
electroweakCorrectionsFile.write('\t\t\t\t\toutputFileContent2 = trim(outputFileContent2) // " " // tempVal\n')
electroweakCorrectionsFile.write('\t\t\t\t\twrite(44,*) trim(outputFileContent2)\n')
electroweakCorrectionsFile.write('\t\t\t\telse\n')
electroweakCorrectionsFile.write('\t\t\t\t\twrite(*,*) \'ERROR: could not create output file for writing!\'\n')
electroweakCorrectionsFile.write('\t\t\t\tend if\n')
electroweakCorrectionsFile.write('\t\t\tclose(unit=44)\n\n')
for x in range(0, len(processDescriptionList)):
decayHeaderNeed = False
if (processDescriptionList[x][1][:2]).strip() != previousDecayparticle:
previousDecayparticle = (processDescriptionList[x][1][:2]).strip()
decayHeaderNeed = True
starString = ""
for y in range(0, (32 - len(processDescriptionList[x][1]))):
starString += "*"
starString2 = ""
for y in range(0, (21 - len(previousDecayparticle))):
starString2 += "*"
additionalWhitespaces = ""
for y in range(0, (longestName - len(processDescriptionList[x][0]))):
additionalWhitespaces += " "
symmetryFactor = processDescriptionList[x][5]
electroweakCorrectionsFile.write('\t\t\t\t! PROCESS ' + processDescriptionList[x][1] + '\n')
electroweakCorrectionsFile.write("\t\t\t\t\t! Get the original values of alpha_i, beta, m12squared and vS and calculate the angle-dependent parameters\n")
electroweakCorrectionsFile.write('\t\t\t\t\talpha1 = alpha1Orig\n')
electroweakCorrectionsFile.write('\t\t\t\t\talpha2 = alpha2Orig\n')
electroweakCorrectionsFile.write('\t\t\t\t\talpha3 = alpha3Orig\n')
electroweakCorrectionsFile.write('\t\t\t\t\tbeta = betaOrig\n')
electroweakCorrectionsFile.write('\t\t\t\t\tm12squared = m12squaredOrig\n')
electroweakCorrectionsFile.write('\t\t\t\t\tvS = vSOrig\n')
electroweakCorrectionsFile.write('\t\t\t\t\tcall getParameters(' + str(OStype) + ', 1)\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t! Kinematic prefactor together with the symmetry factor of the process\n')
# electroweakCorrectionsFile.write('\t\t\t\t\twrite (*,*) "----- ' + processDescriptionList[x][1] + ' -----"\n')
electroweakCorrectionsFile.write('\t\t\t\t\tm1 = ' + processDescriptionList[x][2] + '\n')
electroweakCorrectionsFile.write('\t\t\t\t\tm2 = ' + processDescriptionList[x][3] + '\n')
electroweakCorrectionsFile.write('\t\t\t\t\tm3 = ' + processDescriptionList[x][4] + '\n')
electroweakCorrectionsFile.write('\t\t\t\t\tkinematicThreshold = m1**2 - (m2 + m3)**2\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t! Define different renormalization scale\n')
electroweakCorrectionsFile.write('\t\t\t\t\tif (arguments(7) >= 1) then\n')
for z in Config.renScaleDefinitions:
electroweakCorrectionsFile.write('\t\t\t\t\t\tif ("' + z + '" == trim(OutputScaleReadIn)) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tOutputScale = ' + z.replace('MIN', 'm1') + '\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall setmudim((' + z.replace('MIN', 'm1') + ')**2)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tend if\n')
electroweakCorrectionsFile.write('\t\t\t\t\tend if\n\n')
# electroweakCorrectionsFile.write('\t\t\t\t\twrite (*,*) "RenScale:", getmudim()\n')
# Evolve m12squared from InputScale to OutputScale by the difference of the
# MSbar counterterm evaluated at the two scales.
electroweakCorrectionsFile.write('\t\t\t\t\t! Evolve m12squared to the output scale\n')
electroweakCorrectionsFile.write('\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\tEvalScale = InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\tdm122MSBarTemp = dm122MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\tEvalScale = OutputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\tdm122MSBarDiff = dm122MSBarTemp - dm122MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\tm12squared = m12squared + dm122MSBarDiff\n\n')
# Guard clauses in the generated Fortran: set resultsAreZero when the
# corrections are switched off, the decaying particle is massless, or the
# decay is kinematically closed.
electroweakCorrectionsFile.write('\t\t\t\t\tif (omitELCorr .EQ. 1) then\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\twrite (*,*) "The electroweak corrections to the process ' + processDescriptionList[x][1] + ' are not calculated since OMIT ELW2 is set to 1."\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tresultsAreZero = .true.\n')
electroweakCorrectionsFile.write('\t\t\t\t\telse if (m1 .LE. 0D0) then\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\twrite (*,*) "The process ' + processDescriptionList[x][1] + ' has a massless particle in the initial state. A decay of massless&\n\t\t\t\t\t\t\t\t& particles is not supported. The LO and NLO widths are set to zero manually."\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tresultsAreZero = .true.\n')
electroweakCorrectionsFile.write('\t\t\t\t\telse if (kinematicThreshold .LT. 0) then\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\twrite (*,*) "The process ' + processDescriptionList[x][1] + ' does not fulfill the kinematic threshold.&\n\t\t\t\t\t\t\t\t& The LO and NLO widths are set to zero manually."\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tresultsAreZero = .true.\n')
electroweakCorrectionsFile.write('\t\t\t\t\telse\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tresultsAreZero = .false.\n\n')
# Two-body decay prefactor: 1/S * sqrt(lambda(m1^2, m2^2, m3^2)) / (16*pi*m1^3),
# where lambda is the Kaellen function (the sqrt argument below expands it).
electroweakCorrectionsFile.write('\t\t\t\t\t\t! Kinematic prefactor\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tprefactor = 1D0/' + symmetryFactor + 'D0 * DSQRT(m1**4 + m2**4 + m3**4 - 2D0*m1**2*m2**2 - 2D0*m1**2*m3**2 &\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t& - 2D0*m2**2*m3**2 )/(16D0*PI*m1**3)\n\n')
# Select the counterterm "distributions" of the chosen reference scheme:
#   0        : none (input values taken as-is; all schemes filled later)
#   1/2, 3/4 : KOSY-type ("Kan") usual/alternative tadpole treatment,
#              with beta1/beta2 variants
#   5/6, 7/8 : pinch-technique p* / on-shell scale, beta1/beta2 variants
#   9/10     : MSbar usual/alternative (evaluated at InputScale)
electroweakCorrectionsFile.write('\t\t\t\t\t\t! Get the parameters alpha_i, beta and vS in every scheme\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tselect case (RefScheme)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (0)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = 0D0\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = 0D0\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = 0D0\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = 0D0\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = 0D0\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (1)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = dAlpha1KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = dAlpha2KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = dAlpha3KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = dBeta1KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = dvSMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (2)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = dAlpha1KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = dAlpha2KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = dAlpha3KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = dBeta2KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = dvSMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (3)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = dAlpha1KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = dAlpha2KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = dAlpha3KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = dBeta1KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (4)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = dAlpha1KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = dAlpha2KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = dAlpha3KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = dBeta2KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (5)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = dAlpha1PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = dAlpha2PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = dAlpha3PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = dBeta1PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (6)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = dAlpha1PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = dAlpha2PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = dAlpha3PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = dBeta2PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (7)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = dAlpha1PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = dAlpha2PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = dAlpha3PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = dBeta1PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (8)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = dAlpha1PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = dAlpha2PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = dAlpha3PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = dBeta2PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (9)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tEvalScale = InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = dAlpha1MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = dAlpha2MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = dAlpha3MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = dBetaMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = dvSMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcase (10)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tEvalScale = InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Distr = dAlpha1MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Distr = dAlpha2MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Distr = dAlpha3MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaDistr = dBetaMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSDistr = dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tend select\n\n')
# Fill alpha{1,2,3}Scheme, betaScheme and vSScheme for all maxNumberSchemes
# schemes.  Two cases in the generated Fortran:
#   * RefScheme == 0: start every scheme from the input values; for the MSbar
#     schemes (9/10) additionally run the parameters from InputScale to
#     OutputScale via counterterm differences.  The EvalScale assignments and
#     clearcache calls below are order-sensitive — each counterterm must be
#     evaluated at the scale set immediately before it.
#   * otherwise: convert the reference-scheme values into each scheme m by
#     "<param>Orig + <param>Distr - <scheme m counterterm>".
electroweakCorrectionsFile.write('\t\t\t\t\t\tif (RefScheme .EQ. 0) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1Scheme(m) = alpha1\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2Scheme(m) = alpha2\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3Scheme(m) = alpha3\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbetaScheme(m) = beta\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvSScheme(m) = vS\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Distr = dAlpha1MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Distr = dAlpha2MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Distr = dAlpha3MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = OutputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(9) = alpha1Orig + alpha1Distr - dAlpha1MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(9) = alpha2Orig + alpha2Distr - dAlpha2MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(9) = alpha3Orig + alpha3Distr - dAlpha3MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Distr = dAlpha1MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Distr = dAlpha2MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Distr = dAlpha3MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = OutputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(10) = alpha1Orig + alpha1Distr - dAlpha1MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(10) = alpha2Orig + alpha2Distr - dAlpha2MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(10) = alpha3Orig + alpha3Distr - dAlpha3MSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaDistr = dBetaMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = OutputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(9) = betaOrig + betaDistr - dBetaMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaDistr = dBetaMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = OutputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(10) = betaOrig + betaDistr - dBetaMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = InputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSDistr = dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = OutputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
# NOTE(review): in this RefScheme==0 branch vSScheme(1), (2) and (9) keep the
# value vS from the do-loop above, while (3)-(8) and (10) are run to the output
# scale — presumably intentional (schemes 1/2/9 use the "usual" vS); confirm.
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(3) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(4) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(5) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(6) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(7) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(8) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(10) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\telse\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tEvalScale = OutputScale\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(1) = alpha1Orig + alpha1Distr - dAlpha1KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(2) = alpha1Orig + alpha1Distr - dAlpha1KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(3) = alpha1Orig + alpha1Distr - dAlpha1KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(4) = alpha1Orig + alpha1Distr - dAlpha1KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(5) = alpha1Orig + alpha1Distr - dAlpha1PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(6) = alpha1Orig + alpha1Distr - dAlpha1PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(7) = alpha1Orig + alpha1Distr - dAlpha1PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(8) = alpha1Orig + alpha1Distr - dAlpha1PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(9) = alpha1Orig + alpha1Distr - dAlpha1MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha1Scheme(10) = alpha1Orig + alpha1Distr - dAlpha1MSBarAlter()\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(1) = alpha2Orig + alpha2Distr - dAlpha2KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(2) = alpha2Orig + alpha2Distr - dAlpha2KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(3) = alpha2Orig + alpha2Distr - dAlpha2KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(4) = alpha2Orig + alpha2Distr - dAlpha2KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(5) = alpha2Orig + alpha2Distr - dAlpha2PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(6) = alpha2Orig + alpha2Distr - dAlpha2PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(7) = alpha2Orig + alpha2Distr - dAlpha2PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(8) = alpha2Orig + alpha2Distr - dAlpha2PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(9) = alpha2Orig + alpha2Distr - dAlpha2MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha2Scheme(10) = alpha2Orig + alpha2Distr - dAlpha2MSBarAlter()\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(1) = alpha3Orig + alpha3Distr - dAlpha3KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(2) = alpha3Orig + alpha3Distr - dAlpha3KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(3) = alpha3Orig + alpha3Distr - dAlpha3KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(4) = alpha3Orig + alpha3Distr - dAlpha3KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(5) = alpha3Orig + alpha3Distr - dAlpha3PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(6) = alpha3Orig + alpha3Distr - dAlpha3PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(7) = alpha3Orig + alpha3Distr - dAlpha3PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(8) = alpha3Orig + alpha3Distr - dAlpha3PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(9) = alpha3Orig + alpha3Distr - dAlpha3MSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\talpha3Scheme(10) = alpha3Orig + alpha3Distr - dAlpha3MSBarAlter()\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(1) = betaOrig + betaDistr - dBeta1KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(2) = betaOrig + betaDistr - dBeta2KanUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(3) = betaOrig + betaDistr - dBeta1KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(4) = betaOrig + betaDistr - dBeta2KanAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(5) = betaOrig + betaDistr - dBeta1PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(6) = betaOrig + betaDistr - dBeta2PinchPStar()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(7) = betaOrig + betaDistr - dBeta1PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(8) = betaOrig + betaDistr - dBeta2PinchOS()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(9) = betaOrig + betaDistr - dBetaMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tbetaScheme(10) = betaOrig + betaDistr - dBetaMSBarAlter()\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(1) = vSOrig + vSDistr - dvSMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(2) = vSOrig + vSDistr - dvSMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(3) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(4) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(5) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(6) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(7) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(8) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(9) = vSOrig + vSDistr - dvSMSBarUsual()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tvSScheme(10) = vSOrig + vSDistr - dvSMSBarAlter()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tend if\n\n')
# Write the per-particle "mixing angles, m12^2 and vS" section of the output
# file.  Labels are "<param>-<m>"; since tempVal2/tempVal3 use the I1 format
# (one digit), schemes m >= 10 are printed as "1" // (m-10).
# NOTE(review): source indentation was lost in extraction — the extent of this
# `if decayHeaderNeed:` body is ambiguous; presumably it covers the header and
# the per-scheme parameter dump below (written once per decaying particle).
# Confirm against the original file.
if decayHeaderNeed:
electroweakCorrectionsFile.write('\t\t\t\t\t\t! Prepare the output file content\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "**************"\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " ' + previousDecayparticle + ' decays"\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " (mixing angles, m12^2 and vS) ' + starString2 + '\\n"\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t! Write the values of alpha_i, beta, m12squared and vS in the chosen scheme(s) to the output file\n')
# RenormScheme == 0: dump all schemes, one do-loop per parameter.
electroweakCorrectionsFile.write('\t\t\t\t\t\tif (RenormScheme .EQ. 0) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha1Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha1-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha1-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha2Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha2-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha2-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha3Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha3-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha3-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) betaScheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "beta-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "beta-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) vSScheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "vS-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "vS-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n\n')
# Else branch of the generated "if (RenormScheme .EQ. 0)": only the single
# chosen scheme m = RenormScheme is written (same label/format logic as the
# all-schemes branch), followed by m12squared which is scheme-independent.
electroweakCorrectionsFile.write('\t\t\t\t\t\telse\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tm = RenormScheme\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha1Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha1-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha1-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha2Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha2-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha2-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha3Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha3-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha3-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) betaScheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "beta-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "beta-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) vSScheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "vS-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "vS-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tend if\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) m12squared\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "m12squared ="\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
# Per-process "electroweak corrections" output section.  For RenormScheme==0
# the generated Fortran loops over all schemes: set the scheme's parameters,
# recompute the derived quantities, evaluate tree level / virtual vertex
# corrections / vertex tadpoles / real corrections (functions prefixed with
# processDescriptionList[x][0]), then assemble LO and NLO widths.
# (This section continues past the end of this chunk.)
electroweakCorrectionsFile.write('\t\t\t\t\t\t! Prepare the output file content\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "**************"\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " ' + processDescriptionList[x][1] + '"\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " (electroweak corrections) ' + starString + '\\n"\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t! Check if all schemes shall be calculated or only a specific one\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tif (RenormScheme .EQ. 0) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t! Get the full NLO decay width for all schemes\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Grab the current values of alpha_i and beta and calculate the angle-dependent parameters\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1 = alpha1Scheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2 = alpha2Scheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3 = alpha3Scheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbeta = betaScheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvS = vSScheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tcall getParameters(' + str(OStype) + ', 1)\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Calculate the NLO ingredients\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\ttreeLevelTemp = ' + processDescriptionList[x][0] + 'Tree()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvertexCorrectionsTemp = ' + processDescriptionList[x][0] + 'VC()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvertexTadpolesTemp = ' + processDescriptionList[x][0] + 'Tad()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\trealCorrectionsTemp = ' + processDescriptionList[x][0] + 'Real()\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Get the full tree-level decay width\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\ttreeLevelWidth = prefactor*treeLevelTemp\n\n')
# Schemes 1, 2 and 9 exclude the vertex tadpole contribution; the others
# include it.  The interference term is 2*Re(loop) plus the counterterm CT(m).
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Schemes 1, 2 and 9 are without tadpoles\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tif ((m == 1) .OR. (m == 2) .OR. (m == 9)) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tfullamplitude(m) = treeLevelTemp + 2D0*DBLE(vertexCorrectionsTemp) + &\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\t\t\t& 2D0*' + processDescriptionList[x][0] + 'CT(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tNLOWidth(m) = prefactor*( fullamplitude(m) + realCorrectionsTemp )\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\telse\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tfullamplitude(m) = treeLevelTemp + 2D0*DBLE(vertexCorrectionsTemp + vertexTadpolesTemp) + &\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\t\t\t& 2D0*' + processDescriptionList[x][0] + 'CT(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tNLOWidth(m) = prefactor*( fullamplitude(m) + realCorrectionsTemp )\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tend if\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the tree-level width to the output file\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) treeLevelWidth\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the NLO width to the output file\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) NLOWidth(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\telse if (RenormScheme .GT. maxNumberSchemes) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\twrite (*,*) "Invalid renormalization scheme. The chosen scheme number must be below the maximum&\n\t\t\t\t\t\t\t\t\t& number of schemes implemented. The widths are set to zero manually."\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Grab the current values of alpha_i and beta and calculate the angle-dependent parameters\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1 = alpha1Scheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2 = alpha2Scheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3 = alpha3Scheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbeta = betaScheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvS = vSScheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tcall getParameters(' + str(OStype) + ', 1)\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the tree-level width to the output file\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\ttreeLevelWidth = 0D0\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) treeLevelWidth\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the NLO width to the output file\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tNLOWidth(m) = 0D0\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) NLOWidth(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\telse\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t! Get the full NLO decay width for the chosen scheme\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tm = RenormScheme\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Grab the current values of alpha_i and beta and calculate the angle-dependent parameters\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha1 = alpha1Scheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha2 = alpha2Scheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\talpha3 = alpha3Scheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tbeta = betaScheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvS = vSScheme(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tcall getParameters(' + str(OStype) + ', 1)\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Calculate the NLO ingredients\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\ttreeLevelTemp = ' + processDescriptionList[x][0] + 'Tree()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvertexCorrectionsTemp = ' + processDescriptionList[x][0] + 'VC()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tvertexTadpolesTemp = ' + processDescriptionList[x][0] + 'Tad()\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\trealCorrectionsTemp = ' + processDescriptionList[x][0] + 'Real()\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Get the full tree-level decay width\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\ttreeLevelWidth = prefactor*treeLevelTemp\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Schemes 1, 2 and 9 are without tadpoles\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tif ((m == 1) .OR. (m == 2) .OR. (m == 9)) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tfullamplitude(m) = treeLevelTemp + 2D0*DBLE(vertexCorrectionsTemp) + &\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\t\t\t& 2D0*' + processDescriptionList[x][0] + 'CT(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tNLOWidth(m) = prefactor*( fullamplitude(m) + realCorrectionsTemp )\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\telse\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tfullamplitude(m) = treeLevelTemp + 2D0*DBLE(vertexCorrectionsTemp + vertexTadpolesTemp) + &\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\t\t\t& 2D0*' + processDescriptionList[x][0] + 'CT(m)\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tcall clearcache\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\tNLOWidth(m) = prefactor*( fullamplitude(m) + realCorrectionsTemp )\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tend if\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the tree-level width to the output file\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) treeLevelWidth\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the NLO width to the output file\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) NLOWidth(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tend if\n')
electroweakCorrectionsFile.write('\t\t\t\t\tend if\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\tif (resultsAreZero) then\n')
if decayHeaderNeed:
electroweakCorrectionsFile.write('\t\t\t\t\t\t! Prepare the output file content\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "**************"\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " ' + previousDecayparticle + ' decays"\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " (mixing angles, m12^2 and vS) ' + starString2 + '\\n"\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t! Write the values of alpha_i, beta, m12squared and vS in the chosen scheme(s) to the output file\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tif (RenormScheme .EQ. 0) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha1Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha1-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha1-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha2Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha2-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha2-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha3Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha3-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha3-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) betaScheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "beta-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "beta-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) vSScheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "vS-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "vS-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\telse\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tm = RenormScheme\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha1Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha1-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha1-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha2Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha2-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha2-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) alpha3Scheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha3-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "alpha3-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) betaScheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "beta-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "beta-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) vSScheme(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "vS-" // trim(tempVal2) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "vS-" // "1" // trim(tempVal3) // " ="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tend if\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) m12squared\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "m12squared ="\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t! Prepare the output file content\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "**************"\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " ' + processDescriptionList[x][1] + '"\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " (electroweak corrections) ' + starString + '\\n"\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tif (RenormScheme .EQ. 0) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the tree-level width to the output file\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\ttreeLevelWidth = 0D0\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) treeLevelWidth\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the NLO width to the output file\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tNLOWidth(m) = 0D0\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) NLOWidth(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\telse if (RenormScheme .GT. maxNumberSchemes) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the tree-level width to the output file\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\ttreeLevelWidth = 0D0\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) treeLevelWidth\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the NLO width to the output file\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tNLOWidth(m) = 0D0\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) NLOWidth(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tend do\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\telse\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\tm = RenormScheme\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the tree-level width to the output file\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\ttreeLevelWidth = 0D0\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) treeLevelWidth\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal2, '(I1)' ) m\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal3, '(I1)' ) (m-10)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'LO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t! Write the NLO width to the output file\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\tNLOWidth(m) = 0D0\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\twrite( tempVal, '(ES23.15E3)' ) NLOWidth(m)\n")
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tif (m .lt. 10) then\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // trim(tempVal2) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\telse\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // "' + processDescriptionList[x][0] + 'NLO" // "1" // trim(tempVal3) // " ' + additionalWhitespaces + '="\n')
electroweakCorrectionsFile.write("\t\t\t\t\t\t\t\tend if\n")
electroweakCorrectionsFile.write('\t\t\t\t\t\t\t\toutputFileContent = trim(outputFileContent) // " " // (trim(tempVal) // "\\n")\n')
electroweakCorrectionsFile.write('\t\t\t\t\t\tend if\n')
electroweakCorrectionsFile.write('\t\t\t\t\tend if\n\n')
# electroweakCorrectionsFile.write('\t\t\t\t\twrite (*,*) "TreeLevelWidth = ", treeLevelWidth\n')
# electroweakCorrectionsFile.write('\t\t\t\t\tif (RenormScheme .EQ. 0) then\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\t\twrite (*,*) "NLOWidth(", m, ") = ", NLOWidth(m)\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\tend do\n')
# electroweakCorrectionsFile.write('\t\t\t\t\telse if (RenormScheme .GT. maxNumberSchemes) then\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\tdo m = 1, maxNumberSchemes, 1\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\t\twrite (*,*) "NLOWidth(", m, ") = ", NLOWidth(m)\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\tend do\n')
# electroweakCorrectionsFile.write('\t\t\t\t\telse\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\tm = RenormScheme\n')
# electroweakCorrectionsFile.write('\t\t\t\t\t\t\twrite (*,*) "NLOWidth(", m, ") = ", NLOWidth(m)\n')
# electroweakCorrectionsFile.write('\t\t\t\t\tend if\n')
# electroweakCorrectionsFile.write('\t\t\t\t\twrite (*,*) "------------------------"\n')
# electroweakCorrectionsFile.write('\t\t\t\t\twrite (*,*) ""\n\n')
electroweakCorrectionsFile.write('\t\t\t! Write the results to the output file\n')
electroweakCorrectionsFile.write("\t\t\topen(unit=44, file=trim(pathToOutputFiles)//trim(targetName), status='old', &\n")
electroweakCorrectionsFile.write("\t\t\t&action='write', position='append', iostat=statWrite)\n")
electroweakCorrectionsFile.write('\t\t\t\tif ( statWrite == 0) then\n')
electroweakCorrectionsFile.write('\t\t\t\t\twrite(44,*) trim(outputFileContent)\n')
electroweakCorrectionsFile.write('\t\t\t\telse\n')
electroweakCorrectionsFile.write("\t\t\t\t\twrite(*,*) 'ERROR: could not create output file for writing!'\n")
electroweakCorrectionsFile.write('\t\t\t\tend if\n')
electroweakCorrectionsFile.write('\t\t\tclose(unit=44)\n')
electroweakCorrectionsFile.write('\t\tcall ltexi\n\n\n')
electroweakCorrectionsFile.write('end program electroweakCorrections\n')
electroweakCorrectionsFile.close()
#----------------------------#
# Main Program #
#----------------------------#
print("Starting the installation script.\n")

# Locate the LoopTools install archive in the repository, if present
# (by default it is not shipped). If several archives match, the one
# listed last by os.listdir wins, matching the original scan order.
looptools_matches = [entry for entry in os.listdir('.') if fnmatch(entry, 'LoopTools-*.tar.gz')]
filenameLoopTools = looptools_matches[-1] if looptools_matches else ''
# Derive the directory name the archive unpacks into (empty if no archive found).
loopToolsDirectory = filenameLoopTools.replace('.tar.gz', '')

# Record whether the archive actually exists on disk; this decides whether
# the user is offered an automatic install or a download below.
fileLoopToolsExists = os.path.isfile(filenameLoopTools)
if fileLoopToolsExists:
loopToolsCreationWanted = CommonFunctions.queryBoolean("Found the LoopTools install file " + filenameLoopTools + ". Do you want me to install LoopTools automatically?\nWARNING: this will delete the current LoopTools instance installed under ewN2HDECAY/" + loopToolsDirectory + ", if it exists.")
else:
# Ask for a download of the archive
downloadLoopToolsWanted = CommonFunctions.queryBoolean("LoopTools-*.tar.gz file not found. Do you want me to download version " + Config.loopToolsVersion + " of LoopTools (if you want to change the version, modify Config.py and re-run setup.py)?")
if downloadLoopToolsWanted:
# Check for the used OS
currentOS = sys.platform
# Check if the given LoopTools version actually exists on the server
urlToCheck = "http://www.feynarts.de/looptools/" + Config.loopToolsVersion + ".tar.gz"
neededCode = 0
try:
ret = urlopen(urlToCheck)
neededCode = ret.getcode()
except:
neededCode = 404
if neededCode == 200:
print("Start download?")
# Windows (cygwin) download routine
if currentOS == 'win32':
prompt = [Config.pathToCygwin, '-c', "curl http://www.feynarts.de/looptools/" + Config.loopToolsVersion + ".tar.gz -o " + Config.loopToolsVersion + ".tar.gz"]
subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
# Linux/macOS download routine
else:
prompt = ['bash', '-c', "curl http://www.feynarts.de/looptools/" + Config.loopToolsVersion + ".tar.gz -o " + Config.loopToolsVersion + ".tar.gz"]
subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
# Refresh the name of the installation file and directory
filenameLoopTools = ''
for file in os.listdir('.'):
if fnmatch(file, 'LoopTools-*.tar.gz'):
filenameLoopTools = file
loopToolsDirectory = filenameLoopTools.replace('.tar.gz', '')
fileLoopToolsExists = os.path.isfile(filenameLoopTools)
if fileLoopToolsExists:
loopToolsCreationWanted = CommonFunctions.queryBoolean("\nFound the LoopTools install file " + filenameLoopTools + ". Do you want me to install LoopTools automatically?\nWARNING: this will delete the current LoopTools instance installed under ewN2HDECAY/" + loopToolsDirectory + ", if it exists.")
else:
print("ERROR: could not find the LoopTools installation file " + filenameLoopTools + " after attempting the download. Do you have an active internet connection and is the LoopTools server online?")
print("The installation routine is aborted. Please re-run setup.py.")
sys.exit()
else:
print("\nERROR: I could not find the version of LoopTools that you specified in Config.py on the LoopTools server. Please check Config.py.")
print("The installation routine is aborted. Please re-run setup.py.")
sys.exit()
else:
print('LoopTools will not be downloaded. WARNING: LoopTools cannot be build without the archive in the ewN2HDECAY folder.')
loopToolsCreationWanted = False
# Start the LoopTools installation routine: unpack the archive, build the
# library, locate its bin/lib folders and record the paths in Config.py.
if loopToolsCreationWanted:
    # Check for the used OS
    currentOS = sys.platform
    # Windows (cygwin) installation routine
    if currentOS == 'win32':
        print("\nStarting the Windows (cygwin) installation routine.")
        print("\nI will use the following path to Cygwin: " + Config.pathToCygwin)
        cygwinIsCorrectPath = CommonFunctions.queryBoolean("Is this the correct path to Cygwin on your machine?")
        if cygwinIsCorrectPath:
            if os.path.isdir(loopToolsDirectory):
                print("\nRemoving existing LoopTools installation...")
                rmtree(loopToolsDirectory)
            print("\nStarting the LoopTools installation routine...")
            # Unzip the archive
            prompt = [Config.pathToCygwin, '-c', "gunzip -c " + filenameLoopTools + " | tar xvf -"]
            subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
            # Change to the LoopTools folder and configure
            os.chdir(loopToolsDirectory)
            prompt = [Config.pathToCygwin, '-c', "./configure"]
            subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
            # Replace 'find' in the LoopTools makefile to '/bin/find' (this is necessary since otherwise, Windows' FIND is used instead of Cygwin's find!)
            os.rename('makefile', 'makefileTemp')
            with open("makefileTemp", "rt") as fin:
                with open("makefile", "wt") as fout:
                    for line in fin:
                        fout.write(line.replace('find', '/bin/find'))
            os.remove("makefileTemp")
            # Make the program
            prompt = [Config.pathToCygwin, '-c', "make lib"]
            subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
            prompt = [Config.pathToCygwin, '-c', "make install"]
            subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
            prompt = [Config.pathToCygwin, '-c', "make clean"]
            subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
            os.chdir('..')
        else:
            print("\nPlease enter the correct path to Cygwin in Config.py first and then re-run the setup.py script.")
            sys.exit()
    # Linux/macOS installation routine
    else:
        print("Starting the Linux/macOS installation routine.")
        # Remove the existing copy of LoopTools
        if os.path.isdir(loopToolsDirectory):
            print("\nRemoving existing LoopTools installation...")
            rmtree(loopToolsDirectory)
        # Unzip the archive
        prompt = ['bash', '-c', "gunzip -c " + filenameLoopTools + " | tar xvf -"]
        subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
        # Change to the LoopTools folder and configure and make the program
        os.chdir(loopToolsDirectory)
        prompt = ['bash', '-c', "./configure"]
        subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
        prompt = ['bash', '-c', "make lib"]
        subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
        prompt = ['bash', '-c', "make install"]
        subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
        prompt = ['bash', '-c', "make clean"]
        subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
        os.chdir('..')
    # Search for the correct folders within the LoopTools main folder that contains the libraries and executables
    # (the subfolder names depend on OS and chip architecture: bin/bin64, lib/lib64).
    for subdir in os.listdir(loopToolsDirectory):
        if os.path.isdir(loopToolsDirectory + os.sep + subdir):
            tempList = os.listdir(loopToolsDirectory + os.sep + subdir)
            if 'bin' in tempList and 'include' in tempList and 'lib' in tempList:
                loopToolsLibRootFolder = (loopToolsDirectory + os.sep + subdir).replace('\\', '/')
                loopToolsLibSubFolder = 'lib'
                loopToolsExecSubFolder = 'bin'
                break
            elif 'bin' in tempList and 'include' in tempList and 'lib64' in tempList:
                loopToolsLibRootFolder = (loopToolsDirectory + os.sep + subdir).replace('\\', '/')
                loopToolsLibSubFolder = 'lib64'
                loopToolsExecSubFolder = 'bin'
                break
            elif 'bin64' in tempList and 'include' in tempList and 'lib' in tempList:
                loopToolsLibRootFolder = (loopToolsDirectory + os.sep + subdir).replace('\\', '/')
                loopToolsLibSubFolder = 'lib'
                loopToolsExecSubFolder = 'bin64'
                break
            elif 'bin64' in tempList and 'include' in tempList and 'lib64' in tempList:
                loopToolsLibRootFolder = (loopToolsDirectory + os.sep + subdir).replace('\\', '/')
                loopToolsLibSubFolder = 'lib64'
                loopToolsExecSubFolder = 'bin64'
                break
    useRelativeLoopToolsPath = True
    # Save the correct folders to Config.py
    # (rewrite Config.py line by line via a temporary copy).
    os.rename('Config.py', 'ConfigTemp.py')
    with open("ConfigTemp.py", "rt") as fin:
        with open("Config.py", "wt") as fout:
            for line in fin:
                # NOTE: the order of these substring tests matters —
                # 'pathLoopTools' is a substring of 'pathLoopToolsLibs' and
                # 'pathLoopToolsExecs', so the more specific keys come first.
                if 'pathLoopToolsLibs' in line:
                    tempLine = "pathLoopToolsLibs = '" + loopToolsLibSubFolder + "'\t\t\t\t\t\t\t\t\t# Specify the LoopTools subfolder (relative to pathLoopTools) where the LoopTools libraries are contained (NOTE: this depends on the OS and chip architecture; on Windows, this is normally 'lib', on Linux and macOS, it is normally 'lib64')\n"
                    fout.write(tempLine)
                elif 'pathLoopToolsExecs' in line:
                    tempLine = "pathLoopToolsExecs = '" + loopToolsExecSubFolder + "'\t\t\t\t\t\t\t\t\t# Specify the LoopTools subfolder (relative to pathLoopTools) where the LoopTools libraries are contained (NOTE: this depends on the OS and chip architecture; on Windows, this is normally 'lib', on Linux and macOS, it is normally 'lib64')\n"
                    fout.write(tempLine)
                elif 'pathLoopTools' in line:
                    tempLine = "pathLoopTools = '" + loopToolsLibRootFolder + "'\t# Specify the path to the LoopTools root folder (IMPORTANT: the path must never *end* with '/' and if useRelativePath is True, it must not *start* with '/' either! If useRelativePath is False, it depends on the OS if the full absolute path starts with '/' or not: on Windows, it typically does not, on Linux, it typically does)\n"
                    fout.write(tempLine)
                elif 'useRelativeLoopToolsPath' in line:
                    tempLine = "useRelativeLoopToolsPath = True\t\t\t\t\t\t\t\t# Set True if you want to set the path to LoopTools relative to the ewN2HDECAY installation path (useful if you installed LoopTools e.g. in a subdirectory of the ewN2HDECAY folder) or False if you want to use an absolute path to LoopTools\n"
                    fout.write(tempLine)
                else:
                    fout.write(line)
    os.remove('ConfigTemp.py')
    print('\nInstallation of LoopTools is finished.')
    # Ask whether the zip file should be deleted
    deleteLoopToolsZip = CommonFunctions.queryBoolean("Do you want to remove the zip archive of LoopTools now (not needed anymore after successful installation)?")
    if deleteLoopToolsZip:
        os.remove(filenameLoopTools)
else:
    # If LoopTools is not installed automatically, then the paths are read from Config.py
    print('\nLoopTools is not installed automatically. Please make sure now that the root path to LoopTools and the relative paths to the library and binary folder are set correctly in Config.py. If you use absolute paths, please make sure to set "useRelativeLoopToolsPath = False" in Config.py.')
    continueWithManualInstallation = CommonFunctions.queryBoolean("\nAre all paths set correctly in Config.py?")
    if continueWithManualInstallation:
        useRelativeLoopToolsPath = Config.useRelativeLoopToolsPath
        loopToolsLibRootFolder = Config.pathLoopTools
        loopToolsLibSubFolder = Config.pathLoopToolsLibs
        loopToolsExecSubFolder = Config.pathLoopToolsExecs
    else:
        print('\nPlease correct the paths in Config.py manually and re-run the setup.py script.')
        sys.exit()
# Generate (or regenerate) the build artifacts: the makefile and the Fortran
# driver electroweakCorrections.F90. Abort if either is still missing
# afterwards.
# Ask the user if the makefile should be created (if it already exists, ask if it should be re-created)
makefileExists = os.path.isfile("makefile")
if makefileExists:
    makefileCreationWanted = CommonFunctions.queryBoolean("\nmakefile already exists. Do you want to recreate it (WARNING: this overwrites the existing makefile)?")
else:
    makefileCreationWanted = CommonFunctions.queryBoolean("\nmakefile not found. Do you want to create it now?")
if makefileCreationWanted:
    print(' Creating makefile...')
    # createMakefile is defined earlier in this file; 'gfortran' is the
    # compiler written into the generated makefile.
    createMakefile("makefile", loopToolsLibRootFolder, loopToolsLibSubFolder, loopToolsExecSubFolder, useRelativeLoopToolsPath, 'gfortran')
    print(' ...done.\n')
else:
    print(' makefile is not created.\n')
makefileExistsNow = os.path.isfile("makefile")
# Ask the user if the electroweak corrections should be created
ewFileExists = os.path.isfile("electroweakCorrections.F90")
if ewFileExists:
    ewFileCreationWanted = CommonFunctions.queryBoolean("electroweakCorrections.F90 already exists. Do you want to recreate it (WARNING: this overwrites the existing electroweakCorrections.F90)?")
else:
    ewFileCreationWanted = CommonFunctions.queryBoolean("electroweakCorrections.F90 not found. Do you want to create it now?")
if ewFileCreationWanted:
    print(' Creating electroweakCorrections.F90...')
    createElectroweakCorrections()
    print(' ...done.\n')
else:
    print(' electroweakCorrections.F90 is not created.\n')
ewFileExistsNow = os.path.isfile("electroweakCorrections.F90")
# Check if the necessary files are present; if not, end the program
missingFiles = False
if not makefileExistsNow:
    print('Error: makefile not found!')
    missingFiles = True
if not ewFileExistsNow:
    print('Error: electroweakCorrections.F90 not found!')
    missingFiles = True
if missingFiles:
    print('\nError: major components of ewN2HDECAY missing. Terminating ewN2HDECAY now.')
    sys.exit()
# Final build stage: optionally compile the main program and the N2HDECAY
# subprogram, then optionally run 'make clean' in both.
# Make the program
makeWanted = CommonFunctions.queryBoolean("Do you want to make the program now (the make process may take a few minutes)?")
if makeWanted:
    print('Making main program ewN2HDECAY...\n')
    prompt = ['bash', '-c', "make electroweakCorrections"]
    subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
    print('Making subprogram N2HDECAY...\n')
    os.chdir('N2HDECAY')
    prompt = ['bash', '-c', "make"]
    subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
    os.chdir('..')
else:
    print('Make process skipped.\n')
# Cleaning the installation
cleanWanted = CommonFunctions.queryBoolean("Do you want to make clean (optional)?")
if cleanWanted:
    print('Cleaning main program ewN2HDECAY...\n')
    prompt = ['bash', '-c', "make clean"]
    subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
    print('Cleaning subprogram HDECAY...\n')
    os.chdir('N2HDECAY')
    prompt = ['bash', '-c', "make clean"]
    subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
    os.chdir('..')
else:
    print('Make clean process skipped.\n')
print("Setup completed.\n")
| 134,033 | 83.404282 | 397 | py |
ewN2HDECAY | ewN2HDECAY-master/ewN2HDECAY.py | #!/usr/bin/env python
#Filename: ewN2HDECAY.py
#################################################################################################################################
# #
# ewN2HDECAY #
# #
# Purpose: A program for the calculation of One-Loop Electroweak Corrections to Higgs Decays in the Next-to-Minimal #
# Two-Higgs-Doublet Model (N2HDM) Including State-of-the-Art QCD Corrections #
# Authors: Dr. Marcel Krause (marcel.krause@alumni.kit.edu) #
# Prof. Dr. M. Margarete Muehlleitner (margarete.muehlleitner@kit.edu) #
# Version: 1.0.3 #
# Date: 06.12.2019 #
# Copyright: Copyright (C) 2019, Marcel Krause and Milada Margarete Muehlleitner #
# License: GNU General Public License (GNU GPL-3.0-or-later) #
# #
# ewN2HDECAY is released under GNU General Public License (GNU GPL-3.0-or-later). This program is free software: #
# you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the #
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You have received a copy (LICENSE.md) of the GNU General Public License along with this program in the #
# ewN2HDECAY root directory. #
# #
# Citation: When you use this program, please acknowledge the work of our and other groups by citing the following papers: #
# The manual for ewN2HDECAY: #
# - M. Krause, M. Muhlleitner, Comp. Phys. Commun. 247 (2020) 106924, arXiv:1904.02103 (hep-ph) #
# The manuals for HDECAY and N2HDECAY: #
# - A. Djouadi, J. Kalinowski, M. Spira, Comp. Phys. Commun. 108 (1998) 56, hep-ph/9704448 #
# - A. Djouadi, J. Kalinowski, M. Muhlleitner, M. Spira, arXiv:1801.09506 (hep-ph) #
# - M. Muhlleitner, M. O. P. Sampaio, R. Santos, J. Wittbrodt, JHEP 1703 (2017) 094, arXiv:1612.01309 (hep-ph) #
# The paper on the EW correction to the N2HDM decays: #
# - M. Krause, D. Lopez-Val, M. Muhlleitner, R. Santos, JHEP 12 (2017) 077, arXiv:1708.01578 (hep-ph) #
# The publication of LoopTools: #
# - T. Hahn, M. Perez-Victoria, Comp. Phys. Commun. 118 (1999) 153-165, hep-ph/9807565 #
# #
#################################################################################################################################
#------------------------------#
# Import Modules #
#------------------------------#
import sys
import os
from shutil import copyfile, rmtree
from math import pi, sqrt
import subprocess
# import multiprocessing
import CommonFunctions # Provides common, often used functions for different scripts of ewN2HDECAY
#-------------------------#
# Settings #
#-------------------------#

# WARNING: do not change these settings if you do not know what they do!
# The values below are 1-indexed line numbers into the temporary N2HDECAY
# input file ("n2hdecay.in") that is assembled in the main loop; they must
# match the layout of that file exactly.
lineToInsert = 134 # This is the line at which the temporary input file ends and at which we append the electroweak corrections
lineWhereAlphaAtMZ = 27 # This is the line at which in the temporary input file the fine-structure constant at the Z boson mass MZ is specified
lineWhereGFCalc = 29 # This is the line at which in the temporary input file the calculated Fermi constant GFCALC is specified
lineWhereMZ = 32 # This is the line at which in the temporary input file the Z boson mass MZ is specified
lineWhereMW = 33 # This is the line at which in the temporary input file the W boson mass MW is specified
lineWhereOSMC = 23 # This is the line at which the OS MC value has to be inserted in the temporary input file
lineWhereOSMB = 24 # This is the line at which the OS MB value has to be inserted in the temporary input file
# NOTE(review): lineWhereOSMB is not referenced in the visible main loop;
# MBOSCALC is inserted together with MCOSCALC at lineWhereOSMC.
lineWhereParamType = 57 # This is the line at which the parameter type is specified
lineWhereRefScheme = 77 # This is the line at which the reference renormalization scheme is specified
#----------------------------#
# Main Program #
#----------------------------#
# Driver: for every file in Input/, run N2HDECAY in minimal mode to get the
# on-shell fermion masses, compute the electroweak corrections with the
# Fortran binary, re-run N2HDECAY in standard mode, and copy the outputs to
# Results/.
if __name__ == "__main__":
    # Print the welcome screen
    print('''
+---------------------------------------+
| |
| ewN2HDECAY 1.0.3 |
| |
| / |
| / |
| / |
| --- / |
| ______________/ \ |
| \ / |
| --- \ |
| \ |
| \ |
| \ |
| |
+---------------------------------------+
When you use this program please cite:
The manual for ewN2HDECAY:
- M. Krause, M. Muhlleitner, Comp. Phys. Commun. 247 (2020) 106924, arXiv:1904.02103 (hep-ph)
The manuals for HDECAY and N2HDECAY:
- A. Djouadi, J. Kalinowski, M. Spira, Comp. Phys. Commun. 108 (1998) 56, hep-ph/9704448
- A. Djouadi, J. Kalinowski, M. Muhlleitner, M. Spira, arXiv:1801.09506 (hep-ph)
- M. Muhlleitner, M. O. P. Sampaio, R. Santos, J. Wittbrodt, JHEP 1703 (2017) 094, arXiv:1612.01309 (hep-ph)
The paper on the EW correction to the N2HDM decays:
- M. Krause, D. Lopez-Val, M. Muhlleitner, R. Santos, JHEP 12 (2017) 077, arXiv:1708.01578 (hep-ph)
The publication of LoopTools:
- T. Hahn, M. Perez-Victoria, Comp. Phys. Commun. 118 (1999) 153-165, hep-ph/9807565
ewN2HDECAY is released under GNU General Public License (GNU GPL-3.0-or-later). This program is free software:
you can redistribute it and/or modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You have received a copy (LICENSE.md) of the GNU General Public License along with this program in the ewN2HDECAY
root directory.
Copyright 2019, Marcel Krause and Milada Margarete Muehlleitner.
''')
    # Get a list of all input files
    inputPath = "Input"
    inputFileList = os.listdir(inputPath)
    if '..' in inputFileList:
        inputFileList.remove('..')
    if '.' in inputFileList:
        inputFileList.remove('.')
    # Iterate over all input files
    for inputFileTemp in inputFileList:
        print("Calculating corrections for input file " + inputFileTemp + " ...\n")
        # Copy the input file to the N2HDECAY subfolder
        print("Copying input files into N2HDECAY folder...")
        filenameIn = "Input" + os.sep + inputFileTemp
        filenameOut = "N2HDECAY" + os.sep + "n2hdecay.in"
        # Remove any existing input and fermion masses file in N2HDECAY
        if os.path.isfile(filenameOut):
            os.remove(filenameOut)
        if os.path.isfile("N2HDECAY" + os.sep + "fermionmasses.dat"):
            os.remove("N2HDECAY" + os.sep + "fermionmasses.dat")
        if os.path.isfile("N2HDECAY" + os.sep + "alphaandbeta.dat"):
            os.remove("N2HDECAY" + os.sep + "alphaandbeta.dat")
        copyfile(filenameIn, filenameOut)
        # Seed alphaandbeta.dat with zeroed mixing angles (Fortran 0D0 literals).
        with open("N2HDECAY" + os.sep + "alphaandbeta.dat", 'w') as fileHandler:
            fileHandler.write(' alpha1 = 0D0\n alpha2 = 0D0\n alpha3 = 0D0\n beta = 0D0\n')
        print("... done.\n")
        # Let N2HDECAY run in minimal mode to produce the fermion mass file
        print("Starting N2HDECAY in minimal mode...")
        os.chdir('N2HDECAY')
        prompt = ['./run', '1']
        subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
        os.chdir('..')
        print("N2HDECAY in minimal mode terminated.\n")
        # Read the fermion masses from the fermion mass file
        # (lines of the form "NAME = VALUE"; first line MC, second line MB).
        filenameMasses = "N2HDECAY" + os.sep + "fermionmasses.dat"
        massFileLines = list(line.rstrip('\n') for line in open(filenameMasses))
        MCOSCalc = float((massFileLines[0].split('='))[1].strip())
        MBOSCalc = float((massFileLines[1].split('='))[1].strip())
        # Copy the file name to the N2HDECAY folder and truncate it at the end
        print("Copying input files into N2HDECAY folder...")
        filenameIn = "Input" + os.sep + inputFileTemp
        filenameOut = "N2HDECAY" + os.sep + "n2hdecay.in"
        if os.path.isfile(filenameOut):
            os.remove(filenameOut)
        fileHandler = open(filenameIn, "r")
        convertedFileHandler = []
        lineCount = 1
        for line in fileHandler:
            # If the parameter type is 2, then the reference renormalization scheme has to be set to zero
            # NOTE(review): paramType is read at line lineWhereParamType (57),
            # which precedes lineWhereRefScheme (77) in a well-formed input
            # file; for a malformed (too short) file paramType would be
            # unbound here.
            if (lineCount == lineWhereRefScheme):
                refScheme = int((line.split())[2])
                if (refScheme == 0):
                    print("\nERROR: REFSCHEM=0 is given as input, but REFSCHEM must be greater than zero.")
                    print("ewN2HDECAY will be terminated now (ERROR: REFSCHEM=0 was set).")
                    sys.exit()
                if (refScheme != 0) and (paramType == 2):
                    convertedFileHandler.append("REFSCHEM = 0\n")
                    print("\nWARNING: REFSCHEM (line 59) is given as a non-zero value, but TYPE=2 (line 57, lambdas as input) is set.")
                    print("REFSCHEM is overwritten to zero, automatic parameter conversion is deactivated!")
                    lineCount += 1
                    continue
            # Write the current line in an array
            convertedFileHandler.append(line)
            # Check for the parameter type
            if lineCount == lineWhereParamType:
                paramType = int((line.split())[2])
            # Pick out the values of MW, MZ and alphaAtMZ
            if lineCount == lineWhereMZ:
                massMZ = float((line.split())[2])
            if lineCount == lineWhereMW:
                massMW = float((line.split())[2])
            if lineCount == lineWhereAlphaAtMZ:
                alphaAtMZ = float((line.split())[2])
            lineCount += 1
        fileHandler.close()
        # Write a copy of the file to the output folder, but add GFCALC, MCOSCALC and MBOSCALC with the calculated values
        # GFcalc: tree-level Fermi constant from alpha(MZ), MW and MZ.
        GFcalc = pi/sqrt(2)*alphaAtMZ/(massMW**2*(1-massMW**2/massMZ**2))
        GFline = "GFCALC = " + str(GFcalc) + "\n"
        MCOSline = "MCOSCALC = " + str(MCOSCalc) + "\n"
        MBOSline = "MBOSCALC = " + str(MBOSCalc) + "\n"
        lineCount = 1
        convertedFile = ''
        renScaleIsDynamic = '0'
        for line in convertedFileHandler:
            # A dynamic renormalization scale is signalled by an OUTSCALE
            # entry containing "MIN"; forwarded as a flag to the Fortran code.
            if "OUTSCALE" in line and "MIN" in line:
                renScaleIsDynamic = '1'
            if lineCount == lineWhereGFCalc:
                convertedFile += GFline
            elif lineCount == lineWhereOSMC:
                # Insert both OS mass lines just before the MC input line.
                convertedFile += MCOSline
                convertedFile += MBOSline
                convertedFile += line
            else:
                convertedFile += line
            lineCount += 1
        fileHandler = open(filenameOut, "w")
        fileHandler.write(convertedFile)
        fileHandler.close()
        print("... done.\n")
        # Calculate the electroweak corrections
        print("Calculating electroweak corrections...\n")
        prompt = ['./electroweakCorrections', '0', '0', '0', '1', 'N2HDECAY' + os.sep + 'n2hdecay.in', 'n2hdecay.in', renScaleIsDynamic]
        subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
        print("Calculation of electroweak corrections done.\n")
        # Replace the newline character in each file with a proper newline
        print("Postprocessing temporary input file...")
        fileHandler = open(filenameOut, "r")
        convertedFile = ''
        lineCount = 1
        for line in fileHandler:
            # Convert the literal newlines to actual ones and remove the leading whitespace from the Fortran output
            if (lineCount == lineToInsert):
                lineToReplace = line.replace('\\n', '\n')[1:]
            else:
                lineToReplace = line.replace('\\n', '\n')
            convertedFile += lineToReplace
            # print(lineToReplace)
            lineCount += 1
        fileHandler.close()
        # Store the results file in the correct directory
        if os.path.isfile(filenameOut):
            os.remove(filenameOut)
        fileHandler = open(filenameOut, "w")
        fileHandler.write(convertedFile)
        fileHandler.close()
        print("... done.\n")
        # Start N2HDECAY in the normal (non-minimal) configuration
        print("Starting N2HDECAY in standard mode...")
        os.chdir('N2HDECAY')
        prompt = ['./run']
        subprocess.call(prompt, stdin=None, stdout=None, stderr=None, shell=False)
        os.chdir('..')
        print("N2HDECAY in standard mode terminated.\n")
        # Copy the output files to the results folder
        print("Copying input files into output folder...")
        filenameIn = "N2HDECAY" + os.sep + "slha.out"
        filenameOut = "Results" + os.sep + inputFileTemp.replace('.in', '_BR.out')
        copyfile(filenameIn, filenameOut)
        filenameIn = "N2HDECAY" + os.sep + "ewpartialwidth.out"
        filenameOut = "Results" + os.sep + inputFileTemp.replace('.in', '_EW.out')
        copyfile(filenameIn, filenameOut)
        print("... done.\n")
        # Cleaning
        if os.path.isfile("N2HDECAY" + os.sep + "fermionmasses.dat"):
            os.remove("N2HDECAY" + os.sep + "fermionmasses.dat")
        if os.path.isfile("N2HDECAY" + os.sep + "alphaandbeta.dat"):
            os.remove("N2HDECAY" + os.sep + "alphaandbeta.dat")
        print("Corrections for input file " + inputFileTemp + " done.\n")
    # End of program is reached
    print("All calculations finished. Thanks for using ewN2HDECAY!")
    # Print the end screen
    print('''
When you use this program please cite:
The manual for ewN2HDECAY:
- M. Krause, M. Muhlleitner, Comp. Phys. Commun. 247 (2020) 106924, arXiv:1904.02103 (hep-ph)
The manuals for HDECAY and N2HDECAY:
- A. Djouadi, J. Kalinowski, M. Spira, Comp. Phys. Commun. 108 (1998) 56, hep-ph/9704448
- A. Djouadi, J. Kalinowski, M. Muhlleitner, M. Spira, arXiv:1801.09506 (hep-ph)
- M. Muhlleitner, M. O. P. Sampaio, R. Santos, J. Wittbrodt, JHEP 1703 (2017) 094, arXiv:1612.01309 (hep-ph)
The paper on the EW correction to the N2HDM decays:
- M. Krause, D. Lopez-Val, M. Muhlleitner, R. Santos, JHEP 12 (2017) 077, arXiv:1708.01578 (hep-ph)
The publication of LoopTools:
- T. Hahn, M. Perez-Victoria, Comp. Phys. Commun. 118 (1999) 153-165, hep-ph/9807565
ewN2HDECAY is released under GNU General Public License (GNU GPL-3.0-or-later). This program is free software:
you can redistribute it and/or modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You have received a copy (LICENSE.md) of the GNU General Public License along with this program in the ewN2HDECAY
root directory.
Copyright 2019, Marcel Krause and Milada Margarete Muehlleitner.
''')
    sys.exit()
| 16,960 | 53.362179 | 143 | py |
applesoss | applesoss-main/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# Package metadata for applesoss.
# FIX: the setuptools keyword is ``extras_require`` (not ``extras_requires``);
# the misspelled keyword was silently ignored, so the optional ``webbpsf``
# extra could never be installed via ``pip install applesoss[webbpsf]``.
setup(name='applesoss',
      version='2.0.0',
      license='MIT',
      author='Michael Radica',
      author_email='michael.radica@umontreal.ca',
      packages=['applesoss'],
      include_package_data=True,
      url='https://github.com/radicamc/APPLESOSS',
      description='A Producer of ProfiLEs for SOSS',
      package_data={'': ['README.md', 'LICENSE']},
      install_requires=['astropy', 'astroquery', 'matplotlib', 'numpy',
                        'scipy'],
      extras_require={'webbpsf': ['webbpsf>=1.1.1']},
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 3.10',
      ],
      )
| 882 | 31.703704 | 71 | py |
applesoss | applesoss-main/applesoss/applesoss_utils.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thurs Mar 11 14:35 2020
@author: MCR
Miscellaneous utility functions for applesoss.
"""
from astropy.io import fits
from datetime import datetime
import numpy as np
import warnings
try:
import webbpsf
except ModuleNotFoundError:
print('WebbPSF not installed. Profile simulation not available.')
def find_consecutive(data, stepsize=1):
    """Partition *data* into runs whose neighbouring elements differ by
    exactly *stepsize* (i.e. consecutive values for the default step of 1).

    Returns a list of sub-arrays, one per run.
    """
    # Each index where the difference between neighbours deviates from the
    # expected step marks the start of a new run.
    breakpoints = np.flatnonzero(np.diff(data) != stepsize) + 1
    return np.split(data, breakpoints)
def generate_psfs(wave_increment=0.1, npix=400, obs_date=None, verbose=0):
    """Generate 1D SOSS PSFs across the full 0.5 - 2.9µm range of all orders.

    Parameters
    ----------
    wave_increment : float
        Wavelength step (in µm) for PSF simulation.
    npix : int
        Size (in native pixels) of the 1D PSFs.
    obs_date : str
        Date of observations in 'yyyy-mm-dd', or yyyy-mm-ddThh:mm:ss format.
        If None, WebbPSF's default optical models are used.
    verbose : int
        Level of verbosity.

    Returns
    -------
    psfs : array-like
        Record array of 1D PSFs ('PSF') with their wavelengths in µm
        ('Wave'), ordered from red to blue.
    """
    # Calculate the number of PSFs to generate based on the SOSS wavelength
    # range and the chosen increment.
    # NOTE(review): float truncation means e.g. 0.1 yields int(23.999...) = 23
    # steps, so the realised spacing of the linspace below can differ slightly
    # from wave_increment — confirm this is intended.
    nsteps = int((2.9 - 0.5) / wave_increment)
    # Estimate time to completion assuming ~10s per PSF.
    time_frame = int((nsteps * 10) / 60)
    if verbose != 0:
        print(' Generating {0} PSFs... Expected to take about {1} min(s).'.format(nsteps, time_frame))
    # Wavelengths in metres, reversed so the array runs red to blue.
    wavelengths = (np.linspace(0.5, 2.9, nsteps) * 1e-6)[::-1]
    # Set up WebbPSF simulation for NIRISS.
    niriss = webbpsf.NIRISS()
    # Override the default minimum wavelength of 0.6 microns.
    niriss.SHORT_WAVELENGTH_MIN = 0.5e-6
    # Set correct filter and pupil wheel components.
    niriss.filter = 'CLEAR'
    niriss.pupil_mask = 'GR700XD'
    # Get real OTE WFE from date of observations.
    if obs_date is not None:
        try:
            # load_wss_opd_by_date expects a full ISO timestamp; pad a bare
            # date with midnight.
            if 'T' not in obs_date:
                obs_date += 'T00:00:00'
            niriss.load_wss_opd_by_date(obs_date, verbose=False)
        except ValueError:
            print('Error in specified date.'
                  'Falling back to default optical models.')
        except RuntimeError:
            print('Could not find JWST OPDs for the specified date.'
                  'Falling back to default optical models.')
    # Suppress any warnings raised during the (long) datacube computation.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        cube = niriss.calc_datacube(wavelengths=wavelengths, fov_pixels=npix,
                                    oversample=1)
    # Collapse into 1D PSF by summing each 2D PSF along one spatial axis.
    psfs_1d = np.nansum(cube[0].data, axis=1)
    # Turn into record array and attach wavelength info
    psfs = np.recarray((nsteps, npix),
                       dtype=[('Wave', float), ('PSF', float)])
    psfs['Wave'] = wavelengths[:, None]*1e6  # Convert to µm
    psfs['PSF'] = psfs_1d
    return psfs
def generate_superprof(deep, cens, badpix=None, col_start=1850, col_end=2030):
    """Obtain an uncontaminated trace profile using the red end of order 1.

    Parameters
    ----------
    deep : array-like
        SOSS deep stack. Left unmodified; masking operates on a copy.
    cens : dict
        Centroids dictionary; must provide cens['order 1']['Y centroid'].
    badpix : array-like, optional
        Bad pixel mask that is the same shape as deep. Zero-valued pixels will
        be used, and all other-valued pixels will be masked. If None, no
        masking is applied.
    col_start : int
        First column to consider.
    col_end : int
        Last column to consider.

    Returns
    -------
    superprof : array-like
        Uncontaminated order 1 trace profile.
    """
    dimy, dimx = np.shape(deep)
    new = np.zeros((dimy, col_end - col_start))
    # Mask any flagged pixels. Work on a copy so the caller's stack is left
    # untouched, and only mask when a map is actually supplied.
    # FIX: previously, the default badpix=None made ``np.where(badpix != 0)``
    # evaluate to np.where(True) == (array([0]),), which silently NaN-ed the
    # entire first row of ``deep`` *in place*.
    deep = np.asarray(deep).copy()
    if badpix is not None:
        deep[np.asarray(badpix) != 0] = np.nan
    # Loop over all columns to consider. Rectify the trace by interpolating
    # the profiles to a common centroid.
    for i in range(col_start, col_end):
        old_prof = deep[:, i]
        # Do the interpolation at 10x oversampling to minimize errors.
        old_os = np.interp(np.linspace(0, dimy-1, dimy*10),
                           np.arange(dimy), old_prof)
        cen = cens['order 1']['Y centroid'][i]
        # Shift the oversampled grid so every column is centred at the
        # centroid of the reference column (col_end).
        new_prof = np.interp(np.arange(dimy),
                             np.linspace(0, dimy-1, dimy*10) - cen +
                             cens['order 1']['Y centroid'][col_end], old_os)
        # Normalize by the profile sum to remove amplitude variations.
        new[:, i-col_start] = new_prof / np.nansum(new_prof)
    # Collapse the rectified trace along the wavelength axis to create a
    # single profile.
    flat = np.nanmedian(new, axis=1)
    # Wing on the upper part of the profile is more extended than the lower
    # since the profile is located towards the bottom of the detector. Mirror
    # the upper half to the lower half.
    ind = int(cens['order 1']['Y centroid'][col_end])
    superprof = np.concatenate([flat[::-1][:-ind], flat[ind:]])
    return superprof
def get_wave_solution(wavemap_file, order):
    """Extract wavelength calibration information from the wavelength solution
    reference file.

    Parameters
    ----------
    wavemap_file : str
        Path to SOSS 2D wavelength solution reference file.
    order : int
        Diffraction order (also selects the FITS extension to read).

    Returns
    -------
    wavecal_x : np.array
        X pixel coordinate.
    wavecal_w : np.array
        Wavelength value.
    """
    # Get wavelength calibration reference file.
    wavemap = fits.getdata(wavemap_file, order)
    header = fits.getheader(wavemap_file, order)
    ovs = header['OVERSAMP']
    pad = header['PADDING']
    # Bin the map down to native resolution by block-averaging over the
    # oversampling factor.
    nrows, ncols = wavemap.shape
    trans_map = wavemap.reshape((nrows // ovs), ovs, (ncols // ovs), ovs)
    trans_map = trans_map.mean(1).mean(-1)
    # Remove the reference-file padding.
    # FIX: guard against PADDING == 0, for which the unconditional slice
    # ``[0:-0, 0:-0]`` previously returned an empty array.
    if pad > 0:
        trans_map = trans_map[pad:-pad, pad:-pad]
    dimy, dimx = np.shape(trans_map)
    # Collapse over the spatial dimension to get one wavelength per column.
    wavecal_w = np.nanmean(trans_map, axis=0)
    wavecal_x = np.arange(dimx)
    return wavecal_x, wavecal_w
def init_spec_profile(profile_2d, oversample, padding, subarray,
                      filename=None):
    """Initialize a specprofile reference file in the format expected by the
    JWST DMS.

    Parameters
    ----------
    profile_2d : array-like
        2D spatial profiles, with the spectral order indexed along the last
        axis (index 0 -> order 1, etc.).
    oversample : int
        Oversampling factor.
    padding : int
        Amount of padding (in native pixels).
    subarray : str
        SOSS subarray identifier.
    filename : str
        Output file name.

    Returns
    -------
    hdul : fits HDUList object
        Reference file in correct format.

    Raises
    ------
    ValueError
        If the subarray identifier is not recognized.
    """
    # Default filename.
    if filename is None:
        # Output SOSS reference file.
        filepattern = 'SOSS_ref_2D_profile_{}.fits'
        filename = filepattern.format(subarray)
    # Find the indices in the oversampled, padded FULL frame corresponding
    # to the requested subarray.
    if subarray == 'FULL':
        lrow = 0
        urow = oversample * (2048 + 2 * padding)
        lcol = 0
        ucol = oversample * (2048 + 2 * padding)
    elif subarray == 'SUBSTRIP96':
        lrow = oversample * (2048 - 246)
        urow = oversample * (2048 - 150 + 2 * padding)
        lcol = 0
        ucol = oversample * (2048 + 2 * padding)
    elif subarray == 'SUBSTRIP256':
        lrow = oversample * (2048 - 256)
        urow = oversample * (2048 + 2 * padding)
        lcol = 0
        ucol = oversample * (2048 + 2 * padding)
    else:
        raise ValueError('Unknown subarray: {}'.format(subarray))
    # Start building the output fits file.
    # NOTE: the header comment strings below are kept byte-identical to the
    # existing reference files (including spelling) for compatibility.
    hdul = list()
    hdu = fits.PrimaryHDU()
    hdu.header['DATE'] = (datetime.now().strftime('%Y-%m-%dT%H:%M:%S'), 'Date this file was created (UTC)')
    hdu.header['ORIGIN'] = ('applesoss', 'Orginazation responsible for creating file')
    hdu.header['TELESCOP'] = ('JWST', 'Telescope used to acquire the data')
    hdu.header['INSTRUME'] = ('NIRISS', 'Instrument used to acquire the data')
    hdu.header['SUBARRAY'] = (subarray, 'Subarray used')
    hdu.header['FILENAME'] = (filename, 'Name of the file')
    hdu.header['REFTYPE'] = ('SPECPROFILE', 'Reference file type')
    hdu.header['PEDIGREE'] = ('GROUND', 'The pedigree of the refernce file')
    hdu.header['DESCRIP'] = ('2D trace profile', 'Desription of the reference file')
    hdu.header['AUTHOR'] = ('Loic Albert', 'Author of the reference file')
    hdu.header['USEAFTER'] = ('2000-01-01T00:00:00', 'Use after date of the reference file')
    hdu.header['EXP_TYPE'] = ('NIS_SOSS', 'Type of data in the exposure')
    hdul.append(hdu)
    # One image extension per spectral order. The three extensions are
    # structurally identical, so build them in a loop instead of three
    # copy-pasted blocks.
    for order in (1, 2, 3):
        hdu = fits.ImageHDU(profile_2d[lrow:urow, lcol:ucol, order - 1].astype('float32'))
        hdu.header['ORDER'] = (order, 'Spectral order.')
        hdu.header['OVERSAMP'] = (oversample, 'Pixel oversampling.')
        hdu.header['PADDING'] = (padding, 'Native pixel-size padding around the image.')
        hdu.header['EXTNAME'] = 'ORDER'
        hdu.header['EXTVER'] = order
        hdul.append(hdu)
    # Create HDU list.
    hdul = fits.HDUList(hdul)
    return hdul
def interpolate_profile(w, w_cen, wavelengths, psfs, psfs_cen, os_factor=10):
"""For efficiency, 1D SOSS PSFs were generated through WebbPSF at
discrete intervals. This function performs the linear interpolation to
construct profiles at a specified wavelength.
Parameters
----------
w : float
Wavelength at which to return a PSF (in µm).
w_cen : float
Centroid position of the profile at w.
wavelengths : array_like
Wavelengths (in µm) corresponding to the PSF array.
psfs : array-like
WebbPSF simulated 1D PSFs.
psfs_cen : array_like
Array of centroid positions for the profiles in psfs.
os_factor : int
Oversampling factor for recentroiding.
Returns
-------
profile : np.array
1D SOSS PSF at wavelength w.
"""
# Get the simulated PSF anchors for the interpolation.
low = np.where(wavelengths < w)[0][0]
up = np.where(wavelengths > w)[0][-1]
anch_low = wavelengths[low]
anch_up = wavelengths[up]
# Shift the anchor profiles to the centroid position of the wavelength of
# interest.
len_psf = np.shape(psfs)[1]
# Oversample
psf_up = np.interp(np.linspace(0, (os_factor*len_psf - 1)/os_factor,
(os_factor*len_psf - 1) + 1),
np.arange(len_psf), psfs[up])
psf_low = np.interp(np.linspace(0, (os_factor*len_psf - 1)/os_factor,
(os_factor*len_psf - 1) + 1),
np.arange(len_psf), psfs[low])
# Shift the profiles to the correct centroid
psf_up = np.interp(np.arange(len_psf*os_factor),
np.arange(len_psf*os_factor) - psfs_cen[up]*os_factor + w_cen*os_factor,
psf_up)
psf_low = np.interp(np.arange(len_psf*os_factor),
np.arange(len_psf*os_factor) - psfs_cen[low]*os_factor + w_cen*os_factor,
psf_low)
# Resample to the native pixel sampling.
psf_up = np.interp(np.arange(len_psf), np.linspace(0, (os_factor*len_psf-1)/os_factor,
(os_factor*len_psf-1)+1),
psf_up)
psf_low = np.interp(np.arange(len_psf), np.linspace(0, (os_factor*len_psf-1)/os_factor,
(os_factor*len_psf-1)+1),
psf_low)
# Assume that the PSF varies linearly over the interval.
# Calculate the weighting coefficients for each anchor.
diff = np.abs(anch_up - anch_low)
weight_low = 1 - (w - anch_low) / diff
weight_up = 1 - (anch_up - w) / diff
# Linearly interpolate the anchor profiles to the wavelength of interest.
profile = np.average(np.array([psf_low, psf_up]),
weights=np.array([weight_low, weight_up]), axis=0)
return profile
def validate_inputs(etrace):
    """Validate the input parameters for the empirical trace construction
    module, and determine the correct subarray for the data.

    Parameters
    ----------
    etrace : EmpiricalTrace instance
        Instance of an EmpiricalTrace object.

    Returns
    -------
    subarray : str
        The correct NIRISS/SOSS subarray identifier corresponding to the
        CLEAR dataframe.

    Raises
    ------
    ValueError
        If padding or oversampling are not integers, or the frame dimensions
        match no known subarray.
    """
    # Ensure padding and oversampling are integers. Use isinstance rather
    # than a type() comparison; also accept numpy integer types.
    if not isinstance(etrace.pad, (int, np.integer)):
        raise ValueError('Padding must be an integer.')
    if not isinstance(etrace.oversample, (int, np.integer)):
        raise ValueError('Oversampling factor must be an integer.')
    # Determine the correct subarray from the frame dimensions.
    dimy, dimx = np.shape(etrace.clear)
    subarrays = {96: 'SUBSTRIP96', 256: 'SUBSTRIP256', 2048: 'FULL'}
    if dimy not in subarrays:
        raise ValueError('Unrecognized subarray: {}x{}.'.format(dimy, dimx))
    return subarrays[dimy]
def verbose_to_bool(verbose):
    """Convert an integer verbosity level to the bool used to disable
    (False, for levels 2 and 3) or enable (True) progress bars.
    """
    return verbose not in (2, 3)
| 14,004 | 33.925187 | 107 | py |
applesoss | applesoss-main/applesoss/plotting.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 22 12:03 2021
@author: MCR
File containing all diagnostic plotting functions for the applesoss.
"""
import matplotlib.pyplot as plt
import numpy as np
def plot_badpix(clear, mask):
    """Show the difference between the original dataframe and the frame
    with bad pixels interpolated.
    """
    residuals = clear - mask
    plt.imshow(residuals, origin='lower', aspect='auto', vmin=-1, vmax=1)
    plt.colorbar()
    plt.xlabel('Spectral Pixel', fontsize=14)
    plt.ylabel('Spatial Pixel', fontsize=14)
    plt.show()
def plot_centroid(clear, centroid_dict):
    """Overplot the trace centroids extracted from the data over the data
    itself to verify accuracy.
    """
    plt.figure(figsize=(15, 3))
    # Only the order 1 curve carries the legend label.
    for order, cen in centroid_dict.items():
        label = 'trace centroids' if order == 'order 1' else None
        plt.plot(cen['X centroid'], cen['Y centroid'], c='black', ls='--',
                 label=label)
    plt.imshow(np.log10(clear), origin='lower', cmap='jet')
    plt.xlabel('Spectral Pixel', fontsize=14)
    plt.ylabel('Spatial Pixel', fontsize=14)
    plt.legend(fontsize=12)
    plt.show()
def plot_wing_simulation(stand, halfwidth, wing, wing2, ax, ystart, yend):
    """Diagnostic plot comparing a simulated profile against the fitted
    wing models on either side of the profile core.
    """
    plt.figure(figsize=(8, 5))
    plt.plot(np.log10(stand), label='Simulated Profile')
    # Mark the core region boundaries at +/- halfwidth around the center.
    core = 400 // 2
    for edge in (core + halfwidth, core - halfwidth):
        plt.axvline(edge, ls=':', c='black')
    # Upper wing model beyond yend, lower wing model below ystart.
    plt.plot(ax[yend:], np.log10(wing), c='red', label='Wing Model')
    plt.plot(ax[:ystart], np.log10(wing2), c='red')
    plt.legend(fontsize=12)
    plt.xlabel('Spatial Pixel', fontsize=14)
    plt.show()
| 1,920 | 29.015625 | 78 | py |
applesoss | applesoss-main/applesoss/edgetrigger_centroids.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 04 15:35:32 2020
@author: albert
Functions necessary to locate the centroids of the NIRISS SOSS trace using the
edgetrigger algorithm.
"""
from astropy.io import fits
from matplotlib import colors
import matplotlib.pyplot as plt
import numpy as np
import warnings
from applesoss.edgetrigger_utils import zero_roll, robust_polyfit, get_image_dim
def _plot_centroid(image, xtrace, ytrace):
    """Overplot the extracted trace positions on the image.

    Parameters
    ----------
    image : array[float]
        A 2D image of the detector.
    xtrace : array[float]
        The x coordinates of the trace to overplot on the image.
    ytrace : array[float]
        The y coordinates of the trace to overplot on the image.
    """
    nrows, ncols = image.shape
    # Square frames get a 1:1 aspect; rectangular frames are stretched
    # vertically for readability.
    if nrows == ncols:
        aspect, figsize = 1, (ncols/64, nrows/64)
    else:
        aspect, figsize = 2, (ncols/64, nrows/32)
    plt.figure(figsize=figsize)
    plt.title('Trace Centroids')
    plt.imshow(image, origin='lower', cmap='inferno', norm=colors.LogNorm(),
               aspect=aspect)
    plt.plot(xtrace, ytrace, lw=2, ls='--', c='black', label='Centroids')
    plt.xlabel('Spectral Pixel', fontsize=14)
    plt.ylabel('Spatial Pixel', fontsize=14)
    plt.legend(fontsize=12)
    plt.xlim(-0.5, ncols - 0.5)
    plt.ylim(-0.5, nrows - 0.5)
    plt.tight_layout()
    plt.show()
    plt.close()
def _plot_centroids(image, centroids):
    """Visualize the trace extracted by get_soss_centroids().

    Parameters
    ----------
    image : array[float]
        A 2D image of the detector.
    centroids : dict
        A dictionary containing the trace, as returned by
        get_soss_centroids().
    """
    # Determine an appropriate figure size.
    nrows, ncols = image.shape
    if nrows == ncols:
        aspect, figsize = 1, (ncols/64, nrows/64)
    else:
        aspect, figsize = 2, (ncols/64, nrows/32)
    # Make a figure showing the trace for all 3 orders.
    plt.figure(figsize=figsize)
    plt.title('Trace Positions')
    plt.imshow(image, origin='lower', cmap='inferno', norm=colors.LogNorm(),
               aspect=aspect)
    # Order 1 is always plotted; orders 2 and 3 only when present. Each
    # order shows the centroid plus the upper/lower trace edges.
    for order, color in [('order 1', 'orange'), ('order 2', 'black'),
                         ('order 3', 'red')]:
        if order != 'order 1' and order not in centroids:
            continue
        tmp = centroids[order]
        xpos = tmp['X centroid']
        ypos = tmp['Y centroid']
        halfwidth = tmp['trace widths'] / 2
        plt.plot(xpos, ypos, color=color, label=order.capitalize())
        plt.plot(xpos, ypos - halfwidth, color=color)
        plt.plot(xpos, ypos + halfwidth, color=color)
    plt.xlabel('Spectral Pixel', fontsize=14)
    plt.ylabel('Spatial Pixel', fontsize=14)
    plt.legend(fontsize=12)
    plt.xlim(-0.5, ncols - 0.5)
    plt.ylim(-0.5, nrows - 0.5)
    plt.tight_layout()
    plt.show()
    plt.close()
def edge_trigger(image, halfwidth=5, yos=1, verbose=False, outdir=None):
    """Detect the edges and center of the trace based on the minima and maxima
    of the derivative of the columns, which is computed in a running window
    along the columns of the detector image.
    Parameters
    ----------
    image : array[float]
        A 2D image of the detector.
    halfwidth : int
        Size of the window used when computing the derivatives.
    yos : int
        Oversampling factor of the image array along the y-direction.
    verbose : bool
        If set True some diagnostic plots will be made.
    outdir : str
        Directory to which to save results.
    Returns
    -------
    ytrace_max : array[float]
        Upper edge of the trace.
    ytrace_min : array[float]
        Lower edge of the trace.
    ytrace_best : array[float]
        Center of the trace.
    widths_best : array[float]
        Widths of the trace.
    """
    dimy, dimx = image.shape
    # Express the window half-size in (possibly oversampled) pixels.
    halfwidth = halfwidth * yos
    # Create coordinate arrays.
    xpix = np.arange(dimx)
    ypix = np.arange(dimy)
    _, ygrid = np.meshgrid(xpix, ypix)
    # Compute windowed slopes over the columns.
    slopevals = np.zeros_like(image)
    for irow in range(halfwidth, dimy-halfwidth):
        # Compute the window indices.
        ymin = irow - halfwidth
        ymax = irow + halfwidth + 1
        # Get the x and y data to find the slope to.
        datay = image[ymin:ymax, :]
        mask = np.isfinite(datay)
        # Need to set values NaN in y to NaN in x.
        datax = np.where(mask, ygrid[ymin:ymax, :], np.nan)
        # Compute the least-squares slope per column:
        # slope = sum((x - <x>)(y - <y>)) / sum((x - <x>)^2), ignoring NaNs.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            xmean = np.nanmean(datax, axis=0, keepdims=True)
            ymean = np.nanmean(datay, axis=0, keepdims=True)
        with np.errstate(invalid='ignore'):
            num = np.nansum((datax - xmean) * (datay - ymean), axis=0)
            denom = np.nansum((datax - xmean) ** 2, axis=0)
            slope = num / denom
        # Set slopes computed from < 3 datapoints to 0 (not NaN: zeros are
        # treated as "no detection" by the vals != 0 tests below).
        slopevals[irow, :] = np.where(np.sum(mask, axis=0) >= 3, slope, 0.)
    # Upper edge of the trace: steepest positive column slope.
    args = np.nanargmax(slopevals, axis=0)
    vals = np.nanmax(slopevals, axis=0)
    ytrace_max = np.where(vals != 0, ypix[args], np.nan)
    # Lower edge of the trace: steepest negative column slope.
    args = np.nanargmin(slopevals, axis=0)
    vals = np.nanmin(slopevals, axis=0)
    ytrace_min = np.where(vals != 0, ypix[args], np.nan)
    # Scan through a range of trace widths (18-26 native pixels).
    slopes_best = np.zeros_like(xpix)
    ytrace_best = np.zeros_like(xpix)
    widths_best = np.zeros_like(xpix)
    for width in range(18*yos, 27*yos):
        # Add the slope and its offset negative.
        comb = slopevals - zero_roll(slopevals, -width)
        # Find the maximum resulting slope.
        args = np.nanargmax(comb, axis=0)
        vals = np.nanmax(comb, axis=0)
        # Update the best values wherever this width improves the response.
        mask = (vals > slopes_best)
        slopes_best = np.where(mask, vals, slopes_best)
        ytrace_best = np.where(mask, ypix[args], ytrace_best)
        widths_best = np.where(mask, width, widths_best)
    # The center is the lower edge plus half the best width. Set the y
    # position to NaN if the best slope was zero (no detection).
    ytrace_best = np.where(slopes_best != 0, ytrace_best + widths_best/2.,
                           np.nan)
    widths_best = np.where(slopes_best != 0, widths_best, np.nan)
    if verbose:
        nrows, ncols = image.shape
        plt.figure(figsize=(ncols/128, nrows/128))
        plt.title('Edge-trigger Trace Positions')
        plt.imshow(image, origin='lower', cmap='inferno',
                   norm=colors.LogNorm())
        plt.plot(ytrace_min, lw=2, ls='--', c='black', label='Edges')
        plt.plot(ytrace_max, lw=2, ls='--', c='black')
        plt.plot(ytrace_best, lw=2, c='black', label='Centroids')
        plt.xlabel('Spectral Pixel', fontsize=14)
        plt.ylabel('Spatial Pixel', fontsize=14)
        plt.legend(fontsize=12)
        plt.tight_layout()
        if outdir is not None:
            plt.savefig(outdir+'/edge_trigger_ymedian{:4.0f}.png'.format(np.nanmedian(ytrace_best)))
        plt.show()
        plt.close()
    return ytrace_max, ytrace_min, ytrace_best, widths_best
def get_centroids_edgetrigger(image, header=None, mask=None, poly_order=11,
                              halfwidth=5, mode='combined', verbose=False,
                              outdir=None):
    """Determine the x, y coordinates of the trace using the derivatives
    along the y-axis. Works for either order if there is no contamination.

    Parameters
    ----------
    image : array[float]
        A 2D image of the detector.
    header : astropy.io.fits.Header instance
        The header from one of the SOSS reference files.
    mask : array[bool]
        A boolean array of the same shape as image. Pixels corresponding to
        True values will be masked.
    poly_order : int, None
        Order of the polynomial to fit to the extracted trace positions.
        If None, the raw measurements are returned.
    halfwidth : int
        Size of the window used when computing the derivatives.
    mode : str
        Which trace values to use. Can be 'bottomedge', 'topedge', 'mean'
        or 'combined'.
    verbose : bool
        If set True some diagnostic plots will be made.
    outdir : str
        Directory to which to save results.

    Returns
    -------
    xtrace : array[float]
        X-coordinates of the trace.
    ytrace : array[float]
        Y-coordinates of the trace.
    tracewidth : array[float]
        Trace widths.
    param : array[float]
        Best fitting polynomial parameters.
    """
    # With no mask given, use every pixel.
    if mask is None:
        mask = np.zeros_like(image, dtype='bool')
    # Determine the image dimensions and oversampling.
    dims = get_image_dim(image, header=header, verbose=verbose)
    dimx, dimy, xos, yos, xnative, ynative, padding, refpix_mask = dims
    # Replace masked and reference pixels with NaNs.
    image_masked = np.where(mask | ~refpix_mask, np.nan, image)
    # Run the edge-trigger algorithm on the masked image.
    edge_outs = edge_trigger(image_masked, halfwidth=halfwidth, yos=yos,
                             verbose=verbose, outdir=outdir)
    ytrace_max, ytrace_min, ytrace_best, widths_best = edge_outs
    # Select the requested definition of the trace position.
    choices = {'bottomedge': ytrace_max,
               'topedge': ytrace_min,
               'mean': (ytrace_min + ytrace_max)/2.,
               'combined': ytrace_best}
    if mode not in choices:
        raise ValueError('Unknown mode: {}'.format(mode))
    ytrace = choices[mode]
    # Optionally smooth the measured positions with a polynomial fit and use
    # the fit as the true y-positions.
    xtrace = np.arange(dimx)
    finite = np.isfinite(ytrace)
    if poly_order is None:
        # Return the raw measurements.
        param = []
    else:
        param = robust_polyfit(xtrace[finite], ytrace[finite], poly_order)
        ytrace = np.polyval(param, xtrace)
    # If verbose visualize the result.
    if verbose is True:
        _plot_centroid(image_masked, xtrace, ytrace)
    return xtrace, ytrace, widths_best, param
def build_mask_vertical(shape, xlims, mask_right=True, mask_between=True):
    """Mask along the vertical(s) given by xlims. If xlims contains a single
    element, pixels to the right or left of it are masked according to
    mask_right (and mask_between is ignored). If xlims contains two
    elements, pixels between or outside these values are masked according
    to mask_between (and mask_right is ignored).

    Parameters
    ----------
    shape : tuple[int]
        Tuple containing the intended shape of the mask array.
    xlims : list[float]
        The column indices to use as the limits of the masked area.
    mask_right : bool
        If True mask pixels to the right of xlims, otherwise mask to the
        left.
    mask_between : bool
        If True mask pixels between xlims, otherwise mask outside.

    Returns
    -------
    mask : array[bool]
        A mask that removes a vertical region according to xlims.
    """
    dimy, dimx = shape
    # Column index of every pixel, broadcast to the full image shape.
    xgrid = np.broadcast_to(np.arange(dimx), (dimy, dimx))
    nlims = np.size(xlims)
    if nlims == 1:
        # Mask to one side of a single column.
        mask = (xgrid >= xlims[0]) if mask_right else (xgrid < xlims[0])
    elif nlims == 2:
        # Mask interior or exterior to two columns.
        inside = (xgrid >= xlims[0]) & (xgrid < xlims[1])
        mask = inside if mask_between else ~inside
    else:
        msg = 'xlims must be a list or array of up to two indices.'
        raise ValueError(msg)
    return mask
def build_mask_sloped(shape, point1, point2, mask_above=True, verbose=False):
    """Mask pixels above or below the boundary line defined by point1 and
    point2.

    Parameters
    ----------
    shape : tuple[int]
        Tuple containing the intended shape of the mask array.
    point1 : list[float]
        The first x, y pair defining the boundary line.
    point2 : list[float]
        The second x, y pair defining the boundary line.
    mask_above : bool
        If True mask pixels above the boundary line, else mask below.
    verbose : bool
        If True be verbose.

    Returns
    -------
    mask : array[bool]
        A mask that removes a diagonal region along the slope defined by
        point1 and point2.
    """
    dimy, dimx = shape
    # Fit a first-order polynomial through the two points to get the line.
    xvals = np.array([point1[0], point2[0]])
    yvals = np.array([point1[1], point2[1]])
    param = np.polyfit(xvals, yvals, 1)
    # Evaluate the line at every column.
    yline = np.polyval(param, np.arange(dimx))
    if verbose:
        print('line fit param:', param)
    # Signed vertical distance of every pixel from the line.
    ygrid = np.broadcast_to(np.arange(dimy)[:, None], (dimy, dimx))
    offset = ygrid - yline
    # Keep the side requested by mask_above.
    mask = offset >= 0 if mask_above else offset < 0
    return mask
def build_mask_256(subarray='SUBSTRIP256', apex_order1=None):
    """Restrict the analysis to a (N, 2048) section of the image, where N is
    256 or less. Normally this only applies to the FULL subarray, masking
    everything but the SUBSTRIP256 region. When apex_order1 is given rows
    from apex_order1 - 40 to apex_order1 + 216 are kept instead.

    Parameters
    ----------
    subarray : str
        The subarray for which to build a mask.
    apex_order1 : float
        The y-position of the order1 apex at 1.3 microns, in the given
        subarray.

    Returns
    -------
    mask_256 : array[bool]
        A mask that removes any area not related to the trace of the
        target.
    """
    dimx = 2048
    # Check the subarray value and set dimy accordingly.
    dims = {'FULL': 2048, 'SUBSTRIP96': 96, 'SUBSTRIP256': 256}
    if subarray not in dims:
        msg = 'Unknown subarray: {}'
        raise ValueError(msg.format(subarray))
    dimy = dims[subarray]
    if apex_order1 is None:
        # Default apex position, defined for SUBSTRIP256 and shifted to the
        # other subarrays.
        apex_order1 = 40
        if subarray == 'FULL':
            apex_order1 += 1792
        if subarray == 'SUBSTRIP96':
            apex_order1 += -10
    # Round the apex value down to an integer row index.
    apex_order1 = int(apex_order1)
    # Start fully masked, then open up the 256-row band around the apex.
    mask_256 = np.ones((dimy, dimx), dtype='bool')
    # In SUBSTRIP256 the apex would be at y ~ 40, hence 40 rows below and
    # 216 rows above are kept.
    rowmin = max(apex_order1 - 40, 0)
    rowmax = min(apex_order1 + 216, dimy)
    mask_256[rowmin:rowmax, :] = False
    return mask_256
def build_mask_trace(ytrace, subarray='SUBSTRIP256', halfwidth=30,
                     extend_below=False, extend_above=False):
    """Mask out the trace in a given subarray based on the y-positions
    provided. A band of pixels around the trace position of
    width = 2*halfwidth will be masked. Optionally extend_above and
    extend_below can be used to mask all pixels above or below the trace.

    Parameters
    ----------
    ytrace : array[float]
        The trace y-position at each column, must have shape = (2048,).
    subarray : str
        The subarray for which to build a mask.
    halfwidth : float
        The size of the window to mask around the trace.
    extend_below : bool
        If True mask all pixels below the trace.
    extend_above : bool
        If True mask all pixels above the trace.

    Returns
    -------
    mask_trace : array[bool]
        A mask that removes an area centered on the given trace positions.
    """
    dimx = 2048
    # Check the shape of the y-positions.
    if np.shape(ytrace) != (dimx,):
        raise ValueError('ytrace must have shape (2048,)')
    # Check the subarray value and set dimy accordingly.
    dims = {'FULL': 2048, 'SUBSTRIP96': 96, 'SUBSTRIP256': 256}
    if subarray not in dims:
        msg = 'Unknown subarray: {}'
        raise ValueError(msg.format(subarray))
    dimy = dims[subarray]
    # Extending both above and below would mask everything.
    if extend_below and extend_above:
        msg = 'Only one of extend_below, extend_above should be used.'
        raise ValueError(msg)
    # Signed vertical distance of every pixel from the trace center.
    dist = np.arange(dimy)[:, None] - ytrace[None, :]
    # Mask the band within a halfwidth of the trace center.
    mask_trace = np.abs(dist) < halfwidth
    if extend_below:
        mask_trace = mask_trace | (dist < 0)
    if extend_above:
        mask_trace = mask_trace | (dist >= 0)
    return mask_trace
def build_mask_order2_contaminated(ytrace_o1, ytrace_o3,
                                   subarray='SUBSTRIP256', halfwidth_o1=25,
                                   halfwidth_o3=15, xlim=150):
    """Build a mask that isolates the contaminated part of the order 2
    trace. This is done by masking the order 1 trace and everything below,
    the order 3 trace and everything above and all pixels blue-ward (to the
    right) of xlim.

    Parameters
    ----------
    ytrace_o1 : array[float]
        Y position of the order 1 trace at every column.
    ytrace_o3 : array[float]
        Y position of the order 3 trace at every column.
    subarray : str
        The subarray for which to build a mask.
    halfwidth_o1 : float
        The size of the window to mask around the order 1 trace.
    halfwidth_o3 : float
        The size of the window to mask around the order 3 trace.
    xlim : float
        The boundary for masking pixels blue-wards (to the right).

    Returns
    -------
    mask : array[bool]
        A mask that removes everything but the contaminated part of the
        order 2 trace.
    """
    dimx = 2048
    dims = {'FULL': 2048, 'SUBSTRIP96': 96, 'SUBSTRIP256': 256}
    if subarray not in dims:
        msg = 'Unknown subarray: {}'
        raise ValueError(msg.format(subarray))
    dimy = dims[subarray]
    # The order 1 trace and everything below it.
    mask_trace_o1 = build_mask_trace(ytrace_o1, subarray=subarray,
                                     halfwidth=halfwidth_o1,
                                     extend_below=True)
    # The order 3 trace and everything above it.
    mask_trace_o3 = build_mask_trace(ytrace_o3, subarray=subarray,
                                     halfwidth=halfwidth_o3,
                                     extend_above=True)
    # Everything blue-ward of xlim.
    mask_blue = build_mask_vertical((dimy, dimx), xlims=[xlim],
                                    mask_right=True)
    # The final mask is the union of the three.
    return mask_trace_o1 | mask_trace_o3 | mask_blue
def build_mask_order2_uncontaminated(ytrace_o1, ytrace_o3,
                                     subarray='SUBSTRIP256', halfwidth_o1=25,
                                     halfwidth_o3=15, xlims=None, point1=None,
                                     point2=None, apex_order1=None):
    """Build a mask that isolates the uncontaminated part of the order 2
    trace. This is done by masking the order 1 trace and everything below,
    the order 3 trace and everything above, all pixels outside of the range
    defined by xlims and all pixels below the line defined by point 1 and
    point 2.

    Parameters
    ----------
    ytrace_o1 : array[float]
        Y position of the order 1 trace at every column.
    ytrace_o3 : array[float]
        Y position of the order 3 trace at every column.
    subarray : str
        The subarray for which to build a mask.
    halfwidth_o1 : float
        The size of the window to mask around the order 1 trace.
    halfwidth_o3 : float
        The size of the window to mask around the order 3 trace.
    xlims : list[float]
        X-pixel limits.
    point1 : list[float]
        The first x, y pair defining the boundary line.
    point2 : list[float]
        The second x, y pair defining the boundary line.
    apex_order1 : float, np.ndarray[float]
        The y-position of the order1 apex at 1.3 microns, in the given
        subarray. Only used when point1 and point2 are left at their
        defaults.

    Returns
    -------
    mask : array[bool]
        A mask that removes everything but the uncontaminated part of the
        order 2 trace.
    """
    dimx = 2048
    if subarray == 'FULL':
        dimy = 2048
    elif subarray == 'SUBSTRIP96':
        dimy = 96
    elif subarray == 'SUBSTRIP256':
        dimy = 256
    else:
        msg = 'Unknown subarray: {}'
        raise ValueError(msg.format(subarray))
    if xlims is None:
        xlims = [700, 1800]
    if (point1 is None) ^ (point2 is None):
        msg = 'point1 and point2 must both be None or both be set.'
        raise ValueError(msg)
    elif (point1 is None) & (point2 is None):
        # If no points were given use default values (defined for
        # SUBSTRIP256, shifted to the requested subarray).
        point1 = [1249, 31]
        point2 = [1911, 253]
        if subarray == 'FULL':
            point1[1] += 1792
            point2[1] += 1792
        if subarray == 'SUBSTRIP96':
            point1[1] += -10
            point2[1] += -10
        # If apex_order1 was given shift the default points as needed.
        if apex_order1 is not None:
            apex_default = 40  # Assuming SUBSTRIP256.
            if subarray == 'FULL':
                apex_default += 1792
            if subarray == 'SUBSTRIP96':
                apex_default += -10
            # Shift points based on apex_order1.
            offset = apex_order1 - apex_default
            point1[1] += offset
            point2[1] += offset
    elif apex_order1 is not None:
        # Bug fix: previously this warning was attached to the inner apex
        # check inside the defaults branch, so it printed exactly when the
        # DEFAULT points were used without an apex value. It belongs here:
        # user-provided points mean apex_order1 is ignored.
        msg = ('Using user-provided values for point1 and point2, '
               'apex_order1 will be ignored.')
        print(msg)
    # Mask the order 1 trace and everything below.
    mask_trace_o1 = build_mask_trace(ytrace_o1, subarray=subarray,
                                     halfwidth=halfwidth_o1,
                                     extend_below=True)
    # Mask the order 3 trace and everything above.
    mask_trace_o3 = build_mask_trace(ytrace_o3, subarray=subarray,
                                     halfwidth=halfwidth_o3,
                                     extend_above=True)
    # Mask what is on the left side where orders 1 and 2 are well blended.
    mask_vertical = build_mask_vertical((dimy, dimx), xlims,
                                        mask_between=False)
    # Mask the corner below the order 2 trace to remove the wings of the
    # order 1 trace.
    mask_sloped = build_mask_sloped((dimy, dimx), point1, point2,
                                    mask_above=False)
    # Combine the masks.
    mask = (mask_trace_o1 | mask_trace_o3 | mask_vertical | mask_sloped)
    return mask
def build_mask_order3(subarray='SUBSTRIP256', xlim=None, point1=None,
                      point2=None, apex_order1=None):
    """Builds a mask that isolates the order 3 trace.
    This is done by masking all pixels blue-wards (to the right) of xlim
    where the order 3 transmission goes to zero, and all pixels below the
    line defined by point1 and point2 (the order1 trace and order 2 trace).

    Parameters
    ----------
    subarray : str
        The subarray for which to build a mask.
    xlim : float
        The boundary for masking pixels blue-ward (to the right).
    point1 : list[float]
        The first x, y pair defining the boundary line.
    point2 : list[float]
        The second x, y pair defining the boundary line.
    apex_order1 : float, np.ndarray[float]
        The y-position of the order1 apex at 1.3 microns, in the given
        subarray. Only used when point1 and point2 are left at their
        defaults.

    Returns
    -------
    mask : array[bool]
        A mask that removes everything but the order 3 trace.
    """
    dimx = 2048
    if subarray == 'FULL':
        dimy = 2048
    elif subarray == 'SUBSTRIP96':
        dimy = 96
    elif subarray == 'SUBSTRIP256':
        dimy = 256
    else:
        msg = 'Unknown subarray: {}'
        raise ValueError(msg.format(subarray))
    if subarray == 'SUBSTRIP96':
        # Create an empty mask.
        mask = np.zeros((dimy, dimx), dtype='bool')
        # Nothing to be done because order 3 can not be present.
        print('Warning. No mask produced for order 3 when subarray=SUBSTRIP96')
        return mask
    if xlim is None:
        xlim = 700
    if (point1 is None) ^ (point2 is None):
        msg = 'point1 and point2 must both be None or both be set.'
        raise ValueError(msg)
    elif (point1 is None) & (point2 is None):
        # If no points were given use default values (defined for
        # SUBSTRIP256, shifted to the requested subarray).
        point1 = [0, 132]
        point2 = [1000, 163]
        if subarray == 'FULL':
            point1[1] += 1792
            point2[1] += 1792
        # If apex_order1 was given shift the default points as needed.
        if apex_order1 is not None:
            apex_default = 40  # Assuming SUBSTRIP256.
            if subarray == 'FULL':
                apex_default += 1792
            # Shift points based on apex_order1.
            offset = apex_order1 - apex_default
            point1[1] += offset
            point2[1] += offset
    elif apex_order1 is not None:
        # Bug fix: previously this warning was attached to the inner apex
        # check inside the defaults branch, so it printed exactly when the
        # DEFAULT points were used without an apex value. It belongs here:
        # user-provided points mean apex_order1 is ignored.
        msg = ('Using user-provided values for point1 and point2, '
               'apex_order1 will be ignored.')
        print(msg)
    # Check how close the boundary line is to the top of the subarray.
    if point1[1] > (dimy - 25 - 10):
        msg = ('Warning: masking for order 3 leaves too little of '
               'order 3 to fit position.')
        print(msg)
    # Mask everything beyond where the order 3 transmission approaches zero.
    mask_vertical = build_mask_vertical((dimy, dimx), [xlim], mask_right=True)
    # Mask everything below order 3.
    mask_sloped = build_mask_sloped((dimy, dimx), point1, point2,
                                    mask_above=False)
    # Combine the masks.
    mask = mask_vertical | mask_sloped
    return mask
def wavelength_calibration(tracetable, xpos, order=1):
    """Find the wavelengths corresponding to a set of x-positions using the
    trace table reference file.

    Parameters
    ----------
    tracetable : str
        Path to SOSS tracetable reference file.
    xpos : array[float]
        The array of x-positions to calibrate.
    order : int
        The trace order the x-positions correspond to.

    Returns
    -------
    wavelengths : array[float]
        An array of wavelengths (in µm) corresponding to xpos.
    """
    try:
        # Use the reference file if it can be read.
        tt = fits.getdata(tracetable, order)
        ref_wavelengths, ref_xpos = tt['WAVELENGTH'], tt['X']
        # Sort so the reference positions are in ascending order, as
        # required by np.interp.
        args = np.argsort(ref_xpos)
        ref_xpos, ref_wavelengths = ref_xpos[args], ref_wavelengths[args]
        # Find the wavelengths corresponding to the input array via
        # interpolation.
        wavelengths = np.interp(xpos, ref_xpos, ref_wavelengths)
    except Exception:
        # Fall back on an approximate linear wavelength calibration.
        # Bug fix: the previous bare `except:` also swallowed
        # KeyboardInterrupt and SystemExit.
        if order == 1:
            dispersion = -0.9718  # nm/pixel
            w0 = 2.833  # Wavelength at pixel 0 (µm).
        elif order == 2:
            dispersion = -0.467
            w0 = 1.423
        else:
            dispersion = -0.310
            w0 = 0.956
        # Convert the dispersion from nm/pixel to µm/pixel.
        wavelengths = w0 + xpos * (dispersion / 1000)
    return wavelengths
def calibrate_widths(tracetable, width_o1, width_o2=None, width_o3=None,
                     verbose=False, outdir=None):
    """Model the trace-width vs wavelength relation as a power law,
    width = w0 * wavelength**m, by fitting a robust straight line in
    log-log space to the measured widths of all available orders.
    Parameters
    ----------
    tracetable : str
        Path to SOSS tracetable reference file.
    width_o1 : array[float]
        Order 1 trace width per column; shape must be (2048,).
    width_o2 : array[float]
        Order 2 trace width per column; shape must be (2048,).
    width_o3 : array[float]
        Order 3 trace width per column; shape must be (2048,).
    verbose : bool
        If True, produce a diagnostic plot of the fit.
    outdir : str
        Directory to which to save the diagnostic plot.
    Returns
    -------
    pars_width : list[float]
        Best-fit [m, log(w0)] parameters of the log-log linear fit.
    """
    dimx = 2048
    # Validate the supplied width arrays; substitute all-NaN placeholders
    # for orders that were not provided.
    if np.shape(width_o1) != (dimx,):
        raise ValueError('width_o1 must have shape (2048,)')
    if width_o2 is None:
        width_o2 = np.full(dimx, fill_value=np.nan)
    elif np.shape(width_o2) != (dimx,):
        raise ValueError('width_o2_uncont must have shape (2048,)')
    if width_o3 is None:
        width_o3 = np.full(dimx, fill_value=np.nan)
    elif np.shape(width_o3) != (dimx,):
        raise ValueError('width_o3_uncont must have shape (2048,)')
    # Convert each column index to a wavelength for every order.
    pixels = np.arange(dimx)
    lba_o1 = wavelength_calibration(tracetable, pixels, order=1)
    lba_o2 = wavelength_calibration(tracetable, pixels, order=2)
    lba_o3 = wavelength_calibration(tracetable, pixels, order=3)
    # Pool the measurements from all orders into single arrays.
    lba_all = np.concatenate((lba_o1, lba_o2, lba_o3), axis=None)
    width_all = np.concatenate((width_o1, width_o2, width_o3), axis=None)
    # The power law is linear in log-log space: fit a robust first-order
    # polynomial to the finite data points.
    good = np.isfinite(width_all) & np.isfinite(lba_all)
    pars_width = robust_polyfit(np.log(lba_all[good]),
                                np.log(width_all[good]), 1)
    if verbose:
        # Evaluate the best-fit power law over the full wavelength span.
        lba_fit = np.linspace(np.nanmin(lba_all), np.nanmax(lba_all), 101)
        w0, m = np.exp(pars_width[1]), pars_width[0]  # w = w0 * lba^m
        width_fit = w0 * lba_fit ** m
        # Plot the per-order widths (orders 2 and 3 offset for visibility)
        # together with the joint fit.
        plt.figure(figsize=(8, 5))
        plt.scatter(lba_o1, width_o1, marker=',', s=1, color='red',
                    label='Order 1')
        if np.any(np.isfinite(width_o2)):
            plt.scatter(lba_o2, width_o2 + 0.05, marker=',', s=1,
                        color='orange', label='Order 2')
        if np.any(np.isfinite(width_o3)):
            plt.scatter(lba_o3, width_o3 + 0.10, marker=',', s=1, color='navy',
                        label='Order 3')
        plt.plot(lba_fit, width_fit, color='black', linewidth=5,
                 label='Joint Fit:\nwidth = {:6.2F} $\\lambda**({:6.4F})$'.format(w0, m))
        plt.xlabel('Wavelength (microns)', fontsize=12)
        plt.ylabel('Trace Width (pixels)', fontsize=12)
        plt.legend(fontsize=12)
        plt.tight_layout()
        if outdir is not None:
            plt.savefig(outdir+'/soss_centroids_calibrate_width.png')
        plt.show()
        plt.close()
    return pars_width
def get_soss_centroids(image, tracetable, mask=None, subarray='SUBSTRIP256',
                       halfwidth=2, poly_orders=None, apex_order1=None,
                       calibrate=True, verbose=False, outdir=None):
    """Determine the traces positions on a real image (native size) with as
    few assumptions as possible using the 'edge trigger' method.
    The algorithm assumes:
    1) The brightest order is order 1 and the target order 1 is the brightest
    of all order 1 traces present.
    2) Order 2 has a minimum in transmission between ~1.0 and ~1.2 microns.
    3) Order 2 widths are the same as order 1 width for the same wavelengths.
    Parameters
    ----------
    image : array[float]
        A 2D image of the detector.
    tracetable : str
        Path to SOSS tracetable reference file.
    mask : array[bool]
        A boolean array of the same shape as image. Pixels corresponding to
        True values will be masked.
    subarray : str
        The subarray for which to build a mask.
    halfwidth : int
        The size of the window used when computing the derivatives of the
        'edge trigger' method.
    poly_orders : dict
        Dictionary of polynomial orders to fit to the extracted trace
        positions for each spectral order.
    apex_order1 : float
        The y-position of the order1 apex at 1.3 microns, in the given
        subarray. A rough estimate is sufficient as it is only used to mask
        rows when subarray='FULL' to ensure that the target of interest is
        detected instead of a field target.
    calibrate : bool
        If True model the wavelength trace width relation, otherwise use the
        CV3 parameters. Default is True.
    verbose : bool
        If set True some diagnostic plots will be made. Default is False.
    outdir : str
        Directory to which to save results.
    Returns
    -------
    trace_dict : dict
        A dictionary containing the trace x, y, width and polynomial fit
        parameters for each order.
    """
    # Default polynomial orders used to fit each trace; any user-supplied
    # entries in poly_orders override these on a per-order basis.
    default_orders = {'order 1': 11,
                      'order 2': 5,
                      'order 3': 3}
    if poly_orders is not None:
        default_orders = {**default_orders, **poly_orders}
    # Initialize output dictionary.
    centroids = dict()
    # Build a mask that restricts the analysis to a SUBSTRIP256-like region
    # centered on the target trace.
    mask_256 = build_mask_256(subarray=subarray, apex_order1=apex_order1)
    # Combine the subsection mask with the user specified mask.
    if mask is not None:
        mask_256 = mask_256 | mask
    # NOTE: 'verbose & (...)' is a bitwise AND of two booleans; equivalent to
    # 'and' here.
    if verbose & (outdir is not None):
        # Dump the masked image to disk for inspection.
        hdu = fits.PrimaryHDU()
        hdu.data = np.where(mask_256, np.nan, image)
        hdu.writeto(outdir+'/mask_256.fits', overwrite=True)
    # Get the order 1 trace position.
    result = get_centroids_edgetrigger(image, mask=mask_256,
                                       poly_order=default_orders['order 1'],
                                       halfwidth=halfwidth, mode='combined',
                                       verbose=verbose, outdir=outdir)
    x_o1, y_o1, w_o1, par_o1 = result
    # Add parameters to output dictionary.
    o1_dict = dict()
    o1_dict['X centroid'] = x_o1
    o1_dict['Y centroid'] = y_o1
    o1_dict['trace widths'] = w_o1
    o1_dict['poly coefs'] = par_o1
    centroids['order 1'] = o1_dict
    # For SUBSTRIP96 only the order 1 can be measured.
    if subarray == 'SUBSTRIP96':
        if verbose:
            # Make a figure showing the order 1 trace.
            _plot_centroids(image, centroids)
        return centroids
    # Update the order1 apex based on the extracted trace.
    apex_order1 = np.nanmin(y_o1)
    # Make a mask to isolate the order 3 trace and combine it with the
    # user-specified mask.
    mask_o3 = build_mask_order3(subarray=subarray, apex_order1=apex_order1)
    if mask is not None:
        mask_o3 = mask_o3 | mask
    if verbose & (outdir is not None):
        hdu = fits.PrimaryHDU()
        hdu.data = np.where(mask_o3, np.nan, image)
        hdu.writeto(outdir+'/mask_o3.fits', overwrite=True)
    # Get the order 3 trace position.
    result = get_centroids_edgetrigger(image, mask=mask_o3,
                                       poly_order=default_orders['order 3'],
                                       halfwidth=halfwidth, mode='combined',
                                       verbose=verbose, outdir=outdir)
    x_o3, y_o3, w_o3, par_o3 = result
    # Add parameters to output dictionary.
    o3_dict = dict()
    o3_dict['X centroid'] = x_o3
    o3_dict['Y centroid'] = y_o3
    o3_dict['trace widths'] = w_o3
    o3_dict['poly coefs'] = par_o3
    centroids['order 3'] = o3_dict
    # Make masks for the second order trace - split in two segments:
    # A) Uncontaminated region 700 < x < 1800 - fit both edges combined
    # (default).
    # B) Contaminated region (x = 0-200) - fit only the top edge.
    # Make a mask to isolate the uncontaminated order 2 trace and combine it
    # with the user-specified mask.
    mask_o2_uncont = build_mask_order2_uncontaminated(y_o1, y_o3,
                                                      subarray=subarray,
                                                      apex_order1=apex_order1)
    if mask is not None:
        mask_o2_uncont = mask_o2_uncont | mask
    if verbose & (outdir is not None):
        hdu = fits.PrimaryHDU()
        hdu.data = np.where(mask_o2_uncont, np.nan, image)
        hdu.writeto(outdir+'/mask_o2_uncont.fits', overwrite=True)
    # Get the raw trace positions for the uncontaminated part of the order 2
    # trace.
    result = get_centroids_edgetrigger(image, mask=mask_o2_uncont,
                                       poly_order=None, halfwidth=halfwidth,
                                       mode='combined', verbose=verbose,
                                       outdir=outdir)
    x_o2_uncont, y_o2_uncont, w_o2_uncont, par_o2_uncont = result
    if calibrate:
        # Fit the wavelength-width power law from orders 1 and 2 widths.
        pars_width = calibrate_widths(tracetable, w_o1, w_o2_uncont,
                                      verbose=verbose, outdir=outdir)
    else:
        # Use pre-computed parameters from the CV3 deepstack.
        pars_width = [-0.20711659, 3.16387517]
    w0, m = np.exp(pars_width[1]), pars_width[0] # w = w0 * lba^m
    # Make a mask to isolate the contaminated order 2 trace and combine it
    # with the user-specified mask.
    mask_o2_cont = build_mask_order2_contaminated(y_o1, y_o3,
                                                  subarray=subarray)
    if mask is not None:
        mask_o2_cont = mask_o2_cont | mask
    if verbose & (outdir is not None):
        hdu = fits.PrimaryHDU()
        hdu.data = np.where(mask_o2_cont, np.nan, image)
        hdu.writeto(outdir+'/mask_o2_cont.fits', overwrite=True)
    # Get the raw top-edge positions of the contaminated order 2 trace.
    result = get_centroids_edgetrigger(image, mask=mask_o2_cont,
                                       poly_order=None, halfwidth=halfwidth,
                                       mode='topedge', verbose=verbose,
                                       outdir=outdir)
    x_o2_top, y_o2_top, w_o2_top, par_o2_top = result
    # Convert pixel positions to wavelengths for order 2.
    lba_o2_top = wavelength_calibration(tracetable, x_o2_top, order=2)
    # Use the wavelength width relation to obtain the order 2 trace width.
    w_o2_cont = np.where(np.isfinite(w_o2_top), w0 * lba_o2_top**m, np.nan)
    # Finally combine the top-edge positions and the width to get an estimate
    # of the trace center.
    x_o2_cont = np.copy(x_o2_top)
    y_o2_cont = y_o2_top - w_o2_cont/2.
    # Combine the trace positions from the uncontaminated and contaminated
    # sections. Prefer the uncontaminated measurements wherever they exist;
    # fall back to the contaminated estimates elsewhere.
    mask_comb = np.isfinite(y_o2_uncont)
    x_o2 = np.where(mask_comb, x_o2_uncont, x_o2_cont)
    y_o2 = np.where(mask_comb, y_o2_uncont, y_o2_cont)
    w_o2 = np.where(mask_comb, w_o2_uncont, w_o2_cont)
    # Fit the combined order 2 trace position with a polynomial.
    mask_fit = np.isfinite(x_o2) & np.isfinite(y_o2)
    if default_orders['order 2'] is None:
        par_o2 = []
    else:
        par_o2 = robust_polyfit(x_o2[mask_fit], y_o2[mask_fit],
                                default_orders['order 2'])
        y_o2 = np.polyval(par_o2, x_o2)
    if verbose:
        # Determine an appropriate figure size.
        nrows, ncols = image.shape
        if subarray == 'FULL':
            aspect = 1
            figsize = ncols/64, nrows/64
        else:
            aspect = 2
            figsize = ncols/64, nrows/32
        # Make a figure showing how the order 2 trace was built from segments
        # A and B.
        plt.figure(figsize=figsize)
        plt.title('Order 2 Trace Positions')
        plt.imshow(image, origin='lower', cmap='inferno',
                   norm=colors.LogNorm(), aspect=aspect)
        plt.plot(x_o2_cont, y_o2_cont, color='red', label='Contaminated')
        plt.plot(x_o2_uncont, y_o2_uncont, color='navy',
                 label='Uncontaminated')
        plt.plot(x_o2, y_o2, color='black', label='Polynomial Fit')
        plt.xlabel('Spectral Pixel', fontsize=14)
        plt.ylabel('Spatial Pixel', fontsize=14)
        plt.legend(fontsize=12)
        plt.xlim(-0.5, ncols - 0.5)
        plt.ylim(-0.5, nrows - 0.5)
        plt.tight_layout()
        if outdir is not None:
            plt.savefig(outdir+'/soss_centroid_order2tracepositions.png')
        plt.show()
        plt.close()
    # Add parameters to output dictionary.
    o2_dict = dict()
    o2_dict['X centroid'] = x_o2
    o2_dict['Y centroid'] = y_o2
    o2_dict['trace widths'] = w_o2
    o2_dict['poly coefs'] = par_o2
    centroids['order 2'] = o2_dict
    if verbose:
        # Make a figure showing the trace for all orders.
        _plot_centroids(image, centroids)
    return centroids
| 42,334 | 32.49288 | 100 | py |
applesoss | applesoss-main/applesoss/__init__.py | 0 | 0 | 0 | py | |
applesoss | applesoss-main/applesoss/edgetrigger_utils.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 04 15:36:22 2020
@author: albert, MCR
Utility functions for the edgetrigger algorithm
"""
import numpy as np
from scipy.optimize import least_squares
def zero_roll(a, shift):
    """Like np.roll but the wrapped around part is set to zero. Only works
    along the first axis of the array.
    Parameters
    ----------
    a : array
        The input array
    shift : int
        The number of rows to shift by.
    Returns
    -------
    result : np.array
        The input array with rows shifted.
    """
    result = np.zeros_like(a)
    if shift == 0:
        # A shift of zero must return the array unchanged. The positive
        # branch below would compute a[:-0], an empty slice, which silently
        # zeroed the whole result.
        result[:] = a
    elif shift > 0:
        result[shift:] = a[:-shift]
    else:
        result[:shift] = a[-shift:]
    return result
def robust_polyfit(x, y, order, maxiter=5, nstd=3.):
    """Perform a robust polynomial fit.
    Iteratively sigma-clips outliers from an ordinary least-squares
    polynomial fit, then refines the surviving points with a Huber-loss
    least-squares minimization.
    Parameters
    ----------
    x : array, list
        Independent fitting variable.
    y : array, list
        Dependent fitting variable.
    order : int
        Polynomial order to fit.
    maxiter : int
        Number of iterations for outlier rejection.
    nstd : float
        Number of standard deviations to consider a value an outlier.
    Returns
    -------
    res : array[float]
        The best fitting polynomial parameters.
    """

    def _residuals(coeffs, xdata, ydata):
        # Difference between the polynomial model and the data.
        return np.polyval(coeffs, xdata) - ydata

    keep = np.ones_like(x, dtype='bool')
    for _ in range(maxiter):
        # Fit only the currently retained points and evaluate everywhere.
        coeffs = np.polyfit(x[keep], y[keep], order)
        deviation = y - np.polyval(coeffs, x)
        # Retain points within nstd standard deviations of the model.
        keep = np.abs(deviation) <= nstd * np.std(deviation)
    # Polish the clipped fit with a robust (Huber) loss.
    fit = least_squares(_residuals, coeffs, loss='huber', f_scale=0.1,
                        args=(x[keep], y[keep]))
    return fit.x
def get_image_dim(image, header=None, verbose=False):
    """Determine the properties of the image array.
    Parameters
    ----------
    image : array[float]
        2D image of the detector.
    header : astropy.io.fits.Header instance
        The header from a SOSS reference file.
    verbose : bool
        If True, print diagnostic information about the dimensions.
    Returns
    -------
    dimx : int
        X-dimension of the stack array.
    dimy : int
        Y-dimension of the stack array.
    xos : int
        Oversampling factor in the x-direction.
    yos : int
        Oversampling factor in the y-direction.
    xnative : int
        X-dimension of the stack array in native pixels.
    ynative : int
        Y-dimension of the stack array in native pixels.
    padding : int
        Amount of padding around the image, in native pixels.
    refpix_mask : array[bool]
        Boolean array indicating which pixels are light sensitive (True) and
        which are reference pixels (False).
    """
    # Dimensions of the subarray.
    dimy, dimx = np.shape(image)
    # If no header was passed we have to check all possible sizes.
    if header is None:
        # Initialize padding to zero in this case because it is not a
        # reference file.
        padding = 0
        # Assume the stack is a valid SOSS subarray.
        # FULL: 2048x2048 or 2040x2040 (working pixels) or multiple if os.
        # SUBSTRIP96: 2048x96 or 2040x96 (working pixels) or multiple if os.
        # SUBSTRIP256: 2048x256 or 2040x252 (working pixels) or multiple if os.
        # Check if the size of the x-axis is valid.
        if (dimx % 2048) == 0:
            xnative = 2048
            xos = int(dimx // 2048)
        elif (dimx % 2040) == 0:
            xnative = 2040
            xos = int(dimx // 2040)
        else:
            msg = ('Stack X dimension has unrecognized size of {:}. '
                   'Accepts 2048, 2040 or multiple thereof.')
            raise ValueError(msg.format(dimx))
        # Check if the y-axis is consistent with the x-axis.
        if int(dimy/xos) in [96, 256, 252, 2040, 2048]:
            # np.copy of an int gives a 0-d array; it behaves as an int in
            # the arithmetic below.
            yos = np.copy(xos)
            ynative = int(dimy/yos)
        else:
            msg = ('Stack Y dimension ({:}) is inconsistent with '
                   'stack X dimension ({:}) for acceptable SOSS arrays')
            raise ValueError(msg.format(dimy, dimx))
        # Create a boolean mask indicating which pixels are not ref pixels.
        refpix_mask = np.ones_like(image, dtype='bool')
        if xnative == 2048:
            # Mask out the left and right columns of reference pixels.
            refpix_mask[:, :xos * 4] = False
            refpix_mask[:, -xos * 4:] = False
        if ynative == 2048:
            # Mask out the top and bottom rows of reference pixels.
            refpix_mask[:yos * 4, :] = False
            refpix_mask[-yos * 4:, :] = False
        if ynative == 256:
            # Mask the top rows of reference pixels.
            refpix_mask[-yos * 4:, :] = False
        # NOTE(review): for ynative == 96 no rows are masked — presumably
        # SUBSTRIP96 does not reach the detector edge; confirm.
    else:
        # Read the oversampling and padding from the header.
        padding = int(header['PADDING'])
        xos, yos = int(header['OVERSAMP']), int(header['OVERSAMP'])
        # Check that the stack respects its intended format.
        if (dimx/xos - 2*padding) not in [2048]:
            msg = 'The header passed is inconsistent with the X dimension ' \
                  'of the stack.'
            raise ValueError(msg)
        else:
            xnative = 2048
        if (dimy/yos - 2*padding) not in [96, 256, 2048]:
            msg = 'The header passed is inconsistent with the Y dimension ' \
                  'of the stack.'
            raise ValueError(msg)
        else:
            ynative = int(dimy/yos - 2*padding)
        # The trace file contains no reference pixels so all pixels are good.
        refpix_mask = np.ones_like(image, dtype='bool')
    # If verbose print the output.
    if verbose:
        print('Data dimensions:')
        str_args = dimx, dimy, xos, yos, xnative, ynative
        msg = 'dimx={:}, dimy={:}, xos={:}, yos={:}, xnative={:}, ynative={:}'
        print(msg.format(*str_args))
    return dimx, dimy, xos, yos, xnative, ynative, padding, refpix_mask
| 6,141 | 29.557214 | 79 | py |
applesoss | applesoss-main/applesoss/applesoss.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 19 11:46 2021
@author: MCR
Definitions of the main functions for the applesoss (A Producer of ProfiLEs
for SOSS) module. This class will be initialized and called by the user to
create models of the spatial profiles for the first, second, and third order
SOSS traces, for use as the specprofile reference file required by the ATOCA
algorithm, or alternatively as the PSF weights for an optimal extraction.
"""
from astropy.io import fits
import numpy as np
from scipy.interpolate import interp2d
import warnings
from applesoss import applesoss_utils
from applesoss.edgetrigger_centroids import get_soss_centroids
from applesoss import plotting
warnings.simplefilter(action='ignore', category=RuntimeWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
class EmpiricalProfile:
    """Class wrapper around the empirical spatial profile construction module.
    Attributes
    ----------
    clear : array-like
        SOSS CLEAR exposure data frame.
    wavemap : str
        Path to SOSS 2D wavelength solution reference file.
    tracetable : str
        Path to SOSS trace table reference file.
    subarray : str
        NIRISS SOSS subarray identifier. One of 'SUBSTRIP256', 'SUBSTRIP96',
        or 'FULL'.
    pad : int
        Amount of padding to include (in native pixels) in the spatial and
        spectral directions.
    oversample : int
        Oversampling factor. Oversampling will be equal in the spectral and
        spatial directions.
    order1 : array-like
        First order spatial profile.
    order2 : array-like
        Second order spatial profile.
    order3 : array-like
        Third order spatial profile.
    Methods
    -------
    build_empirical_profile
        Construct the empirical spatial profiles.
    write_specprofile_reference
        Save spatial profile models to reference file.
    """
    def __init__(self, clear, wavemap, tracetable, pad=0, oversample=1):
        """Initializer for EmpiricalProfile.
        """
        # Initialize input attributes.
        self.clear = clear
        self.wavemap = wavemap
        self.tracetable = tracetable
        self.pad = pad
        self.oversample = oversample
        # Validate the parameters and determine the correct subarray.
        self.subarray = self.validate_inputs()
        # Profiles are populated by build_empirical_profile.
        self.order1 = None
        self.order2 = None
        self.order3 = None
    def build_empirical_profile(self, empirical=True, wave_increment=0.1,
                                halfwidth=16, obs_date=None, verbose=0):
        """Run the empirical spatial profile construction module.
        Parameters
        ----------
        empirical : bool
            If True, pull the trace wings from uncontaminated trace profiles.
            If False, use WebbPSF.
        wave_increment : float
            Wavelength step (in µm) for PSF simulations. For accuracy, it is
            advisable not to use steps larger than 0.1µm.
        halfwidth : int
            half-width of the trace in native pixels.
        obs_date : str
            Date of observations in 'yyyy-mm-dd' format.
        verbose: int
            Level of verbosity: either 3, 2, 1, or 0.
            3 - show all of progress prints, progress bars, and diagnostic
            plots.
            2 - show progress prints and bars.
            1 - show only progress prints.
            0 - show nothing.
        """
        # Run the empirical spatial profile construction.
        o1, o2, o3 = build_empirical_profile(self.clear, self.subarray,
                                             self.pad, self.oversample,
                                             self.wavemap, self.tracetable,
                                             empirical, wave_increment,
                                             halfwidth, obs_date, verbose)
        # Set any niggling negatives to zero (mostly for the bluest end of the
        # second order where things get skrewy).
        for o in [o1, o2, o3]:
            ii = np.where(o < 0)
            o[ii] = 0
        # Store the spatial profiles as attributes.
        self.order1, self.order2, self.order3 = o1, o2, o3
    def write_specprofile_reference(self, subarray, output_dir='./',
                                    filename=None):
        """Write the spatial profiles to a reference file to be injested by
        ATOCA.
        Parameters
        ----------
        subarray : str
            SOSS subarray, either 'FULL', 'SUBSTRIP256', or 'SUBSTRIP96'
        output_dir : str
            Directory to which to save file.
        filename : str
            Name of reference file.
        """
        # Just make sure that everything is the same shape
        assert self.order1.shape == self.order2.shape == self.order3.shape
        dimy, dimx = self.order1.shape
        # Create stacked array with all orders. The profiles are pasted into
        # the bottom rows of a FULL-frame-sized (padded, oversampled) cube.
        stack_full = np.zeros(((2048+2*self.pad)*self.oversample,
                               (2048+2*self.pad)*self.oversample, 3))
        stack_full[-dimy:, :, 0] = np.copy(self.order1)
        stack_full[-dimy:, :, 1] = np.copy(self.order2)
        stack_full[-dimy:, :, 2] = np.copy(self.order3)
        # Pass to reference file creation.
        # NOTE(review): filename may still be None here; presumably
        # init_spec_profile tolerates that — confirm.
        hdulist = applesoss_utils.init_spec_profile(stack_full,
                                                    self.oversample, self.pad,
                                                    subarray, filename)
        hdu = fits.HDUList(hdulist)
        if filename is None:
            filepattern = 'APPLESOSS_ref_2D_profile_{0}_os{1}_pad{2}.fits'
            filename = filepattern.format(subarray, self.oversample, self.pad)
        print('Saving to file '+(output_dir + filename))
        hdu.writeto(output_dir + filename, overwrite=True)
        return filename
    def validate_inputs(self):
        """Validate the input parameters.
        """
        # Delegates validation (and subarray detection) to the utils module.
        return applesoss_utils.validate_inputs(self)
def build_empirical_profile(clear, subarray, pad, oversample, wavemap,
                            tracetable, empirical, wave_increment, halfwidth,
                            obs_date, verbose):
    """Main procedural function for the empirical spatial profile construction
    module. Calling this function will initialize and run all the required
    subroutines to produce a spatial profile for the first, second, and third
    orders. The spatial profiles generated can include oversampling as well
    as padding in both the spatial and spectral directions.
    Parameters
    ----------
    clear : array-like
        SOSS CLEAR exposure data frame.
    subarray : str
        NIRISS SOSS subarray identifier. One of 'SUBSTRIP256', 'SUBSTRIP96',
        or 'FULL'.
    pad : int
        Amount of padding to include (in native pixels) in the spatial and
        spectral directions.
    oversample : int
        Oversampling factor. Oversampling will be equal in the spectral and
        spatial directions.
    wavemap : str
        Path to SOSS 2D wavelength solution reference file.
    tracetable : str
        Path to SOSS trace table reference file.
    empirical : bool
        If True, pull the trace wings from uncontaminated trace profiles. If
        False, use WebbPSF.
    wave_increment : float
        Wavelength step (in µm) for PSF simulations.
    halfwidth : int
        Half-width of the trace in native pixels.
    obs_date : str
        Date of observations in 'yyyy-mm-dd', or yyyy-mm-ddThh:mm:ss format.
    verbose : int
        Level of verbosity: either 3, 2, 1, or 0.
        3 - show all of progress prints, progress bars, and diagnostic plots.
        2 - show progress prints and bars.
        1 - show only progress prints.
        0 - show nothing.
    Returns
    -------
    o1_uncontam : np.array
        Uncontaminated spatial profile for the first order.
    o2_uncontam : np.array
        Uncontaminated spatial profile for the second order.
    o3_uncontam : np.array
        Uncontaminated spatial profile for the third order.
    Raises
    ------
    ValueError
        When the clear dimensions do not match a known subarray.
    """
    if empirical is True:
        mode = 'empirical'
    else:
        mode = 'simulation'
    print('Starting the applesoss module in {} mode.\n'.format(mode))
    # ========= INITIAL SETUP =========
    # If subarray is FULL - trim down to SUBSTRIP256 and work with that.
    if subarray == 'FULL':
        clear = clear[-256:, :]
        # Reset all variable to appropriate SUBSTRIP256 values.
        subarray = 'SUBSTRIP256'
    # Add a floor level such that all pixel values are positive and interpolate
    # bad pixels
    if verbose != 0:
        print(' Initial processing.')
        print(' Interpolating bad pixels...', flush=True)
    floor = np.nanpercentile(clear, 0.1)
    clear -= floor
    # Get the centroid positions for both orders from the data using the
    # edgetrig method.
    if verbose != 0:
        print(' Getting trace centroids...')
    centroids = get_soss_centroids(clear, tracetable, subarray=subarray)
    if verbose == 3:
        plotting.plot_centroid(clear, centroids)
    # Restore the original flux level before profile construction.
    clear += floor
    # The four columns of pixels on the left and right edge of the SOSS
    # detector are reference pixels. Trim them off and replace them with
    # interpolations of the edge-most profiles.
    clear = pad_spectral_axis(clear[:, 5:-5],
                              centroids['order 1']['X centroid'][5:-5],
                              centroids['order 1']['Y centroid'][5:-5],
                              pad=5, ref_cols=[0, -1], replace=True)
    # ========= CONSTRUCT SPATIAL PROFILE MODELS =========
    # Build a first estimate of the first, second, and third order spatial
    # profiles. The cores can be mostly read off of the clear dataframe - it
    # is just the wings that need reconstruction. For this, we will use
    # either WebbPSF, or uncontaminated trace profiles from the data itself.
    if empirical is False:
        # Generate WebbPSF 1D profiles across a range of wavelengths.
        psfs = applesoss_utils.generate_psfs(wave_increment=wave_increment,
                                             verbose=verbose,
                                             obs_date=obs_date)
    else:
        psfs = None
    # === First Order ===
    # Construct the first order profile.
    if verbose != 0:
        print(' Building the spatial profile models.')
        print(' Starting the first order model...', flush=True)
    o1_results = reconstruct_order(clear, centroids, order=1,
                                   empirical=empirical, psfs=psfs,
                                   halfwidth=halfwidth, pad=0, wavemap=wavemap)
    o1_uncontam, o1_rect = o1_results
    # Add padding to first order spatial axis if necessary.
    if pad != 0:
        o1_uncontam = np.pad(o1_uncontam, ((pad, pad), (0, 0)), mode='edge')
    # If the subarray is SUBSTRIP96, this is all we can do. However, for
    # SUBSTRIP256 we can reconstruct the second and third orders as well.
    if subarray != 'SUBSTRIP96':
        dimy = 256
        # === Second Order ===
        # Subtract off the reconstructed first order.
        o2_res = clear - o1_uncontam[pad:dimy+pad]
        # Construct the second order profile.
        if verbose != 0:
            print(' Starting the second order trace...')
        o2_results = reconstruct_order(o2_res, centroids, order=2,
                                       empirical=empirical, psfs=psfs,
                                       halfwidth=halfwidth, pad=pad,
                                       wavemap=wavemap, o1_prof=o1_rect,
                                       clear2=clear, verbose=verbose)
        o2_uncontam, o2_rect = o2_results
        # Add padding to the lower edge of spatial axis if necessary.
        # (The upper edge is padded inside reconstruct_order.)
        if pad != 0:
            o2_uncontam = np.pad(o2_uncontam, ((pad, 0), (0, 0)), mode='edge')
        # === Third Order ===
        # Construct the third order profile.
        # Subtract off the reconstructed first and second orders.
        o3_res = clear - o1_uncontam[pad:dimy+pad] - o2_uncontam[pad:dimy+pad]
        if verbose != 0:
            print(' Starting the third order trace...')
        o3_out = reconstruct_order(o3_res, centroids, order=3,
                                   empirical=empirical, psfs=psfs,
                                   halfwidth=halfwidth, pad=pad,
                                   wavemap=wavemap, pivot=700, o2_prof=o2_rect,
                                   o1_prof=o1_rect, clear2=clear,
                                   verbose=verbose)
        o3_uncontam = o3_out[0]
        # Add padding to the lower edge of the spatial axis if necessary.
        if pad != 0:
            o3_uncontam = np.pad(o3_uncontam, ((pad, 0), (0, 0)), mode='edge')
    else:
        msg = 'Only order 1 can be reconstructed for SUBSTRIP96.'
        warnings.warn(msg)
        o2_uncontam = np.ones_like(o1_uncontam)
        o3_uncontam = np.ones_like(o1_uncontam)
    # ========= FINAL TUNING =========
    # Pad the spectral axes.
    if pad != 0:
        if verbose != 0:
            print(' Adding padding to the spectral axis...')
        o1_uncontam = pad_spectral_axis(o1_uncontam,
                                        centroids['order 1']['X centroid'],
                                        centroids['order 1']['Y centroid'],
                                        pad=pad)
        o2_uncontam = pad_spectral_axis(o2_uncontam,
                                        centroids['order 2']['X centroid'],
                                        centroids['order 2']['Y centroid'],
                                        pad=pad)
        o3_uncontam = pad_spectral_axis(o3_uncontam,
                                        centroids['order 3']['X centroid'],
                                        centroids['order 3']['Y centroid'],
                                        pad=pad)
    # Column normalize. Only want the original detector to sum to 1, not the
    # additional padding + oversampling.
    o1_uncontam /= np.nansum(o1_uncontam, axis=0)
    o2_uncontam /= np.nansum(o2_uncontam, axis=0)
    o3_uncontam /= np.nansum(o3_uncontam, axis=0)
    # Replace NaNs resulting from all zero columns with zeros
    for o in [o2_uncontam, o3_uncontam]:
        ii = np.where(~np.isfinite(o))
        o[ii] = 0
    # Smooth over outlier columns.
    o1_uncontam = smooth_outlier_columns(o1_uncontam)
    o2_uncontam = smooth_outlier_columns(o2_uncontam)
    o3_uncontam = smooth_outlier_columns(o3_uncontam)
    # Add oversampling.
    if oversample != 1:
        if verbose != 0:
            print(' Oversampling...')
        o1_uncontam = oversample_frame(o1_uncontam, os=oversample)
        o2_uncontam = oversample_frame(o2_uncontam, os=oversample)
        o3_uncontam = oversample_frame(o3_uncontam, os=oversample)
    if verbose != 0:
        print('\nDone.')
    return o1_uncontam, o2_uncontam, o3_uncontam
def oversample_frame(dataframe, os):
"""Oversample a dataframe by a specified amount.
Parameters
----------
dataframe : array-like
Dataframe to be oversampled.
os : int
Oversampling factor to apply to each axis.
Returns
-------
data_os : np.array
Input dataframe with each axis oversampled by the desired amount.
"""
# Generate native and oversampled axes.
dimy, dimx = np.shape(dataframe)
x, x_os = np.arange(dimx), np.arange(dimx * os) / os
y, y_os = np.arange(dimy), np.arange(dimy * os) / os
# Interpolate onto the oversampled grid.
pp = interp2d(x, y, dataframe, kind='cubic')
data_os = pp(x_os, y_os)
return data_os
def pad_spectral_axis(frame, xcens, ycens, pad=0, ref_cols=None,
replace=False):
"""Add padding to the spectral axis by interpolating the corresponding
edge profile onto a set of extrapolated centroids.
Parameters
----------
frame : array-like
Data frame.
xcens : array-like
X-coordinates of the trace centroids.
ycens : array-like
Y-coordinates of the trace centroids.
pad : int
Amount of padding to add along either end of the spectral axis (in
pixels).
ref_cols : array-like
Which columns to use as the reference profiles for the padding.
replace : bool
Toggle for functionality to replace reference pixel columns.
Returns
-------
newframe : np.array
Data frame with padding on the spectral axis.
"""
# Set default reference columns.
if ref_cols is None:
ref_cols = [6, -6]
dimy, dimx = np.shape(frame)
# Get centroids and extended centroids.
pp = np.polyfit(xcens, ycens, 5)
if replace:
xax_pad = np.arange(dimx + 2 * pad)
else:
xax_pad = np.arange(dimx + 2*pad) - pad
ycens_pad = np.polyval(pp, xax_pad)
# Construct padded dataframe and paste in orignal data.
newframe = np.zeros((dimy, dimx + 2*pad))
newframe[:, pad:(dimx + pad)] = frame
# Loop over columns to pad and stitch on the shifted reference column.
for col in range(pad):
yax = np.arange(dimy)
newframe[:, col] = np.interp(yax,
yax - ycens[ref_cols[0]] + ycens_pad[col],
frame[:, ref_cols[0]])
for col in range(dimx + ref_cols[1] + pad+1, dimx + 2*pad):
yax = np.arange(dimy)
newframe[:, col] = np.interp(yax,
yax - ycens[ref_cols[1]] + ycens_pad[col],
frame[:, ref_cols[1]])
return newframe
def reconstruct_order(clear, cen, order, empirical, psfs, halfwidth, pad,
                      wavemap, pivot=750, o1_prof=None, o2_prof=None,
                      clear2=None, verbose=0):
    """Reconstruct the wings of the spatial profiles using either simulated
    WebbPSF PSFs, or fully empirical profiles taken from uncontaminated
    regions of the data. Will also add padding to the spatial axes of orders 2
    and 3, where the trace touches the top edge of the detector.

    Parameters
    ----------
    clear : np.array
        NIRISS/SOSS data frame.
    cen : dict
        Centroids dictionary.
    order : int
        The order to reconstruct.
    empirical : bool
        If True, pull the trace wings from uncontaminated trace profiles. If
        False, use WebbPSF.
    psfs : array-like
        Array of simulated 1D SOSS PSFs.
    halfwidth : int
        Half-width of the trace in native pixels.
    pad : int
        Amount of padding in native pixels to add to the spatial axis.
    wavemap : str
        Path to SOSS 2D wavelength solution reference file.
    pivot : int
        For order 2, minimum spectral pixel value for which a wing
        reconstruction will be attempted. For order 3, the maximum pixel value.
        For spectral pixels < or >pivot respectively, the profile at pivot will
        be used.
    o1_prof : array-like
        Uncontaminated order 1 spatial profile. Only necessary for
        reconstruction of order 2.
    o2_prof : array_like
        Uncontaminated order 2 spatial profile. Only necessary for
        reconstruction of order 3.
    clear2 : array-like
        For orders 2 and 3 where clear is a residual frame, clear2 is the
        original data frame. Only necessary for orders 2 and 3.
    verbose : int
        level of verbosity.

    Returns
    -------
    new_frame : np.array
        Model of the second order spatial profile with wings reconstructed.
    frame_rect : np.array
        Reconstructed profiles, rectified.
    """
    # Initialize new data frame and get subarray dimensions.
    dimy, dimx = np.shape(clear)
    new_frame = np.zeros((dimy+pad, dimx))
    os_factor = 10  # Do recentroiding at 10x oversampling.
    # Get wavelength calibration for the requested order.
    wavecal_x, wavecal_w = applesoss_utils.get_wave_solution(wavemap,
                                                             order=order)
    # In the fully empirical case, wings only need to be generated once.
    # Do this now.
    if empirical is True:
        if order == 1:
            # For order 1 the data frame itself is the uncontaminated frame.
            clear2 = clear
        ewing, ewing2, stand = get_wings(1.0, psfs, clear2, cen,
                                         halfwidth=halfwidth, empirical=True,
                                         verbose=verbose)
        dimy_r = len(stand)
    else:
        # Simulated case: rectified profile length comes from the PSF grid.
        dimy_r = np.shape(psfs['PSF'])[1]
    # Set dimensions for rectified trace array.
    frame_rect = np.zeros((dimy_r, dimx))
    first_time = True
    # maxi tracks the first column whose centroid leaves the detector; for
    # order 3 the column loop is skipped entirely, so start it at 0.
    if order == 3:
        maxi = 0
    else:
        maxi = dimx
    for i in range(dimx):
        wave = wavecal_w[i]
        # Skip over columns where the throughput is too low to get a good core
        # and/or the order is buried within another.
        if order == 2 and i < pivot:
            continue
        if order == 3:
            continue
        # If the centroid is too close to the detector edge, make note of
        # the column and deal with it later.
        # cen_o is the centroid position in oversampled pixels.
        cen_o = int(round(cen['order '+str(order)]['Y centroid'][i]*os_factor, 0))
        if cen_o/os_factor + halfwidth > dimy:
            if i < maxi:
                maxi = i
            continue
        # Get a copy of the spatial profile, and normalize it by its max value.
        working_prof = np.copy(clear[:, i])
        lwp = len(working_prof)
        # Oversample the column profile by os_factor via linear interpolation.
        working_prof_os = np.interp(np.linspace(0, (os_factor*lwp-1)/os_factor,
                                                os_factor*lwp),
                                    np.arange(lwp), working_prof)
        # 99th percentile of the core region acts as the normalization value.
        max_val = np.nanpercentile(working_prof[(cen_o//os_factor-halfwidth):(cen_o//os_factor+halfwidth)], 99)
        # Get the trace wings, if simulated. These must be generated for each
        # individual wavelength.
        if first_time is False:
            # Only produce diagnostics (if requested) on the first column.
            verbose = 0
        if empirical is False:
            wing, wing2, stand = get_wings(wave, psfs, clear, cen,
                                           halfwidth=halfwidth,
                                           empirical=False, verbose=verbose)
            shift = 0
        else:
            wing, wing2 = np.copy(ewing), np.copy(ewing2)
            # Hack to account for the fact that the trace thins slightly
            # (~1 pixel) towards bluer wavelengths.
            shift = (-0.5/2040)*i + 1.5
        # Scale the wings to the core's normalization and oversample them.
        wing *= max_val
        lw = len(wing)
        wing_os = np.interp(np.linspace(0, (os_factor*lw-1)/os_factor,
                                        os_factor*lw)+shift, np.arange(lw),
                            wing)
        wing2 *= max_val
        lw2 = len(wing2)
        wing2_os = np.interp(np.linspace(0, (os_factor*lw2-1)/os_factor-1,
                                         os_factor*lw2), np.arange(lw2), wing2)
        first_time = False
        # Concatenate the wings onto the profile core.
        start = int(round((cen_o - halfwidth*os_factor), 0))
        end = int(round((cen_o + halfwidth*os_factor), 0))
        stitch = np.concatenate([wing2_os,
                                 working_prof_os[(start+os_factor):end],
                                 wing_os])
        # Interpolate the rectified PSF back to native pixel sampling.
        ls = len(stitch)
        stitch_nat = np.interp(np.arange(dimy_r),
                               np.linspace(0, (ls-1)/os_factor, ls), stitch)
        frame_rect[:, i] = stitch_nat
        # Shift the oversampled PSF to its correct centroid position.
        psf_len = dimy_r * os_factor
        stitch = np.interp(np.arange((dimy+pad)*os_factor),
                           np.arange(psf_len) - psf_len//2 + cen_o, stitch)
        # Interpolate shifted PSF to native pixel sampling.
        stitch = np.interp(np.arange(dimy+pad),
                           np.linspace(0, (os_factor*(dimy+pad)-1)/os_factor,
                                       os_factor*(dimy+pad)), stitch)
        new_frame[:, i] = stitch
    # For columns where the order 2 core is not distinguishable (due to the
    # throughput dropping near 0, or it being buried in order 1) reuse a
    # profile from order 1 at the same wavelength. The shape of the PSF is
    # completely determined by the optics, and should thus be identical for a
    # given wavelength, irrespective of the order. The differing
    # tilt/spectral resolution of order 1 vs 2 may have some effect here
    # though.
    if order == 2:
        wavecal_x_o1, wavecal_w_o1 = applesoss_utils.get_wave_solution(wavemap,
                                                                       order=1)
        for i in range(pivot):
            wave_o2 = wavecal_w[i]
            dimx_r, dimy_r = np.shape(o1_prof.T)
            # Rectified order 1 profiles are centred in their array.
            co1 = np.ones(dimx_r) * dimy_r / 2
            co2 = cen['order 2']['Y centroid'][i]
            working_prof = applesoss_utils.interpolate_profile(wave_o2, co2,
                                                               wavecal_w_o1,
                                                               o1_prof.T, co1,
                                                               os_factor=os_factor)
            new_frame[:, i] = working_prof[:dimy+pad]
        # For columns where the centroid is off the detector, reuse the bluest
        # reconstructed profile.
        # Hard stop for profile reuse - one half width passed where the trace
        # centroid leaves the detector.
        stop = np.where(cen['order 2']['Y centroid'] >= dimy+pad)[0][0]
        stop += halfwidth
        for i in range(maxi, stop):
            # Shift the last good profile onto this column's centroid.
            anchor_prof = new_frame[:, maxi - 1]
            sc = cen['order '+str(order)]['Y centroid'][maxi - 1]
            ec = cen['order '+str(order)]['Y centroid'][i]
            working_prof = np.interp(np.arange(dimy+pad),
                                     np.arange(dimy+pad) - sc + ec,
                                     anchor_prof)
            new_frame[:, i] = working_prof
    # Do something similar for order 3. Where the throughput is too low, reuse
    # a profile of the same wavelength from order 2.
    if order == 3:
        # Hard stop for profile reuse - one half width passed where the trace
        # centroid leaves the detector.
        stop = np.where(cen['order 3']['Y centroid'] >= dimy+pad)[0][0]
        stop += halfwidth
        wavecal_x_o2, wavecal_w_o2 = applesoss_utils.get_wave_solution(wavemap,
                                                                       order=2)
        for i in range(maxi, stop):
            wave_o3 = wavecal_w[i]
            dimx_r, dimy_r = np.shape(o2_prof.T)
            # Rectified order 2 profiles are centred in their array.
            co2 = np.ones(dimx_r) * dimy_r / 2
            co3 = cen['order 3']['Y centroid'][i]
            working_prof = applesoss_utils.interpolate_profile(wave_o3, co3,
                                                               wavecal_w_o2,
                                                               o2_prof.T, co2,
                                                               os_factor=os_factor)
            new_frame[:, i] = working_prof[:dimy+pad]
    return new_frame, frame_rect
def smooth_outlier_columns(frame, thresh=2):
    """Identify and smooth over residual outlier columns in the final profiles.

    Parameters
    ----------
    frame : array-like
        2D profile for a single order.
    thresh : int
        Sigma threshold to flag a column as an outlier.

    Returns
    -------
    fix_frame : array-like
        2D profile with outlier columns interpolated.
    """
    fix_frame = np.copy(frame)
    # Median difference level between each pair of neighbouring columns.
    col_diffs = np.nanmedian(np.diff(fix_frame, axis=1), axis=0)
    # Columns whose difference level deviates by more than thresh sigma.
    outliers = np.where(np.abs(col_diffs) > thresh * np.nanstd(col_diffs))[0]
    # Group neighbouring flagged columns and repair each group.
    for chunk in applesoss_utils.find_consecutive(outliers):
        size = len(chunk)
        if size <= 2:
            # A single bad column shows up as one or two flagged diffs;
            # the last entry of the chunk is the column to repair, using
            # the median of its immediate neighbours.
            bad = chunk[-1]
            fix_frame[:, bad] = np.median([frame[:, bad - 1],
                                           frame[:, bad + 1]], axis=0)
        elif size < 5:
            # Small clumps: overwrite the whole span with the median of the
            # columns bracketing it.
            first, last = chunk[0], chunk[-1]
            bracket_med = np.median([frame[:, first - 1],
                                     frame[:, last + 1]], axis=0)
            fix_frame[:, first:last + 1] = bracket_med[:, None]
        # Larger bad clumps will have to be treated differently --- curvature
        # of the trace will probably start to matter.
    return fix_frame
def get_wings(w, psfs, deep, cens, halfwidth, badpix=None, empirical=True,
              verbose=0):
    """Extract the trace wings from a 1D spatial profile.

    Parameters
    ----------
    w : float
        Wavelength of interest (µm). Necesssary if empirical is False.
    psfs : array-like
        Array of simulated SOSS PSFs. Necesssary if empirical is False.
    deep : array-like
        SOSS deep stack. Necesssary if empirical is True.
    cens : dict
        Centroids dictionary. Necesssary if empirical is True.
    halfwidth : int
        Half-width of the SOSS trace.
    badpix : array-like
        Bad pixel mask that is the same shape as deep. Zero-valued pixels will
        be used, and all other-valued pixels will be masked.
    empirical : bool
        If True, pull the trace wings from uncontaminated trace profiles. If
        False, use WebbPSF.
    verbose : int
        Level of verbosity.

    Returns
    -------
    wing : np.array
        Extracted right wing.
    wing2 : np.array
        Extracted left wing.
    stand : np.array
        Profile from which wings were extracted.
    """
    if empirical is True:
        # Use an uncontaminated trace profile built from the data itself.
        stand = applesoss_utils.generate_superprof(deep, cens, badpix)
    else:
        # Interpolate the simulated PSFs to the wavelength of interest.
        stand = applesoss_utils.interpolate_profile(w, 0, psfs['Wave'][:, 0],
                                                    psfs['PSF'],
                                                    np.zeros_like(psfs['Wave'][:, 0]))
    psf_size = len(stand)
    # Normalize to a max value of one to match the simulated profile.
    stand /= np.nanpercentile(stand, 99)
    # Define the edges of the profile 'core' around the array centre.
    ax = np.arange(psf_size)
    ystart = int(round(psf_size//2 - halfwidth, 0)) + 1
    yend = int(round(psf_size//2 + halfwidth, 0))
    # Fit a 9th-order polynomial to each wing in log space, and use the
    # smooth fit as the wing model.
    right_fit = np.polyfit(ax[yend:], np.log10(stand[yend:]), 9)
    wing = 10**np.polyval(right_fit, ax[yend:])
    left_fit = np.polyfit(ax[:ystart], np.log10(stand[:ystart]), 9)
    wing2 = 10**np.polyval(left_fit, ax[:ystart])
    # Do diagnostic plot if necessary.
    if verbose == 3:
        plotting.plot_wing_simulation(stand, halfwidth, wing, wing2, ax,
                                      ystart, yend)
    return wing, wing2, stand
| 31,493 | 39.222222 | 122 | py |
gunpowder | gunpowder-master/gunpowder/roi.py | from funlib.geometry import Roi # noqa
| 40 | 19.5 | 39 | py |
gunpowder | gunpowder-master/gunpowder/provider_spec.py | import math
from gunpowder.coordinate import Coordinate
from gunpowder.array import ArrayKey
from gunpowder.array_spec import ArraySpec
from gunpowder.graph import GraphKey
from gunpowder.graph_spec import GraphSpec
from gunpowder.roi import Roi
from .freezable import Freezable
import time
import logging
import copy
logger = logging.getLogger(__name__)
import logging
import warnings
logger = logging.getLogger(__file__)
class ProviderSpec(Freezable):
    """A collection of (possibly partial) :class:`ArraySpecs<ArraySpec>` and
    :class:`GraphSpecs<GraphSpec>` describing a
    :class:`BatchProvider's<BatchProvider>` offered arrays and graphs.

    This collection mimics a dictionary. Specs can be added with::

        provider_spec = ProviderSpec()
        provider_spec[array_key] = ArraySpec(...)
        provider_spec[graph_key] = GraphSpec(...)

    Here, ``array_key`` and ``graph_key`` are :class:`ArrayKey` and
    :class:`GraphKey`. The specs can be queried with::

        array_spec = provider_spec[array_key]
        graph_spec = provider_spec[graph_key]

    Furthermore, pairs of keys/values can be iterated over using
    ``provider_spec.items()``.

    To access only array or graph specs, use the dictionaries
    ``provider_spec.array_specs`` or ``provider_spec.graph_specs``,
    respectively.

    Args:

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`):
            Initial array specs.

        graph_specs (``dict``, :class:`GraphKey` -> :class:`GraphSpec`):
            Initial graph specs.

    Attributes:

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`):
            Contains all array specs contained in this provider spec.

        graph_specs (``dict``, :class:`GraphKey` -> :class:`GraphSpec`):
            Contains all graph specs contained in this provider spec.
    """

    def __init__(self, array_specs=None, graph_specs=None, points_specs=None):
        self.array_specs = {}
        self.graph_specs = {}
        self.freeze()

        # use __setitem__ instead of copying the dicts, this ensures type
        # tests are run
        if array_specs is not None:
            for key, spec in array_specs.items():
                self[key] = spec
        if graph_specs is not None:
            for key, spec in graph_specs.items():
                self[key] = spec
        if points_specs is not None:
            for key, spec in points_specs.items():
                self[key] = spec

    @property
    def points_specs(self):
        # Alias to graph_specs, kept for backward compatibility.
        warnings.warn(
            "points_specs are deprecated. Please use graph_specs", DeprecationWarning
        )
        return self.graph_specs

    def __setitem__(self, key, spec):
        assert isinstance(key, ArrayKey) or isinstance(key, GraphKey), (
            f"Only ArrayKey or GraphKey (not {type(key).__name__}) are "
            "allowed as keys for ProviderSpec"
        )
        if isinstance(key, ArrayKey):
            # A bare Roi is promoted to a spec carrying only that ROI.
            if isinstance(spec, Roi):
                spec = ArraySpec(roi=spec)
            assert isinstance(spec, ArraySpec), (
                f"Only ArraySpec (not {type(spec).__name__}) can be set for "
                "ArrayKey"
            )
            # Store a copy so later mutations of the caller's spec do not
            # leak into this provider spec.
            self.array_specs[key] = spec.copy()
        else:
            if isinstance(spec, Roi):
                spec = GraphSpec(roi=spec)
            assert isinstance(spec, GraphSpec), (
                f"Only GraphSpec (not {type(spec).__name__}) can be set for "
                "GraphKey"
            )
            self.graph_specs[key] = spec.copy()

    def __getitem__(self, key):
        if isinstance(key, ArrayKey):
            return self.array_specs[key]
        elif isinstance(key, GraphKey):
            return self.graph_specs[key]
        else:
            raise RuntimeError(
                "Only ArrayKey or GraphKey can be used as keys in a "
                "%s." % type(self).__name__
            )

    def __len__(self):
        return len(self.array_specs) + len(self.graph_specs)

    def __contains__(self, key):
        if isinstance(key, ArrayKey):
            return key in self.array_specs
        elif isinstance(key, GraphKey):
            return key in self.graph_specs
        else:
            raise RuntimeError(
                "Only ArrayKey or GraphKey, can be used as keys in a "
                "%s. Key %s is a %s" % (type(self).__name__, key, type(key).__name__)
            )

    def __delitem__(self, key):
        if isinstance(key, ArrayKey):
            del self.array_specs[key]
        elif isinstance(key, GraphKey):
            del self.graph_specs[key]
        else:
            raise RuntimeError(
                "Only ArrayKey or GraphKey can be used as keys in a "
                "%s." % type(self).__name__
            )

    def remove_placeholders(self):
        """Drop all specs that are marked as placeholders."""
        self.array_specs = {
            k: v for k, v in self.array_specs.items() if not v.placeholder
        }
        self.graph_specs = {
            k: v for k, v in self.graph_specs.items() if not v.placeholder
        }

    def items(self):
        """Provides a generator iterating over key/value pairs."""
        for k, v in self.array_specs.items():
            yield k, v
        for k, v in self.graph_specs.items():
            yield k, v

    def get_total_roi(self):
        """Get the union of all the ROIs."""
        total_roi = None
        for specs_type in [self.array_specs, self.graph_specs]:
            for _, spec in specs_type.items():
                if total_roi is None:
                    total_roi = spec.roi
                elif spec.roi is not None:
                    total_roi = total_roi.union(spec.roi)
        return total_roi

    def get_common_roi(self):
        """Get the intersection of all the requested ROIs."""
        # NOTE(review): unlike get_total_roi, specs with a None ROI are not
        # skipped here and would fail the intersect call — confirm whether
        # callers guarantee all ROIs are set.
        common_roi = None
        for specs_type in [self.array_specs, self.graph_specs]:
            for _, spec in specs_type.items():
                if common_roi is None:
                    common_roi = spec.roi
                else:
                    common_roi = common_roi.intersect(spec.roi)
        return common_roi

    def get_lcm_voxel_size(self, array_keys=None):
        """Get the least common multiple of the voxel sizes in this spec.

        Args:

            array_keys (list of :class:`ArrayKey`, optional): If given,
                consider only the given array types.
        """
        if array_keys is None:
            array_keys = self.array_specs.keys()
        if not array_keys:
            return None
        lcm_voxel_size = None
        for key in array_keys:
            voxel_size = self.array_specs[key].voxel_size
            # Specs may leave the voxel size undefined; skip those.
            if voxel_size is None:
                continue
            if lcm_voxel_size is None:
                lcm_voxel_size = voxel_size
            else:
                # Per-dimension lcm(a, b) = a * b // gcd(a, b).
                lcm_voxel_size = Coordinate(
                    (
                        a * b // math.gcd(a, b)
                        for a, b in zip(lcm_voxel_size, voxel_size)
                    )
                )
        return lcm_voxel_size

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            # Compare deep copies so that comparison cannot mutate either
            # side through spec __eq__ implementations.
            other_dict = copy.deepcopy(other.__dict__)
            self_dict = copy.deepcopy(self.__dict__)
            return self_dict == other_dict
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, self.__class__):
            return not self.__eq__(other)
        return NotImplemented

    def __repr__(self):
        r = "\n"
        for key, spec in self.items():
            r += "\t%s: %s\n" % (key, spec)
        return r
| 7,634 | 29.662651 | 88 | py |
gunpowder | gunpowder-master/gunpowder/array.py | from .freezable import Freezable
from copy import deepcopy
from gunpowder.coordinate import Coordinate
from gunpowder.roi import Roi
import logging
import numpy as np
import copy
logger = logging.getLogger(__name__)
class Array(Freezable):
"""A numpy array with a specification describing the data.
Args:
data (array-like):
The data to be stored in the array. Will be converted to a numpy
array, if necessary.
spec (:class:`ArraySpec`, optional):
A spec describing the data.
attrs (``dict``, optional):
Optional attributes to describe this array.
"""
def __init__(self, data, spec=None, attrs=None):
self.spec = deepcopy(spec)
self.data = np.asarray(data)
self.attrs = attrs
if attrs is None:
self.attrs = {}
if spec is not None and spec.roi is not None and spec.voxel_size is not None:
for d in range(len(spec.voxel_size)):
assert (
spec.voxel_size[d] * data.shape[-spec.roi.dims + d]
== spec.roi.shape[d]
), "ROI %s does not align with voxel size %s * data shape %s" % (
spec.roi,
spec.voxel_size,
data.shape,
)
if spec.roi.offset[d] is not None:
assert (
spec.roi.offset[d] % spec.voxel_size[d] == 0
), "ROI offset %s must be a multiple of voxel size %s" % (
spec.roi.offset,
spec.voxel_size,
)
if spec.dtype is not None:
assert (
data.dtype == spec.dtype
), "data dtype %s does not match spec dtype %s" % (data.dtype, spec.dtype)
self.freeze()
def crop(self, roi, copy=True):
"""Create a cropped copy of this Array.
Args:
roi (:class:`Roi`):
ROI in world units to crop to.
copy (``bool``):
Make a copy of the data.
"""
assert self.spec.roi.contains(
roi
), "Requested crop ROI (%s) doesn't fit in array (%s)" % (roi, self.spec.roi)
if self.spec.roi == roi and not copy:
return self
voxel_size = self.spec.voxel_size
data_roi = (roi - self.spec.roi.offset) / voxel_size
slices = data_roi.get_bounding_box()
while len(slices) < len(self.data.shape):
slices = (slice(None),) + slices
data = self.data[slices]
if copy:
data = np.array(data)
spec = deepcopy(self.spec)
attrs = deepcopy(self.attrs)
spec.roi = deepcopy(roi)
return Array(data, spec, attrs)
def merge(self, array, copy_from_self=False, copy=False):
"""Merge this array with another one. The resulting array will have the
size of the larger one, with values replaced from ``array``.
This only works if one of the two arrays is contained in the other. In
this case, ``array`` will overwrite values in ``self`` (unless
``copy_from_self`` is set to ``True``).
A copy will only be made if necessary or ``copy`` is set to ``True``.
"""
# It is unclear how to merge arrays in all cases. Consider a 10x10 array,
# you crop out a 5x5 area, do a shift augment, and attempt to merge.
# What does that mean? specs have changed. It should be a new key.
raise NotImplementedError("Merge function should not be used!")
self_roi = self.spec.roi
array_roi = array.spec.roi
assert self_roi.contains(array_roi) or array_roi.contains(
self_roi
), "Can not merge arrays that are not contained in each other."
assert (
self.spec.voxel_size == array.spec.voxel_size
), "Can not merge arrays with different voxel sizes."
# make sure self contains array
if not self_roi.contains(array_roi):
return array.merge(self, not copy_from_self, copy)
# -> here we know that self contains array
# simple case, self overwrites all of array
if copy_from_self:
return self if not copy else deepcopy(self)
# -> here we know that copy_from_self == False
# simple case, ROIs are the same
if self_roi == array_roi:
return array if not copy else deepcopy(array)
# part of self have to be replaced, a copy is needed
merged = deepcopy(self)
voxel_size = self.spec.voxel_size
data_roi = (array_roi - self_roi.offset) / voxel_size
slices = data_roi.get_bounding_box()
while len(slices) < len(self.data.shape):
slices = (slice(None),) + slices
merged.data[slices] = array.data
return merged
def __repr__(self):
return str(self.spec)
def copy(self):
"""Create a copy of this array."""
return copy.deepcopy(self)
class ArrayKey(Freezable):
"""A key to identify arrays in requests, batches, and across nodes.
Used as key in :class:`BatchRequest` and :class:`Batch` to retrieve array
specs or arrays.
Args:
identifier (``string``):
A unique, human readable identifier for this array key. Will be
used in log messages and to look up arrays in requests and batches.
Should be upper case (like ``RAW``, ``GT_LABELS``). The identifier
is unique: Two array keys with the same identifier will refer to
the same array.
"""
def __init__(self, identifier):
self.identifier = identifier
self.hash = hash(identifier)
self.freeze()
logger.debug("Registering array key %s", self)
setattr(ArrayKeys, self.identifier, self)
def __eq__(self, other):
return hasattr(other, "identifier") and self.identifier == other.identifier
def __hash__(self):
return self.hash
def __repr__(self):
return self.identifier
class ArrayKeys:
"""Convenience access to all created :class:``ArrayKey``s. A key generated
with::
raw = ArrayKey('RAW')
can be retrieved as::
ArrayKeys.RAW
"""
pass
| 6,344 | 29.07109 | 86 | py |
gunpowder | gunpowder-master/gunpowder/batch.py | from copy import copy as shallow_copy
import logging
import multiprocessing
import warnings
from .freezable import Freezable
from .profiling import ProfilingStats
from .array import Array, ArrayKey
from .graph import Graph, GraphKey
logger = logging.getLogger(__name__)
class Batch(Freezable):
    """Contains the requested batch as a collection of :class:`Arrays<Array>`
    and :class:`Graph` that is passed through the pipeline from sources to
    sinks.

    This collection mimics a dictionary. Items can be added with::

        batch = Batch()
        batch[array_key] = Array(...)
        batch[graph_key] = Graph(...)

    Here, ``array_key`` and ``graph_key`` are :class:`ArrayKey` and
    :class:`GraphKey`. The items can be queried with::

        array = batch[array_key]
        graph = batch[graph_key]

    Furthermore, pairs of keys/values can be iterated over using
    ``batch.items()``.

    To access only arrays or graphs, use the dictionaries ``batch.arrays``
    or ``batch.graphs``, respectively.

    Attributes:

        arrays (dict from :class:`ArrayKey` to :class:`Array`):
            Contains all arrays that have been requested for this batch.

        graphs (dict from :class:`GraphKey` to :class:`Graph`):
            Contains all graphs that have been requested for this batch.
    """

    # Process-shared counter used to give every Batch a unique id.
    __next_id = multiprocessing.Value("L")

    @staticmethod
    def get_next_id():
        # Lock-protected read-and-increment so ids stay unique across
        # workers sharing this Value.
        with Batch.__next_id.get_lock():
            next_id = Batch.__next_id.value
            Batch.__next_id.value += 1
        return next_id

    def __init__(self):
        self.id = Batch.get_next_id()
        self.profiling_stats = ProfilingStats()
        self.arrays = {}
        self.graphs = {}
        self.affinity_neighborhood = None
        self.loss = None
        self.iteration = None
        self.freeze()

    def __setitem__(self, key, value):
        if isinstance(value, Array):
            assert isinstance(
                key, ArrayKey
            ), "Only a ArrayKey is allowed as key for an Array value."
            self.arrays[key] = value
        elif isinstance(value, Graph):
            assert isinstance(
                key, GraphKey
            ), "Only a GraphKey is allowed as key for Graph value."
            self.graphs[key] = value
        else:
            raise RuntimeError(
                "Only Array or Graph can be set in a %s." % type(self).__name__
            )

    def __getitem__(self, key):
        if isinstance(key, ArrayKey):
            return self.arrays[key]
        elif isinstance(key, GraphKey):
            return self.graphs[key]
        else:
            raise RuntimeError(
                "Only ArrayKey or GraphKey can be used as keys in a "
                "%s." % type(self).__name__
            )

    def __len__(self):
        return len(self.arrays) + len(self.graphs)

    def __contains__(self, key):
        if isinstance(key, ArrayKey):
            return key in self.arrays
        elif isinstance(key, GraphKey):
            return key in self.graphs
        else:
            raise RuntimeError(
                "Only ArrayKey or GraphKey can be used as keys in a "
                "%s. Key %s is a %s" % (type(self).__name__, key, type(key).__name__)
            )

    def __delitem__(self, key):
        if isinstance(key, ArrayKey):
            del self.arrays[key]
        elif isinstance(key, GraphKey):
            del self.graphs[key]
        else:
            raise RuntimeError(
                "Only ArrayKey or GraphKey can be used as keys in a "
                "%s." % type(self).__name__
            )

    def items(self):
        """Provides a generator iterating over key/value pairs."""
        for k, v in self.arrays.items():
            yield k, v
        for k, v in self.graphs.items():
            yield k, v

    def get_total_roi(self):
        """Get the union of all the array ROIs in the batch."""
        total_roi = None
        for _, array in self.arrays.items():
            # Non-spatial arrays carry no ROI and are skipped.
            if not array.spec.nonspatial:
                if total_roi is None:
                    total_roi = array.spec.roi
                else:
                    total_roi = total_roi.union(array.spec.roi)
        for _, graph in self.graphs.items():
            if total_roi is None:
                total_roi = graph.spec.roi
            else:
                total_roi = total_roi.union(graph.spec.roi)
        return total_roi

    def __repr__(self):
        r = "\n"
        for collection_type in [self.arrays, self.graphs]:
            for key, obj in collection_type.items():
                r += "\t%s: %s\n" % (key, obj.spec)
        return r

    def crop(self, request, copy=False):
        """Crop batch to meet the given request.

        Every key in ``request`` must already be present in this batch. A
        request entry with a ``None`` ROI keeps the item as-is.
        """
        cropped = Batch()
        cropped.profiling_stats = self.profiling_stats
        cropped.loss = self.loss
        cropped.iteration = self.iteration
        for key, val in request.items():
            assert key in self, "%s not contained in this batch" % key
            if val.roi is None:
                cropped[key] = self[key]
            else:
                if isinstance(key, GraphKey):
                    cropped[key] = self[key].crop(val.roi)
                else:
                    cropped[key] = self[key].crop(val.roi, copy)
        return cropped

    def merge(self, batch, merge_profiling_stats=True):
        """Merge this batch (``a``) with another batch (``b``).

        This creates a new batch ``c`` containing arrays and graphs from
        both batches ``a`` and ``b``:

        * Arrays or Graphs that exist in either ``a`` or ``b`` will be
          referenced in ``c`` (not copied).
        * Arrays or Graphs that exist in both batches will keep only
          a reference to the version in ``b`` in ``c``.

        All other cases will lead to an exception.
        """
        # NOTE(review): shallow_copy shares the arrays/graphs dicts with
        # self, so assignments below also appear in self — confirm whether
        # callers rely on this.
        merged = shallow_copy(self)

        # Items from ``batch`` always take precedence. The previous
        # implementation branched on ``key not in merged or
        # val.spec.roi is None`` followed by ``elif key in merged``, but
        # every branch performed the same assignment, so it collapses to an
        # unconditional overwrite.
        for key, val in batch.items():
            merged[key] = val

        if merge_profiling_stats:
            merged.profiling_stats.merge_with(batch.profiling_stats)
        if batch.loss is not None:
            merged.loss = batch.loss
        if batch.iteration is not None:
            merged.iteration = batch.iteration
        return merged
| 6,565 | 29.398148 | 85 | py |
gunpowder | gunpowder-master/gunpowder/profiling.py | import copy
import numpy as np
import time
from .freezable import Freezable
class Timing(Freezable):
    """Accumulate wall-clock time for one node (and optionally one method).

    Repeated ``start()``/``stop()`` pairs add up into a single elapsed
    total; the first start and last stop are remembered as the span.
    """

    def __init__(self, node, method_name=None):
        self.__node_name = type(node).__name__
        self.__method = method_name
        self.__running_since = 0
        self.__first_start = 0
        self.__last_stop = 0
        self.__accumulated = 0
        self.freeze()

    def start(self):
        self.__running_since = time.time()
        # Remember the very first start for span().
        if self.__first_start == 0:
            self.__first_start = self.__running_since

    def stop(self):
        # A stop() without a matching start() is silently ignored.
        if self.__running_since == 0:
            return
        now = time.time()
        self.__accumulated += now - self.__running_since
        self.__running_since = 0
        self.__last_stop = now

    def elapsed(self):
        """Accumulated time elapsed between calls to start() and stop()."""
        if self.__running_since == 0:
            return self.__accumulated
        # Timer is currently running: include time since the last start().
        return self.__accumulated + (time.time() - self.__running_since)

    def span(self):
        """Timestamps of the first call to start() and last call to stop()."""
        return self.__first_start, self.__last_stop

    def get_node_name(self):
        return self.__node_name

    def get_method_name(self):
        return self.__method
class TimingSummary(Freezable):
    """Collect repeated Timings of the same node/method and expose simple
    statistics over their elapsed times."""

    def __init__(self):
        self.timings = []
        self.times = []
        self.freeze()

    def add(self, timing):
        """Add a Timing to this summary."""
        self.timings.append(timing)
        # Cache the elapsed time at the moment of insertion.
        self.times.append(timing.elapsed())

    def merge(self, other):
        """Merge another summary into this one."""
        for other_timing in other.timings:
            self.add(other_timing)

    def counts(self):
        return len(self.times)

    def min(self):
        return np.min(self.times)

    def max(self):
        return np.max(self.times)

    def mean(self):
        return np.mean(self.times)

    def median(self):
        return np.median(self.times)
class ProfilingStats(Freezable):
    """Aggregate :class:`TimingSummary` objects keyed by
    ``(node_name, method_name)``."""

    def __init__(self):
        self.__summaries = {}
        self.freeze()

    def add(self, timing):
        """Add a Timing instance. Timings are grouped by their class and method names."""
        key = (timing.get_node_name(), timing.get_method_name())
        summary = self.__summaries.setdefault(key, TimingSummary())
        # Store a copy so later mutations of the timing do not affect us.
        summary.add(copy.deepcopy(timing))

    def merge_with(self, other):
        """Combine statitics of two ProfilingStats."""
        for key, summary in other.__summaries.items():
            summary_copy = copy.deepcopy(summary)
            if key in self.__summaries:
                self.__summaries[key].merge(summary_copy)
            else:
                self.__summaries[key] = summary_copy

    def get_timing_summaries(self):
        """Get a dictionary (node_name,method_name) -> TimingSummary."""
        return self.__summaries

    def get_timing_summary(self, node_name, method_name=None):
        """Get a :class:`TimingSummary` for the given node and method name."""
        key = (node_name, method_name)
        if key not in self.__summaries:
            raise RuntimeError(
                "No timing summary for node %s, method %s" % (node_name, method_name)
            )
        return self.__summaries[key]

    def span(self):
        """Timestamps of the first call to start() and last call to stop() over
        all Timings added."""
        spans = [
            timing.span()
            for summary in self.__summaries.values()
            for timing in summary.timings
        ]
        first_start = min(span[0] for span in spans)
        last_stop = max(span[1] for span in spans)
        return first_start, last_stop

    def span_time(self):
        """Time between the first call to start() and last call to stop() over
        any timing."""
        start, stop = self.span()
        return stop - start
| 3,969 | 26.762238 | 89 | py |
gunpowder | gunpowder-master/gunpowder/graph.py | from .graph_spec import GraphSpec
from .roi import Roi
from .freezable import Freezable
import numpy as np
import networkx as nx
from copy import deepcopy
from typing import Dict, Optional, Set, Iterator, Any
import logging
import itertools
import warnings
logger = logging.getLogger(__name__)
class Node(Freezable):
    """
    A structure representing each node in a Graph.

    Args:
        id (``int``):
            A unique identifier for this Node

        location (``np.ndarray``):
            A numpy array containing a nodes location

        Optional attrs (``dict``, str -> ``Any``):
            A dictionary containing a mapping from attribute to value.
            Used to store any extra attributes associated with the
            Node such as color, size, etc.

        Optional temporary (bool):
            A tag to mark a node as temporary. Some operations such
            as `trim` might make new nodes that are just byproducts
            of viewing the data with a limited scope. These nodes
            are only guaranteed to have an id different from those
            in the same Graph, but may have conflicts if you request
            multiple graphs from the same source with different rois.
    """

    def __init__(
        self,
        id: int,
        location: np.ndarray,
        temporary: bool = False,
        attrs: Optional[Dict[str, Any]] = None,
    ):
        self.__attrs = attrs if attrs is not None else {}
        # id, location and temporary all live in the attrs dict so they
        # travel together with any user-provided attributes.
        self.attrs["id"] = id
        self.location = location
        # purpose is to keep track of nodes that were created during
        # processing and do not have a corresponding node in the original
        # source
        self.attrs["temporary"] = temporary
        self.freeze()

    def __getattr__(self, attr):
        # Only invoked when normal attribute lookup fails: plain names are
        # served from the attrs dict, while dunder-style names must raise
        # AttributeError so protocols like copy/pickle keep working.
        # (Previously this fell through to super().__getattr__, which does
        # not exist on object and produced a confusing AttributeError about
        # 'super'.)
        if "__" not in attr:
            # NOTE(review): a missing plain attribute raises KeyError here
            # rather than AttributeError; preserved for backward
            # compatibility.
            return self.attrs[attr]
        raise AttributeError(
            f"'{type(self).__name__}' object has no attribute '{attr}'"
        )

    def __setattr__(self, attr, value):
        # Mirror __getattr__: plain names go into the attrs dict, while
        # private/mangled names (containing "__") use normal assignment.
        if "__" not in attr:
            self.attrs[attr] = value
        else:
            super().__setattr__(attr, value)

    @property
    def location(self):
        location = self.attrs["location"]
        return location

    @location.setter
    def location(self, new_location):
        assert isinstance(new_location, np.ndarray)
        self.attrs["location"] = new_location

    @property
    def id(self):
        return self.attrs["id"]

    @property
    def original_id(self):
        # Temporary nodes have no counterpart in the original source.
        return self.id if not self.temporary else None

    @property
    def temporary(self):
        return self.attrs["temporary"]

    @property
    def attrs(self):
        return self.__attrs

    @property
    def all(self):
        return self.attrs

    @classmethod
    def from_attrs(cls, attrs: Dict[str, Any]):
        """Alternate constructor from a raw attribute dictionary."""
        node_id = attrs["id"]
        location = attrs["location"]
        temporary = attrs.get("temporary", False)
        return cls(id=node_id, location=location, temporary=temporary, attrs=attrs)

    def __str__(self):
        return f"Node({self.temporary}) ({self.id}) at ({self.location})"

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        # Equality (and hashing) is by id only; attributes are ignored.
        return isinstance(other, Node) and self.id == other.id

    def __hash__(self):
        return hash(self.id)
class Edge(Freezable):
    """
    A structure representing edges in a graph.

    Args:
        u (``int``)
            The id of the 'u' node of this edge
        v (``int``)
            the id of the `v` node of this edge
        attrs (``dict``, str -> ``Any``, optional):
            Extra attributes associated with this edge.
    """

    def __init__(self, u: int, v: int, attrs: Optional[Dict[str, Any]] = None):
        self.__u = u
        self.__v = v
        self.__attrs = attrs if attrs is not None else {}
        self.freeze()

    @property
    def u(self):
        return self.__u

    @property
    def v(self):
        return self.__v

    @property
    def all(self):
        # full attribute dictionary of this edge
        return self.__attrs

    def __iter__(self):
        # allows tuple-unpacking: ``u, v = edge``
        return iter([self.u, self.v])

    def __str__(self):
        return f"({self.u}, {self.v})"

    def __repr__(self):
        return f"({self.u}, {self.v})"

    def __eq__(self, other):
        # guard with isinstance (consistent with Node.__eq__) so that
        # comparing against a non-Edge returns False instead of raising
        # AttributeError
        return isinstance(other, Edge) and self.u == other.u and self.v == other.v

    def __hash__(self):
        return hash((self.u, self.v))

    def directed_eq(self, other):
        # equality that respects edge direction
        return self.u == other.u and self.v == other.v

    def undirected_eq(self, other):
        # equality that ignores edge direction
        return set([self.u, self.v]) == set([other.u, other.v])
class Graph(Freezable):
    """A structure containing a list of :class:`Node`, a list of :class:`Edge`,
    and a specification describing the data.

    Args:
        nodes (``iterator``, :class:`Node`):
            An iterator containing Vertices.
        edges (``iterator``, :class:`Edge`):
            An iterator containing Edges.
        spec (:class:`GraphSpec`):
            A spec describing the data.
    """

    def __init__(self, nodes: Iterator[Node], edges: Iterator[Edge], spec: GraphSpec):
        self.__spec = spec
        self.__graph = self.create_graph(nodes, edges)

    @property
    def spec(self):
        return self.__spec

    @spec.setter
    def spec(self, new_spec):
        self.__spec = new_spec

    @property
    def directed(self):
        # if the spec leaves directionality unspecified, fall back to the
        # directionality of the underlying networkx graph
        return (
            self.spec.directed
            if self.spec.directed is not None
            else self.__graph.is_directed()
        )

    def create_graph(self, nodes: Iterator[Node], edges: Iterator[Edge]):
        """Create the underlying networkx graph from ``nodes`` and ``edges``."""
        if self.__spec.directed is None:
            logger.debug(
                "Trying to create a Graph without specifying directionality. Using default Directed!"
            )
            graph = nx.DiGraph()
        elif self.__spec.directed:
            graph = nx.DiGraph()
        else:
            graph = nx.Graph()

        # materialize the iterator: ``nodes`` is traversed twice below, and
        # a plain generator would be exhausted after the first traversal,
        # silently producing a graph without nodes
        nodes = list(nodes)
        for node in nodes:
            node.location = node.location.astype(self.spec.dtype)

        graph.add_nodes_from([(v.id, v.all) for v in nodes])
        graph.add_edges_from([(e.u, e.v, e.all) for e in edges])
        return graph

    @property
    def nodes(self):
        """Iterate over all nodes as :class:`Node` objects. The Node wraps the
        live networkx attribute dictionary, so mutations write through."""
        for node_id, node_attrs in self.__graph.nodes.items():
            if "id" not in node_attrs:
                node_attrs["id"] = node_id
            v = Node.from_attrs(node_attrs)
            if not np.issubdtype(v.location.dtype, self.spec.dtype):
                raise Exception(
                    f"expected location to have dtype {self.spec.dtype} but it had {v.location.dtype}"
                )
            yield v

    def num_vertices(self):
        return self.__graph.number_of_nodes()

    def num_edges(self):
        return self.__graph.number_of_edges()

    @property
    def edges(self):
        for (u, v), attrs in self.__graph.edges.items():
            yield Edge(u, v, attrs)

    def neighbors(self, node):
        """Iterate over all neighbors of ``node``. For directed graphs, both
        successors and predecessors are yielded; a node that is both will be
        yielded twice."""
        if self.directed:
            for neighbor in self.__graph.successors(node.id):
                yield Node.from_attrs(self.__graph.nodes[neighbor])
        if self.directed:
            for neighbor in self.__graph.predecessors(node.id):
                yield Node.from_attrs(self.__graph.nodes[neighbor])
        else:
            for neighbor in self.__graph.neighbors(node.id):
                yield Node.from_attrs(self.__graph.nodes[neighbor])

    def __str__(self):
        string = "Vertices:\n"
        for node in self.nodes:
            string += f"{node}\n"
        string += "Edges:\n"
        for edge in self.edges:
            string += f"{edge}\n"
        return string

    def __repr__(self):
        return str(self)

    def node(self, id: int):
        """
        Get node with a specific id
        """
        attrs = self.__graph.nodes[id]
        return Node.from_attrs(attrs)

    def contains(self, node_id: int):
        return node_id in self.__graph.nodes

    def remove_node(self, node: Node, retain_connectivity=False):
        """
        Remove a node.

        retain_connectivity: preserve removed nodes neighboring edges.
        Given graph: a->b->c, removing `b` without retain_connectivity
        would leave us with two connected components, {'a'} and {'b'}.
        removing 'b' with retain_connectivity flag set to True would
        leave us with the graph: a->c, and only one connected component
        {a, c}, thus preserving the connectivity of 'a' and 'c'
        """
        if retain_connectivity:
            predecessors = self.predecessors(node)
            # materialize: ``successors`` is a generator and is iterated once
            # per predecessor below; left un-materialized it would be
            # exhausted after the first predecessor, dropping edges
            successors = list(self.successors(node))
            for pred_id in predecessors:
                for succ_id in successors:
                    if pred_id != succ_id:
                        self.add_edge(Edge(pred_id, succ_id))
        self.__graph.remove_node(node.id)

    def add_node(self, node: Node):
        """
        Adds a node to the graph.
        If a node exists with the same id as the node you are adding,
        its attributes will be overwritten.
        """
        node.location = node.location.astype(self.spec.dtype)
        self.__graph.add_node(node.id, **node.all)

    def remove_edge(self, edge: Edge):
        """
        Remove an edge from the graph.
        """
        self.__graph.remove_edge(edge.u, edge.v)

    def add_edge(self, edge: Edge):
        """
        Adds an edge to the graph.
        If an edge exists with the same u and v, its attributes
        will be overwritten.
        """
        self.__graph.add_edge(edge.u, edge.v, **edge.all)

    def copy(self):
        return deepcopy(self)

    def crop(self, roi: Roi):
        """
        Will remove all nodes from self that are not contained in `roi` except for
        "dangling" nodes. This means that if there are nodes A, B s.t. there
        is an edge (A, B) and A is contained in `roi` but B is not, the edge (A, B)
        is considered contained in the `roi` and thus node B will be kept as a
        "dangling" node.
        Note there is a helper function `trim` that will remove B and replace it with
        a node at the intersection of the edge (A, B) and the bounding box of `roi`.

        Args:
            roi (:class:`Roi`):
                ROI in world units to crop to.
        """
        cropped = self.copy()

        # nodes whose location falls inside the roi
        contained_nodes = set(
            v.id for v in cropped.nodes if roi.contains(v.location)
        )
        # edges with at least one endpoint inside the roi; their outside
        # endpoints are kept as "dangling" nodes
        contained_edges = set(
            e
            for e in cropped.edges
            if e.u in contained_nodes or e.v in contained_nodes
        )
        keep_nodes = contained_nodes | set(itertools.chain(*contained_edges))

        for node in list(cropped.nodes):
            if node.id not in keep_nodes:
                cropped.remove_node(node)
        for edge in list(cropped.edges):
            if edge not in contained_edges:
                cropped.remove_edge(edge)

        cropped.spec.roi = roi
        return cropped

    def shift(self, offset):
        """Shift all node locations in place by ``offset``."""
        for node in self.nodes:
            node.location += offset

    def new_graph(self):
        """Create a new, empty networkx graph of the same directionality."""
        # NOTE: ``directed`` is a property; the previous implementation called
        # ``self.directed()``, raising "'bool' object is not callable"
        if self.directed:
            return nx.DiGraph()
        else:
            return nx.Graph()

    def trim(self, roi: Roi):
        """
        Create a copy of self and replace "dangling" nodes with contained nodes.
        A "dangling" node is defined by: Let A, B be nodes s.t. there exists an
        edge (A, B) and A is contained in `roi` but B is not. Edge (A, B) is considered
        contained, and thus B is kept as a "dangling" node.
        """
        trimmed = self.copy()

        contained_nodes = set(
            v.id for v in trimmed.nodes if roi.contains(v.location)
        )
        all_contained_edges = set(
            e
            for e in trimmed.edges
            if e.u in contained_nodes or e.v in contained_nodes
        )
        fully_contained_edges = set(
            e
            for e in all_contained_edges
            if e.u in contained_nodes and e.v in contained_nodes
        )
        partially_contained_edges = all_contained_edges - fully_contained_edges
        all_nodes = contained_nodes | set(itertools.chain(*all_contained_edges))

        # new (temporary) boundary nodes get ids that cannot collide with
        # the ids of any kept node
        next_node = 0 if len(all_nodes) == 0 else max(all_nodes) + 1

        trimmed._handle_boundaries(
            partially_contained_edges,
            contained_nodes,
            roi,
            node_id=itertools.count(next_node),
        )

        for node in trimmed.nodes:
            assert roi.contains(
                node.location
            ), f"Failed to properly contain node {node.id} at {node.location}"
        return trimmed

    def _handle_boundaries(
        self,
        crossing_edges: Iterator[Edge],
        contained_nodes: Set[int],
        roi: Roi,
        node_id: Iterator[int],
    ):
        """Replace the outside endpoint of each edge in ``crossing_edges``
        with a temporary node on the boundary of ``roi``."""
        nodes_to_remove = set([])
        for e in crossing_edges:
            u, v = self.node(e.u), self.node(e.v)
            u_in = u.id in contained_nodes
            v_in, v_out = (u, v) if u_in else (v, u)
            in_location, out_location = (v_in.location, v_out.location)
            new_location = self._roi_intercept(in_location, out_location, roi)
            if not all(np.isclose(new_location, in_location)):
                # use deepcopy because modifying this node should not modify original
                new_attrs = deepcopy(v_out.attrs)
                new_attrs["id"] = next(node_id)
                new_attrs["location"] = new_location
                new_attrs["temporary"] = True
                new_v = Node.from_attrs(new_attrs)
                # preserve the original edge direction
                new_e = Edge(
                    u=v_in.id if u_in else new_v.id, v=new_v.id if u_in else v_in.id
                )
                self.add_node(new_v)
                self.add_edge(new_e)
            nodes_to_remove.add(v_out)
        for node in nodes_to_remove:
            self.remove_node(node)

    def _roi_intercept(
        self, inside: np.ndarray, outside: np.ndarray, bb: Roi
    ) -> np.ndarray:
        """
        Given two points, one inside a bounding box and one outside,
        get the intercept between the line and the bounding box.
        """
        offset = outside - inside
        distance = np.linalg.norm(offset)
        assert not np.isclose(distance, 0), "Inside and Outside are the same location"
        direction = offset / distance

        # `offset` can be 0 on some but not all axes leaving a 0 in the denominator.
        # `inside` can be on the bounding box, leaving a 0 in the numerator.
        # `x/0` throws a division warning, `0/0` throws an invalid warning (both are fine here)
        with np.errstate(divide="ignore", invalid="ignore"):
            bb_x = np.asarray(
                [
                    (np.asarray(bb.begin) - inside) / offset,
                    (np.asarray(bb.end) - inside) / offset,
                ],
                dtype=self.spec.dtype,
            )

        # smallest fraction s in [0, 1] along the line that hits a face
        with np.errstate(invalid="ignore"):
            s = np.min(bb_x[np.logical_and((bb_x >= 0), (bb_x <= 1))])

        new_location = inside + s * distance * direction
        upper = np.array(bb.end, dtype=self.spec.dtype)
        # nudge the point just below the (exclusive) upper bound so that
        # roi.contains holds for the new location
        new_location = np.clip(
            new_location, bb.begin, upper - upper * np.finfo(self.spec.dtype).eps
        )
        return new_location

    def merge(self, other, copy_from_self=False, copy=False):
        """
        Merge this graph with another. The resulting graph will have the Roi
        of the larger one.
        This only works if one of the two graphs contains the other.
        In this case, ``other`` will overwrite edges and nodes with the same
        ID in ``self`` (unless ``copy_from_self`` is set to ``True``).
        Vertices and edges in ``self`` that are contained in the Roi of ``other``
        will be removed (vice versa for ``copy_from_self``)
        A copy will only be made if necessary or ``copy`` is set to ``True``.
        """
        # It is unclear how to merge points in all cases. Consider a 10x10 graph,
        # you crop out a 5x5 area, do a shift augment, and attempt to merge.
        # What does that mean? specs have changed. It should be a new key.
        raise NotImplementedError("Merge function should not be used!")

        # unreachable: kept for reference should merging be re-enabled
        self_roi = self.spec.roi
        other_roi = other.spec.roi
        assert self_roi.contains(other_roi) or other_roi.contains(
            self_roi
        ), "Can not merge graphs that are not contained in each other."
        # make sure self contains other
        if not self_roi.contains(other_roi):
            return other.merge(self, not copy_from_self, copy)
        # edges and nodes in addition are guaranteed to be in merged
        base = other if copy_from_self else self
        addition = self if copy_from_self else other
        if copy:
            merged = deepcopy(base)
        else:
            merged = base
        for node in list(merged.nodes):
            if merged.spec.roi.contains(node.location):
                merged.remove_node(node)
        for edge in list(merged.edges):
            if merged.spec.roi.contains(
                merged.node(edge.u)
            ) or merged.spec.roi.contains(merged.node(edge.v)):
                merged.remove_edge(edge)
        for node in addition.nodes:
            merged.add_node(node)
        for edge in addition.edges:
            merged.add_edge(edge)
        return merged

    def to_nx_graph(self):
        """
        returns a pure networkx graph containing data from
        this Graph.
        """
        return deepcopy(self.__graph)

    @classmethod
    def from_nx_graph(cls, graph, spec):
        """
        Create a gunpowder graph from a networkx graph.
        The network graph is expected to have a "location"
        attribute for each node. If it is a subclass of a networkx
        graph with extra functionality, this may not work.
        """
        if spec.directed is None:
            spec.directed = graph.is_directed()
        g = cls([], [], spec)
        g.__graph = graph
        return g

    def relabel_connected_components(self):
        """
        create a new attribute "component" for each node
        in this Graph
        """
        for i, wcc in enumerate(self.connected_components):
            for node in wcc:
                self.__graph.nodes[node]["component"] = i

    @property
    def connected_components(self):
        if not self.directed:
            return nx.connected_components(self.__graph)
        else:
            return nx.weakly_connected_components(self.__graph)

    def in_degree(self):
        return self.__graph.in_degree()

    def successors(self, node):
        if self.directed:
            return self.__graph.successors(node.id)
        else:
            return self.__graph.neighbors(node.id)

    def predecessors(self, node):
        if self.directed:
            return self.__graph.predecessors(node.id)
        else:
            return self.__graph.neighbors(node.id)
class GraphKey(Freezable):
    """A unique, human readable identifier for graphs in requests, batches,
    and across nodes.

    Used as key in :class:`BatchRequest` and :class:`Batch` to retrieve specs
    or graphs. Identifiers should be upper case (like ``CENTER_GRAPH``); two
    keys with the same identifier refer to the same graph.

    Args:
        identifier (``string``):
            The unique identifier of this graph key.
    """

    def __init__(self, identifier):
        self.identifier = identifier
        self.hash = hash(identifier)
        self.freeze()
        logger.debug("Registering graph type %s", self)
        # make the key available for convenient lookup as
        # ``GraphKeys.<identifier>``
        setattr(GraphKeys, self.identifier, self)

    def __eq__(self, other):
        # any object carrying the same identifier compares equal
        if not hasattr(other, "identifier"):
            return False
        return self.identifier == other.identifier

    def __hash__(self):
        return self.hash

    def __repr__(self):
        return self.identifier
class GraphKeys:
    """Convenience access to all created :class:`GraphKey`s. A key generated
    with::
        centers = GraphKey('CENTER_GRAPH')
    can be retrieved as::
        GraphKeys.CENTER_GRAPH
    """
    # keys are attached as class attributes by GraphKey.__init__ via setattr
    pass
| 20,695 | 30.028486 | 102 | py |
gunpowder | gunpowder-master/gunpowder/array_spec.py | import copy
from .coordinate import Coordinate
from .freezable import Freezable
class ArraySpec(Freezable):
    """Contains meta-information about an array. This is used by
    :class:`BatchProviders<BatchProvider>` to communicate the arrays they
    offer, as well as by :class:`Arrays<Array>` to describe the data they
    contain.
    Attributes:
        roi (:class:`Roi`):
            The region of interested represented by this array spec. Can be
            ``None`` for :class:`BatchProviders<BatchProvider>` that allow
            requests for arrays everywhere, but will always be set for array
            specs that are part of a :class:`Array`.
        voxel_size (:class:`Coordinate`):
            The size of the spatial axises in world units.
        interpolatable (``bool``):
            Whether the values of this array can be interpolated.
        nonspatial (``bool``, optional):
            If set, this array does not represent spatial data (e.g., a list of
            labels for samples in a batch). ``roi`` and ``voxel_size`` have to
            be ``None``. No consistency checks will be performed.
        dtype (``np.dtype``):
            The data type of the array.
    """
    def __init__(
        self,
        roi=None,
        voxel_size=None,
        interpolatable=None,
        nonspatial=False,
        dtype=None,
        placeholder=False,
    ):
        self.roi = roi
        # normalize to a Coordinate so arithmetic with rois works
        self.voxel_size = None if voxel_size is None else Coordinate(voxel_size)
        self.interpolatable = interpolatable
        self.nonspatial = nonspatial
        self.dtype = dtype
        self.placeholder = placeholder
        if nonspatial:
            assert roi is None, "Non-spatial arrays can not have a ROI"
            assert voxel_size is None, "Non-spatial arrays can not " "have a voxel size"
        self.freeze()
    def update_with(self, spec):
        """Update this spec in place: rois are merged by union, all other
        fields are overwritten with the values from ``spec`` when set."""
        if self.roi is not None and spec.roi is not None:
            self.roi = self.roi.union(spec.roi)
        elif spec.roi is not None:
            self.roi = spec.roi
        if spec.voxel_size is not None:
            self.voxel_size = spec.voxel_size
        if spec.interpolatable is not None:
            self.interpolatable = spec.interpolatable
        # NOTE(review): ``nonspatial`` and ``placeholder`` default to bools
        # (never None), so the two checks below are always true and the
        # values are unconditionally copied from ``spec`` -- confirm intended
        if spec.nonspatial is not None:
            self.nonspatial = spec.nonspatial
        if spec.dtype is not None:
            self.dtype = spec.dtype
        if spec.placeholder is not None:
            self.placeholder = spec.placeholder
    def copy(self):
        """Create a copy of this spec."""
        return copy.deepcopy(self)
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented
    def __ne__(self, other):
        if isinstance(other, self.__class__):
            return not self.__eq__(other)
        return NotImplemented
    def __repr__(self):
        r = ""
        r += "ROI: " + str(self.roi) + ", "
        r += "voxel size: " + str(self.voxel_size) + ", "
        r += "interpolatable: " + str(self.interpolatable) + ", "
        r += "non-spatial: " + str(self.nonspatial) + ", "
        r += "dtype: " + str(self.dtype) + ", "
        r += "placeholder: " + str(self.placeholder)
        return r
| 3,293 | 30.075472 | 88 | py |
gunpowder | gunpowder-master/gunpowder/ndarray.py | import numpy as np
def replace(array, old_values, new_values):
    """Replace all occurences of ``old_values[i]`` with ``new_values[i]`` in the
    given array.

    Args:
        array (numpy array): array of non-negative integers.
        old_values (sequence of ``int``): values to be replaced.
        new_values (sequence): replacement values, aligned with
            ``old_values``; their dtype determines the output dtype.

    Returns:
        A new array with the replacements applied; values not listed in
        ``old_values`` are kept (cast to the dtype of ``new_values``).
    """
    old_values = np.asarray(old_values)
    new_values = np.asarray(new_values)
    if array.size == 0:
        # nothing to replace; preserve the output dtype contract
        return array.astype(new_values.dtype)
    # the lookup table has to cover the values present in the array AND the
    # requested old values -- sizing it by array.max() alone raised an
    # IndexError whenever an old value exceeded the array's maximum
    table_size = int(max(array.max(), old_values.max() if old_values.size else 0)) + 1
    values_map = np.arange(table_size, dtype=new_values.dtype)
    values_map[old_values] = new_values
    # vectorized replacement via fancy indexing
    return values_map[array]
| 387 | 24.866667 | 80 | py |
gunpowder | gunpowder-master/gunpowder/freezable.py | class Freezable(object):
__isfrozen = False
    def __setattr__(self, key, value):
        # once frozen, only existing attributes may be re-assigned; creating
        # a brand-new attribute (likely a typo) raises TypeError
        if self.__isfrozen and not hasattr(self, key):
            raise TypeError("%r is frozen, you can't add attributes to it" % self)
        object.__setattr__(self, key, value)
    def freeze(self):
        # from now on, __setattr__ rejects unknown attribute names
        self.__isfrozen = True
    def thaw(self):
        # re-allow adding new attributes
        self.__isfrozen = False
| 378 | 26.071429 | 82 | py |
gunpowder | gunpowder-master/gunpowder/batch_request.py | import copy
from .provider_spec import ProviderSpec
from .roi import Roi
from .array import ArrayKey
from .array_spec import ArraySpec
from .graph import GraphKey
from .graph_spec import GraphSpec
from warnings import warn
import time
class BatchRequest(ProviderSpec):
    """A collection of (possibly partial) :class:`ArraySpec` and
    :class:`GraphSpec` forming a request.
    Inherits from :class:`ProviderSpec`.
    See :ref:`sec_requests_batches` for how to use a batch request to obtain a
    batch.
    Additional Kwargs:
        random_seed (``int``):
            The random seed that will be associated with this batch to
            guarantee deterministic and repeatable batch requests.
    """
    def __init__(self, *args, random_seed=None, **kwargs):
        # fall back to a time-derived seed so requests created at different
        # times receive different seeds
        self._random_seed = (
            random_seed if random_seed is not None else int(time.time() * 1e6)
        )
        super().__init__(*args, **kwargs)
    def add(self, key, shape, voxel_size=None, directed=None, placeholder=False):
        """Convenience method to add an array or graph spec by providing only
        the shape of a ROI (in world units).
        A ROI with zero-offset will be generated. If more than one request is
        added, the ROIs with smaller shapes will be shifted to be centered in
        the largest one.
        Args:
            key (:class:`ArrayKey` or :class:`GraphKey`):
                The key for which to add a spec.
            shape (:class:`Coordinate`):
                A tuple containing the shape of the desired roi
            voxel_size (:class:`Coordinate`):
                A tuple containing the voxel sizes for each corresponding
                dimension
            directed (``bool``, optional):
                Only used for graph keys: directionality of the graph spec.
            placeholder (``bool``, optional):
                Mark the added spec as a placeholder.
        """
        if isinstance(key, ArrayKey):
            spec = ArraySpec(placeholder=placeholder)
        elif isinstance(key, GraphKey):
            spec = GraphSpec(placeholder=placeholder, directed=directed)
        else:
            raise RuntimeError("Only ArrayKey or GraphKey can be added.")
        spec.roi = Roi((0,) * len(shape), shape)
        if voxel_size is not None:
            spec.voxel_size = voxel_size
        self[key] = spec
        self.__center_rois()
    def copy(self):
        """Create a copy of this request."""
        return copy.deepcopy(self)
    @property
    def random_seed(self):
        # numpy random seeds must fit into an unsigned 32-bit integer
        return self._random_seed % (2**32)
    def _update_random_seed(self):
        # derive a new deterministic seed from the current one
        self._random_seed = hash((self._random_seed + 1) ** 2)
    def __center_rois(self):
        """Ensure that all ROIs are centered around the same location."""
        total_roi = self.get_total_roi()
        if total_roi is None:
            return
        center = total_roi.center
        for specs_type in [self.array_specs, self.graph_specs]:
            for key in specs_type:
                roi = specs_type[key].roi
                specs_type[key].roi = roi.shift(center - roi.center)
    def update_with(self, request):
        """Return a copy of this request updated with the specs of
        ``request``. Note: despite the name, ``self`` is not modified."""
        assert isinstance(request, BatchRequest)
        merged = self.copy()
        for key, spec in request.items():
            if key not in merged:
                merged[key] = spec
            else:
                merged[key].update_with(spec)
        return merged
    def merge(self, request):
        """Return a copy of this request merged with ``request``
        (deprecated; prefer :meth:`update_with`)."""
        warn(
            "merge is deprecated! please use update_with "
            "as it accounts for spec metadata"
        )
        assert isinstance(request, BatchRequest)
        merged = self.copy()
        for key, spec in request.items():
            if key not in merged:
                merged[key] = spec
            else:
                # nonspatial array specs have no roi to union; overwrite them
                if isinstance(spec, ArraySpec) and merged[key].nonspatial:
                    merged[key] = spec
                else:
                    merged[key].roi = merged[key].roi.union(spec.roi)
        return merged
    def __eq__(self, other):
        """
        Override equality check to allow batch requests with different
        seeds to still be checked. Otherwise equality check should
        never succeed.
        """
        if isinstance(other, self.__class__):
            other_dict = copy.deepcopy(other.__dict__)
            self_dict = copy.deepcopy(self.__dict__)
            other_dict.pop("_random_seed")
            self_dict.pop("_random_seed")
            return self_dict == other_dict
        return NotImplemented
| 4,468 | 28.596026 | 81 | py |
gunpowder | gunpowder-master/gunpowder/__init__.py | from __future__ import absolute_import
from .nodes import *
from .array import Array, ArrayKey, ArrayKeys
from .array_spec import ArraySpec
from .batch import Batch
from .batch_request import BatchRequest
from .build import build
from .coordinate import Coordinate
from .graph import Graph, Node, Edge, GraphKey, GraphKeys
from .graph_spec import GraphSpec
from .pipeline import *
from .producer_pool import ProducerPool
from .provider_spec import ProviderSpec
from .roi import Roi
from .version_info import _version as version
import gunpowder.contrib
import gunpowder.tensorflow
import gunpowder.torch
import gunpowder.jax
import gunpowder.zoo
| 648 | 27.217391 | 57 | py |
gunpowder | gunpowder-master/gunpowder/morphology.py | import numpy as np
from scipy.ndimage.morphology import distance_transform_edt
def enlarge_binary_map(
    binary_map, radius, voxel_size, ring_fraction=None, in_place=False
):
    """Enlarge existing regions in a binary map.

    Args:
        binary_map (numpy array):
            A matrix with zeros, in which regions to be enlarged are indicated
            with a 1 (regions can already represent larger areas).
        radius (``ndarray`` of ``float``):
            The amount by which to enlarge foreground objects in world units.
        voxel_size (tuple, list or numpy array):
            Indicates the physical voxel size of the binary_map. Defaults to
            isotropic size 1 in every dimension if ``None``.
        ring_fraction (``float``, optional):
            If set, instead of just enlarging objects, a ring is grown around
            them (and the objects removed). The thickness of the ring is set
            with this parameter as a fraction of the radius.
        in_place (bool, optional):
            If set to ``True``, argument ``binary_map`` will be modified
            directly (and ``None`` is returned).

    Returns:
        A matrix with 0s and 1s of same dimension as input binary_map with
        enlarged regions (indicated with 1), unless ``in_place`` is set.
    """
    if len(np.unique(binary_map)) == 1:
        # map is all foreground or all background: nothing to enlarge
        return binary_map

    if voxel_size is None:
        # one entry per dimension is required by distance_transform_edt's
        # ``sampling`` argument; the previous ``binary_map.shape[0]`` produced
        # a wrong-length sequence whenever shape[0] != ndim and crashed
        voxel_size = (1,) * binary_map.ndim
    voxel_size = np.asarray(voxel_size).astype(np.float32)
    # normalize, such that radius == 1 in all dimensions
    voxel_size = voxel_size / radius

    # the EDT measures distance from background to the nearest foreground,
    # so invert the map first
    if in_place:
        np.logical_not(binary_map, out=binary_map)
    else:
        binary_map = np.logical_not(binary_map)
    edtmap = distance_transform_edt(binary_map, sampling=voxel_size)

    # grow objects: everything within (normalized) distance 1 of the
    # original foreground becomes foreground
    if in_place:
        binary_map[:] = edtmap <= 1.0
    else:
        binary_map = edtmap <= 1.0

    # unmask inner part, if requested
    if ring_fraction is not None:
        binary_map[edtmap <= 1.0 - ring_fraction] = False

    if in_place:
        return None
    return binary_map
def create_ball_kernel(radius, voxel_size):
    """Generates a ball-shaped structuring element.

    Args:
        radius (``ndarray`` of ``float``):
            The radius of the ball-shaped structuring element in world-units.
        voxel_size (tuple, list or numpy array):
            Indicates the physical voxel size of the structuring element.

    Returns:
        The structuring element where elements of the neighborhood are 1 and 0
        otherwise. The shape of the returned array depends on radius and
        voxel_size. For instance voxel_size = [2, 1, 1], radius = 5 produces an
        array of shape (7, 11, 11)
    """
    voxel_size = np.asarray(voxel_size)
    # number of voxels needed on each side of the center voxel to cover the
    # radius, rounded up so the kernel is always sufficiently large
    half_extent = np.ceil(radius / voxel_size).astype(int)
    kernel_shape = 2 * np.array(half_extent) + 1
    # start from a single seed voxel in the exact center ...
    kernel = np.zeros(kernel_shape, dtype=np.uint8)
    center = kernel_shape // 2
    kernel[tuple(center)] = 1
    # ... and grow it into a ball of the requested radius
    enlarge_binary_map(kernel, radius, voxel_size, in_place=True)
    return kernel
| 3,244 | 28.5 | 79 | py |
gunpowder | gunpowder-master/gunpowder/compat.py | import sys
# True when running under Python 2 (this compatibility module predates the
# Python 2 end of life)
PY2 = sys.version_info[0] == 2
if PY2:
    binary_type = str
else:
    binary_type = bytes
def ensure_str(s):
    """Return ``s`` as a native ``str``, decoding byte-like objects as ASCII."""
    if PY2:
        # ``buffer`` only exists on Python 2
        if isinstance(s, buffer):
            s = str(s)
    else:
        # materialize zero-copy views before decoding
        if isinstance(s, memoryview):
            s = s.tobytes()
    if isinstance(s, binary_type):
        s = s.decode("ascii")
    return s
| 356 | 16 | 38 | py |
gunpowder | gunpowder-master/gunpowder/version_info.py | __major__ = 1
__minor__ = 3
__patch__ = 0
__tag__ = ""
__version__ = "{}.{}.{}{}".format(__major__, __minor__, __patch__, __tag__).strip(".")
class _Version(object):
    """Read-only accessor for the module-level version components."""
    def major(self):
        return __major__
    def minor(self):
        return __minor__
    def patch(self):
        return __patch__
    def tag(self):
        return __tag__
    def version(self):
        # full version string, e.g. "1.3.0"
        return __version__
    def __str__(self):
        return self.version()
_version = _Version()
if __name__ == "__main__":
print(_version)
| 528 | 15.53125 | 86 | py |
gunpowder | gunpowder-master/gunpowder/coordinate.py | from funlib.geometry import Coordinate # noqa
| 47 | 23 | 46 | py |
gunpowder | gunpowder-master/gunpowder/pipeline.py | import logging
from gunpowder.nodes import BatchProvider
logger = logging.getLogger(__name__)
class PipelineSetupError(Exception):
    """Raised when a provider's ``setup()`` call fails; wraps the provider."""

    def __init__(self, provider):
        self.provider = provider

    def __str__(self):
        return "Exception in {} while calling setup()".format(self.provider.name())
class PipelineTeardownError(Exception):
    """Raised when a provider's ``teardown()`` call fails; wraps the provider."""

    def __init__(self, provider):
        self.provider = provider

    def __str__(self):
        return "Exception in {} while calling teardown()".format(self.provider.name())
class PipelineRequestError(Exception):
    """Raised when processing a request failed somewhere in the pipeline;
    carries both the pipeline and the offending request."""

    def __init__(self, pipeline, request):
        self.pipeline = pipeline
        self.request = request

    def __str__(self):
        parts = [
            "Exception in pipeline:",
            str(self.pipeline),
            "while trying to process request",
            str(self.request),
        ]
        return "\n".join(parts)
class Pipeline:
    """A tree of :class:`BatchProvider`s: ``output`` is the downstream
    provider, ``children`` are the upstream pipelines feeding into it."""
    def __init__(self, node):
        """Create a pipeline from a single :class:`BatchProvider`."""
        assert isinstance(node, BatchProvider), f"{type(node)} is not a BatchProvider"
        self.output = node
        self.children = []
        self.initialized = False
    def traverse(self, callback, reverse=False):
        """Visit every node in the pipeline recursively (either from root to
        leaves of from leaves to the root if ``reverse`` is true). ``callback``
        will be called for each node encountered."""
        # result is a nested list mirroring the tree structure
        result = []
        if not reverse:
            result.append(callback(self))
        for child in self.children:
            result.append(child.traverse(callback, reverse))
        if reverse:
            result.append(callback(self))
        return result
    def copy(self):
        """Make a shallow copy of the pipeline."""
        # "shallow": the BatchProviders are shared, only the tree is new
        pipeline = Pipeline(self.output)
        pipeline.children = [c.copy() for c in self.children]
        return pipeline
    def setup(self):
        """Connect all batch providers in the pipeline and call setup for
        each, from source to sink."""
        def connect(node):
            for child in node.children:
                node.output.add_upstream_provider(child.output)
        # connect all nodes
        self.traverse(connect)
        # call setup on all nodes
        if not self.initialized:
            def node_setup(node):
                try:
                    node.output.setup()
                except Exception as e:
                    raise PipelineSetupError(node.output) from e
            # reverse=True: upstream providers are set up before downstream
            self.traverse(node_setup, reverse=True)
            self.initialized = True
        else:
            logger.warning(
                "pipeline.setup() called more than once (build() inside " "build()?)"
            )
    def internal_teardown(self):
        """Call teardown on each batch provider in the pipeline and disconnect
        all nodes."""
        try:
            def node_teardown(node):
                try:
                    node.output.internal_teardown()
                except Exception as e:
                    raise PipelineTeardownError(node.output) from e
            # call internal_teardown on all nodes
            self.traverse(node_teardown, reverse=True)
            self.initialized = False
        finally:
            # disconnect all nodes
            def disconnect(node):
                node.output.remove_upstream_providers()
            self.traverse(disconnect)
    def request_batch(self, request):
        """Request a batch from the pipeline."""
        try:
            return self.output.request_batch(request)
        except Exception as e:
            raise PipelineRequestError(self, request) from e
    @property
    def spec(self):
        return self.output.spec
    def __add__(self, other):
        # in ``a + b``, ``a`` is attached as an upstream child to the leaves
        # of ``b``; i.e., data flows from a to b
        if isinstance(other, BatchProvider):
            other = Pipeline(other)
        if isinstance(other, Pipeline):
            result = other.copy()
            # add this pipeline as child to all leaves in other
            def add_self_to_leaves(node):
                if len(node.children) == 0:
                    node.children.append(self.copy())
            result.traverse(add_self_to_leaves, reverse=True)
        else:
            raise RuntimeError(f"Don't know how to add {type(other)} to Pipeline")
        return result
    def __radd__(self, other):
        # supports ``(a, b) + c``: each tuple element becomes an upstream
        # branch of this pipeline
        assert isinstance(
            other, tuple
        ), f"Don't know how to radd {type(other)} to Pipeline"
        for o in other:
            assert isinstance(o, Pipeline) or isinstance(
                o, BatchProvider
            ), f"Don't know how to radd {type(o)} to Pipeline"
        other = tuple(
            Pipeline(o) if isinstance(o, BatchProvider) else o.copy() for o in other
        )
        result = self.copy()
        # add every other as child to leaves in pipeline
        def add_others_to_leaves(node):
            if len(node.children) == 0:
                for o in other:
                    node.children.append(o)
        result.traverse(add_others_to_leaves, reverse=True)
        return result
    def __repr__(self):
        def to_string(node):
            return node.output.name()
        reprs = self.traverse(to_string, reverse=True)
        return self.__rec_repr__(reprs)
    def __rec_repr__(self, reprs):
        # reprs is a nested list of provider names; render it as
        # "(a, b) -> c" chains
        if not isinstance(reprs, list):
            return str(reprs)
        num_children = len(reprs) - 1
        res = ""
        if num_children > 0:
            if num_children > 1:
                res = "("
            res += ", ".join(self.__rec_repr__(r) for r in reprs[:-1])
        if num_children > 0:
            if num_children > 1:
                res += ")"
            res += " -> "
        res += reprs[-1]
        return res
| 5,738 | 27.270936 | 86 | py |
gunpowder | gunpowder-master/gunpowder/build.py | import logging
logger = logging.getLogger(__name__)
class build(object):
    """Context manager that sets a pipeline up on entry and tears it down on
    exit -- also when setup itself fails."""
    def __init__(self, pipeline):
        self.pipeline = pipeline
    def __enter__(self):
        try:
            self.pipeline.setup()
        except:
            # deliberately bare: tear the partially set-up pipeline down on
            # *any* error (including KeyboardInterrupt) before re-raising
            logger.error(
                "something went wrong during the setup of the pipeline, calling tear down"
            )
            self.pipeline.internal_teardown()
            logger.debug("tear down completed")
            raise
        return self.pipeline
    def __exit__(self, type, value, traceback):
        logger.debug("leaving context, tearing down pipeline")
        self.pipeline.internal_teardown()
        logger.debug("tear down completed")
| 702 | 26.038462 | 90 | py |
gunpowder | gunpowder-master/gunpowder/graph_spec.py | import numpy as np
import copy
from .freezable import Freezable
class GraphSpec(Freezable):
    """Contains meta-information about a graph. This is used by
    :class:`BatchProviders<BatchProvider>` to communicate the graphs they
    offer, as well as by :class:`Graph` to describe the data they contain.
    Attributes:
        roi (:class:`Roi`):
            The region of interested represented by this graph.
        directed (``bool``, optional):
            Whether the graph is directed or not.
        dtype (``dtype``, optional):
            The data type of the "location" attribute.
            Currently only supports np.float32.
    """
    def __init__(self, roi=None, directed=None, dtype=np.float32, placeholder=False):
        self.roi = roi
        self.directed = directed
        self.dtype = dtype
        self.placeholder = placeholder
        self.freeze()
    def update_with(self, spec):
        """Update this spec in place: rois are merged by union, all other
        fields are overwritten with the values from ``spec`` when set."""
        if self.roi is not None and spec.roi is not None:
            self.roi = self.roi.union(spec.roi)
        elif spec.roi is not None:
            self.roi = spec.roi
        if spec.directed is not None:
            self.directed = spec.directed
        if spec.dtype is not None:
            self.dtype = spec.dtype
        # NOTE(review): ``placeholder`` defaults to a bool (never None), so
        # this check is always true and the value is unconditionally copied
        # from ``spec`` -- confirm intended
        if spec.placeholder is not None:
            self.placeholder = spec.placeholder
    def copy(self):
        """Create a copy of this spec."""
        return copy.deepcopy(self)
    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented
    def __ne__(self, other):
        if isinstance(other, self.__class__):
            return not self.__eq__(other)
        return NotImplemented
    def __repr__(self):
        r = ""
        r += "ROI: " + str(self.roi) + ", "
        r += "dtype: " + str(self.dtype) + ", "
        r += "directed: " + str(self.directed) + ", "
        r += "placeholder: " + str(self.placeholder)
        return r
| 2,002 | 26.438356 | 85 | py |
gunpowder | gunpowder-master/gunpowder/producer_pool.py | try:
import Queue
except:
import queue as Queue
import logging
import multiprocessing
import os
import sys
import time
import traceback
import numpy as np
logger = logging.getLogger(__name__)
class NoResult(Exception):
    """Raised by :meth:`ProducerPool.get` when no result arrived within the
    requested timeout."""

    pass
class ParentDied(Exception):
    """Placed on the result queue by the watch-dog when the parent process of
    the pool died; re-raised to the consumer by :meth:`ProducerPool.get`."""

    pass
class WorkersDied(Exception):
    """Placed on the result queue by the watch-dog when at least one worker
    process died; re-raised to the consumer by :meth:`ProducerPool.get`."""

    pass
class ProducerPool(object):
    """A pool of worker processes that repeatedly call the given callables and
    place their return values on a shared, bounded result queue.

    A separate watch-dog process starts one worker per callable and monitors
    both the workers and the parent process. Failures are communicated to the
    consumer by placing ``ParentDied``/``WorkersDied`` exceptions on the
    result queue, which :meth:`get` re-raises.
    """

    def __init__(self, callables, queue_size=10):
        # one worker process per callable; each callable is invoked in a loop
        # by _run_worker and its return values are queued
        self.__watch_dog = multiprocessing.Process(
            target=self._run_watch_dog, args=(callables,)
        )
        self.__stop = multiprocessing.Event()
        # bounded queue: workers block (in 1s retries) when it is full
        self.__result_queue = multiprocessing.Queue(queue_size)

    def __del__(self):
        # best-effort cleanup; stop() is idempotent
        self.stop()

    def start(self):
        """Start the pool of producers."""

        # stop() sets __watch_dog to None, and a Process cannot be started
        # twice -- hence a pool cannot be restarted
        if self.__watch_dog is None:
            raise RuntimeError("can't start a ProducerPool a second time")

        if self.__watch_dog.is_alive():
            logger.warning("trying to start workers, but they are already running")
            return

        self.__stop.clear()
        self.__watch_dog.start()

    def get(self, timeout=0):
        """Return the next result from the producer pool.

        If timeout is set and there is not result after the given number of
        seconds, exception NoResult is raised.
        """

        # timeout == 0 means "block forever", implemented as repeated
        # 1-second polls
        block = False
        if timeout == 0:
            timeout = 1
            block = True

        item = None
        # NOTE(review): this loop relies on producers never returning None --
        # a None result would make this spin forever; confirm with callables
        while item == None:
            try:
                item = self.__result_queue.get(timeout=timeout)
            except Queue.Empty:
                if not block:
                    raise NoResult()

        # exceptions (worker errors, ParentDied, WorkersDied) travel through
        # the queue and are re-raised in the consumer
        if isinstance(item, Exception):
            raise item

        return item

    def stop(self):
        """Stop the pool of producers.

        Items currently being produced will not be waited for and be discarded."""

        if self.__watch_dog is None:
            return

        self.__stop.set()
        # _popen is a private Process attribute; it is None if the process
        # was never started, in which case join() would raise
        if self.__watch_dog._popen is not None:
            self.__watch_dog.join()
        self.__watch_dog = None

    def _run_watch_dog(self, callables):
        # runs in its own process: starts the workers and polls once per
        # second for a stop request, a dead parent, or dead workers
        parent_pid = os.getppid()
        logger.debug("watchdog started with PID " + str(os.getpid()))
        logger.debug("parent PID " + str(parent_pid))

        workers = [
            multiprocessing.Process(target=self._run_worker, args=(c,))
            for c in callables
        ]

        try:
            logger.debug("starting %d workers" % len(workers))
            for worker in workers:
                worker.start()

            while not self.__stop.wait(1):
                # getppid changes when the original parent dies and the
                # watch-dog gets re-parented
                if os.getppid() != parent_pid:
                    logger.error("parent of producer pool died, shutting down")
                    self.__result_queue.put(ParentDied())
                    break
                if not self._all_workers_alive(workers):
                    logger.error("at least one of my workers died, shutting down")
                    self.__result_queue.put(WorkersDied())
                    break
        except:
            # swallow everything (e.g. KeyboardInterrupt) so the finally
            # block always gets to terminate the workers
            pass
        finally:
            logger.info("terminating workers...")
            for worker in workers:
                worker.terminate()
            logger.info("joining workers...")
            for worker in workers:
                worker.join()
            logger.info("done")

    def _run_worker(self, target):
        # runs in its own process: call `target` repeatedly, queueing each
        # result (or the exception it raised)
        parent_pid = os.getppid()
        logger.debug("worker started with PID " + str(os.getpid()))
        logger.debug("parent PID " + str(parent_pid))

        result = None
        # re-seed so forked workers do not all produce the same random stream
        np.random.seed(None)

        while True:
            if os.getppid() != parent_pid:
                logger.debug("worker %d: watch-dog died, stopping" % os.getpid())
                break

            if result is None:
                try:
                    result = target()
                except Exception as e:
                    logger.error(e, exc_info=True)
                    result = e
                    traceback.print_exc()
                # don't stop on normal exceptions -- place them in result queue
                # and let them be handled by caller
                except:
                    logger.error("received error: " + str(sys.exc_info()[0]))
                    # this is most likely a keyboard interrupt, stop process
                    break

            try:
                self.__result_queue.put(result, timeout=1)
                result = None
            except Queue.Full:
                # keep the pending result and retry on the next iteration
                logger.debug(
                    "worker %d: result queue is full, waiting to place my result"
                    % os.getpid()
                )

        logger.debug("worker with PID " + str(os.getpid()) + " exiting")
        # hard exit, skipping interpreter cleanup; note the non-zero status
        # is used even for a normal stop
        os._exit(1)

    def _all_workers_alive(self, workers):
        return all([worker.is_alive() for worker in workers])
| 4,825 | 27.388235 | 83 | py |
gunpowder | gunpowder-master/gunpowder/torch/__init__.py | from __future__ import absolute_import
from .nodes import *
| 61 | 14.5 | 38 | py |
gunpowder | gunpowder-master/gunpowder/torch/nodes/__init__.py | from __future__ import absolute_import
from .train import Train
from .predict import Predict
__all__ = ["Train", "Predict"]
| 126 | 17.142857 | 38 | py |
gunpowder | gunpowder-master/gunpowder/torch/nodes/predict.py | from gunpowder.array import ArrayKey, Array
from gunpowder.array_spec import ArraySpec
from gunpowder.ext import torch
from gunpowder.nodes.generic_predict import GenericPredict
import logging
from typing import Dict, Union
logger = logging.getLogger(__name__)
class Predict(GenericPredict):
    """Torch implementation of :class:`gunpowder.nodes.Predict`.

    Args:

        model (subclass of ``torch.nn.Module``):

            The model to use for prediction.

        inputs (``dict``, ``string`` -> :class:`ArrayKey`):

            Dictionary from the names of input tensors (argument names of the
            ``forward`` method) in the model to array keys.

        outputs (``dict``, ``string`` or ``int`` -> :class:`ArrayKey`):

            Dictionary from the names of tensors in the network to array
            keys. If the key is a string, the tensor will be retrieved
            by checking the model for an attribute with the key as its name.
            If the key is an integer, it is interpreted as a tuple index of
            the outputs of the network.
            New arrays will be generated by this node for each entry (if
            requested downstream).

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):

            Used to set the specs of generated arrays (``outputs``). This is
            useful to set the ``voxel_size``, for example, if they differ from
            the voxel size of the input arrays. Only fields that are not
            ``None`` in the given :class:`ArraySpec` will be used.

        checkpoint: (``string``, optional):

            An optional path to the saved parameters for your torch module.
            These will be loaded and used for prediction if provided.

        device (``string``, optional):

            Which device to use for prediction (``"cpu"`` or ``"cuda"``).
            Default is ``"cuda"``, which falls back to CPU if CUDA is not
            available.

        spawn_subprocess (bool, optional): Whether to run ``predict`` in a
            separate process. Default is false.
    """

    def __init__(
        self,
        model,
        inputs: Dict[str, ArrayKey],
        outputs: Dict[Union[str, int], ArrayKey],
        array_specs: Dict[ArrayKey, ArraySpec] = None,
        checkpoint: str = None,
        device="cuda",
        spawn_subprocess=False,
    ):
        self.array_specs = array_specs if array_specs is not None else {}

        if model.training:
            logger.warning(
                "Model is in training mode during prediction. "
                "Consider using model.eval()"
            )

        super(Predict, self).__init__(
            inputs, outputs, array_specs, spawn_subprocess=spawn_subprocess
        )

        self.device_string = device
        self.device = None  # to be set in start()
        self.model = model
        self.checkpoint = checkpoint

        # populated by forward hooks for string output keys
        self.intermediate_layers = {}
        self.register_hooks()

    def start(self):
        # runs in the (possibly spawned) worker process; CUDA must only be
        # initialized here, not in __init__
        self.use_cuda = torch.cuda.is_available() and self.device_string == "cuda"
        logger.info(f"Predicting on {'gpu' if self.use_cuda else 'cpu'}")
        self.device = torch.device("cuda" if self.use_cuda else "cpu")

        try:
            self.model = self.model.to(self.device)
        except RuntimeError as e:
            raise RuntimeError(
                "Failed to move model to device. If you are using a child process "
                "to run your model, maybe you already initialized CUDA by sending "
                "your model to device in the main process."
            ) from e

        if self.checkpoint is not None:
            checkpoint = torch.load(self.checkpoint, map_location=self.device)

            if "model_state_dict" in checkpoint:
                # checkpoint as written by gunpowder.torch.Train
                self.model.load_state_dict(checkpoint["model_state_dict"])
            else:
                # the checkpoint *is* the state dict; previously this called
                # load_state_dict() without arguments, which always raised
                self.model.load_state_dict(checkpoint)

    def predict(self, batch, request):
        inputs = self.get_inputs(batch)
        with torch.no_grad():
            out = self.model.forward(**inputs)
        outputs = self.get_outputs(out, request)
        self.update_batch(batch, request, outputs)

    def get_inputs(self, batch):
        # map batch arrays onto the model's forward() argument names
        model_inputs = {
            key: torch.as_tensor(batch[value].data, device=self.device)
            for key, value in self.inputs.items()
        }
        return model_inputs

    def register_hooks(self):
        # string output keys name model attributes; capture their outputs
        # via forward hooks into self.intermediate_layers
        for key in self.outputs:
            if isinstance(key, str):
                layer = getattr(self.model, key)
                layer.register_forward_hook(self.create_hook(key))

    def create_hook(self, key):
        def save_layer(module, input, output):
            self.intermediate_layers[key] = output

        return save_layer

    def get_outputs(self, module_out, request):
        # integer keys index into the (possibly single-element) output tuple,
        # string keys refer to hooked intermediate layers; only requested
        # arrays are returned
        outputs = {}
        if isinstance(module_out, tuple):
            module_outs = module_out
        else:
            module_outs = (module_out,)
        for key, value in self.outputs.items():
            if value in request:
                if isinstance(key, str):
                    outputs[value] = self.intermediate_layers[key]
                elif isinstance(key, int):
                    outputs[value] = module_outs[key]
        return outputs

    def update_batch(self, batch, request, requested_outputs):
        # copy each requested tensor back to host memory and wrap it in an
        # Array with the requested ROI
        for array_key, tensor in requested_outputs.items():
            spec = self.spec[array_key].copy()
            spec.roi = request[array_key].roi
            batch.arrays[array_key] = Array(tensor.cpu().detach().numpy(), spec)

    def stop(self):
        pass
| 5,622 | 34.815287 | 83 | py |
gunpowder | gunpowder-master/gunpowder/torch/nodes/train.py | import logging
import numpy as np
from gunpowder.array import ArrayKey, Array
from gunpowder.array_spec import ArraySpec
from gunpowder.ext import torch, tensorboardX, NoSuchModule
from gunpowder.nodes.generic_train import GenericTrain
from typing import Dict, Union, Optional
logger = logging.getLogger(__name__)
class Train(GenericTrain):
    """Torch implementation of :class:`gunpowder.nodes.GenericTrain`.

    Args:

        model (subclass of ``torch.nn.Module``):

            The model to train.

        loss:

            The torch loss to use.

        optimizer:

            The torch optimizer to use.

        inputs (``dict``, ``string`` -> :class:`ArrayKey`):

            Dictionary from the names of input tensors (argument names of the
            ``forward`` method) in the model to array keys.

        loss_inputs (``dict``, ``string`` or ``int`` -> :class:`ArrayKey`):

            Dictionary with the names of input variables to the loss function as
            keys, and ArrayKeys containing the desired data as values. Keys can
            be either strings or integers. If the key is an integer, it will
            be treated as a positional argument to the loss function, a
            string will be used as a named argument

        outputs (``dict``, ``string`` or ``int`` -> :class:`ArrayKey`):

            Dictionary from the names of tensors in the network to array
            keys. If the key is a string, the tensor will be retrieved
            by checking the model for an attribute with the key as its name.
            If the key is an integer, it is interpreted as a tuple index of
            the outputs of the network.
            New arrays will be generated by this node for each entry (if
            requested downstream).

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):

            Used to set the specs of generated arrays (at the moment only
            ``output``). This is useful to set the ``voxel_size``, for example,
            if they differ from the voxel size of the input arrays. Only fields
            that are not ``None`` in the given :class:`ArraySpec` will be used.

        checkpoint_basename (``string``, optional):

            The basename used for checkpoint files. Defaults to ``model``.

        save_every (``int``, optional):

            After how many iterations to create a checkpoint to store the
            learnt weights.

        log_dir (``string``, optional):

            Directory for saving tensorboard summaries.

        log_every (``int``, optional):

            After how many iterations to write out tensorboard summaries.

        spawn_subprocess (``bool``, optional):

            Whether to run the ``train_step`` in a separate process. Default is false.
    """

    def __init__(
        self,
        model,
        loss,
        optimizer,
        inputs: Dict[str, ArrayKey],
        outputs: Dict[Union[int, str], ArrayKey],
        loss_inputs: Dict[Union[int, str], ArrayKey],
        gradients: Optional[Dict[Union[int, str], ArrayKey]] = None,
        array_specs: Optional[Dict[ArrayKey, ArraySpec]] = None,
        checkpoint_basename: str = "model",
        save_every: int = 2000,
        log_dir: str = None,
        log_every: int = 1,
        spawn_subprocess: bool = False,
    ):
        # avoid the mutable default argument ({}) the previous version used
        if gradients is None:
            gradients = {}

        if not model.training:
            logger.warning(
                "Model is in evaluation mode during training. "
                "Consider using model.train()"
            )

        # loss inputs that are not produced by the model also have to be
        # requested upstream; add them to a *copy* of the model inputs so the
        # caller's dict is not mutated
        inputs = dict(inputs)
        inputs.update(
            {k: v for k, v in loss_inputs.items() if v not in outputs.values()}
        )

        super(Train, self).__init__(
            inputs, outputs, gradients, array_specs, spawn_subprocess=spawn_subprocess
        )

        self.model = model
        self.loss = loss
        self.optimizer = optimizer
        self.loss_inputs = loss_inputs
        self.checkpoint_basename = checkpoint_basename
        self.save_every = save_every

        self.iteration = 0

        if not isinstance(tensorboardX, NoSuchModule) and log_dir is not None:
            self.summary_writer = tensorboardX.SummaryWriter(log_dir)
            self.log_every = log_every
        else:
            self.summary_writer = None
            if log_dir is not None:
                logger.warning("log_dir given, but tensorboardX is not installed")

        # populated by forward hooks for string output keys
        self.intermediate_layers = {}
        self.register_hooks()

    def register_hooks(self):
        # string output keys name model attributes; capture their outputs
        # via forward hooks into self.intermediate_layers
        for key in self.outputs:
            if isinstance(key, str):
                layer = getattr(self.model, key)
                layer.register_forward_hook(self.create_hook(key))

    def create_hook(self, key):
        def save_layer(module, input, output):
            self.intermediate_layers[key] = output

        return save_layer

    def retain_gradients(self, request, outputs):
        # non-leaf tensors only keep .grad if retain_grad() was called before
        # backward()
        for array_name, array_key in self.gradients.items():
            if array_key not in request:
                continue
            if isinstance(array_name, int):
                tensor = outputs[array_name]
            elif isinstance(array_name, str):
                tensor = getattr(self.model, array_name)
            else:
                raise RuntimeError(
                    "only ints and strings are supported as gradients keys"
                )
            tensor.retain_grad()

    def start(self):
        # runs in the (possibly spawned) worker process; CUDA must only be
        # initialized here, not in __init__
        self.use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda" if self.use_cuda else "cpu")

        try:
            self.model = self.model.to(self.device)
        except RuntimeError as e:
            raise RuntimeError(
                "Failed to move model to device. If you are using a child process "
                "to run your model, maybe you already initialized CUDA by sending "
                "your model to device in the main process."
            ) from e
        if isinstance(self.loss, torch.nn.Module):
            self.loss = self.loss.to(self.device)

        checkpoint, self.iteration = self._get_latest_checkpoint(
            self.checkpoint_basename
        )

        if checkpoint is not None:
            logger.info("Resuming training from iteration %d", self.iteration)
            logger.info("Loading %s", checkpoint)

            checkpoint = torch.load(checkpoint, map_location=self.device)
            self.model.load_state_dict(checkpoint["model_state_dict"])
            self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        else:
            logger.info("Starting training from scratch")

        logger.info("Using device %s", self.device)

    def train_step(self, batch, request):
        inputs = self.__collect_provided_inputs(batch)
        requested_outputs = self.__collect_requested_outputs(request)

        # keys are argument names of model forward pass
        device_inputs = {
            k: torch.as_tensor(v, device=self.device) for k, v in inputs.items()
        }

        # get outputs. Keys are tuple indices or model attr names as in self.outputs
        self.optimizer.zero_grad()
        model_outputs = self.model(**device_inputs)
        if isinstance(model_outputs, tuple):
            outputs = {i: model_outputs[i] for i in range(len(model_outputs))}
        elif isinstance(model_outputs, torch.Tensor):
            outputs = {0: model_outputs}
        else:
            raise RuntimeError(
                "Torch train node only supports return types of tuple",
                f"and torch.Tensor from model.forward(). not {type(model_outputs)}",
            )
        outputs.update(self.intermediate_layers)

        # Some inputs to the loss should come from the batch, not the model
        provided_loss_inputs = self.__collect_provided_loss_inputs(batch)

        device_loss_inputs = {
            k: torch.as_tensor(v, device=self.device)
            for k, v in provided_loss_inputs.items()
        }

        # Some inputs to the loss function should come from the outputs of the model
        # Update device loss inputs with tensors from outputs if available
        flipped_outputs = {v: outputs[k] for k, v in self.outputs.items()}
        device_loss_inputs = {
            k: flipped_outputs.get(v, device_loss_inputs.get(k))
            for k, v in self.loss_inputs.items()
        }

        # split loss inputs into positional args (consecutive int keys
        # starting at 0) and keyword args (string keys)
        device_loss_args = []
        for i in range(len(device_loss_inputs)):
            if i in device_loss_inputs:
                device_loss_args.append(device_loss_inputs.pop(i))
            else:
                break
        device_loss_kwargs = {}
        for k, v in list(device_loss_inputs.items()):
            if isinstance(k, str):
                device_loss_kwargs[k] = device_loss_inputs.pop(k)

        assert (
            len(device_loss_inputs) == 0
        ), f"Not all loss inputs could be interpreted. Failed keys: {device_loss_inputs.keys()}"

        self.retain_gradients(request, outputs)

        logger.debug("model outputs: %s", {k: v.shape for k, v in outputs.items()})
        logger.debug(
            "loss inputs: %s %s",
            [v.shape for v in device_loss_args],
            {k: v.shape for k, v in device_loss_kwargs.items()},
        )
        loss = self.loss(*device_loss_args, **device_loss_kwargs)
        loss.backward()
        self.optimizer.step()

        # add requested model outputs to batch (the previous version ran this
        # identical loop twice; once is enough)
        for array_key, array_name in requested_outputs.items():
            spec = self.spec[array_key].copy()
            spec.roi = request[array_key].roi
            batch.arrays[array_key] = Array(
                outputs[array_name].cpu().detach().numpy(), spec
            )

        # add requested gradients to batch
        for array_name, array_key in self.gradients.items():
            if array_key not in request:
                continue

            if isinstance(array_name, int):
                tensor = outputs[array_name]
            elif isinstance(array_name, str):
                tensor = getattr(self.model, array_name)
            else:
                raise RuntimeError(
                    "only ints and strings are supported as gradients keys"
                )

            spec = self.spec[array_key].copy()
            spec.roi = request[array_key].roi
            batch.arrays[array_key] = Array(tensor.grad.cpu().detach().numpy(), spec)

        batch.loss = loss.cpu().detach().numpy()
        self.iteration += 1
        batch.iteration = self.iteration

        if batch.iteration % self.save_every == 0:
            checkpoint_name = self._checkpoint_name(
                self.checkpoint_basename, batch.iteration
            )

            logger.info("Creating checkpoint %s", checkpoint_name)

            torch.save(
                {
                    "model_state_dict": self.model.state_dict(),
                    "optimizer_state_dict": self.optimizer.state_dict(),
                },
                checkpoint_name,
            )

        if self.summary_writer and batch.iteration % self.log_every == 0:
            self.summary_writer.add_scalar("loss", batch.loss, batch.iteration)

    def __collect_requested_outputs(self, request):
        # map requested array keys back to their output names
        array_outputs = {}
        for output_name, array_key in self.outputs.items():
            if array_key in request:
                array_outputs[array_key] = output_name
        return array_outputs

    def __collect_provided_inputs(self, batch):
        return self.__collect_provided_arrays(
            {k: v for k, v in self.inputs.items() if k not in self.loss_inputs}, batch
        )

    def __collect_provided_loss_inputs(self, batch):
        return self.__collect_provided_arrays(
            self.loss_inputs, batch, expect_missing_arrays=True
        )

    def __collect_provided_arrays(self, reference, batch, expect_missing_arrays=False):
        # resolve each reference to actual data: ArrayKeys are looked up in
        # the batch, ndarrays are passed through, strings name batch attributes
        arrays = {}

        for array_name, array_key in reference.items():
            if isinstance(array_key, ArrayKey):
                msg = f"batch does not contain {array_key}, array {array_name} will not be set"
                if array_key in batch.arrays:
                    arrays[array_name] = batch.arrays[array_key].data
                elif not expect_missing_arrays:
                    # logger.warn is deprecated; use warning
                    logger.warning(msg)
                else:
                    logger.debug(msg)
            elif isinstance(array_key, np.ndarray):
                arrays[array_name] = array_key
            elif isinstance(array_key, str):
                arrays[array_name] = getattr(batch, array_key)
            else:
                raise Exception(
                    "Unknown network array key {}, can't be given to "
                    "network".format(array_key)
                )

        return arrays
| 13,024 | 36.002841 | 96 | py |
gunpowder | gunpowder-master/gunpowder/zoo/__init__.py | 0 | 0 | 0 | py | |
gunpowder | gunpowder-master/gunpowder/zoo/tensorflow/unet.py | import tensorflow as tf
def conv_pass(
    fmaps_in,
    kernel_size,
    num_fmaps,
    num_repetitions,
    activation="relu",
    name="conv_pass",
):
    """Create a convolution pass::

        f_in --> f_1 --> ... --> f_n

    where each ``-->`` is a convolution followed by a (non-linear) activation
    function, and ``n`` equals ``num_repetitions``. With "valid" padding, each
    convolution decreases the size of the feature maps by ``kernel_size-1``.

    Args:

        fmaps_in:

            The input tensor of shape ``(batch_size, channels, depth, height,
            width)`` or ``(batch_size, channels, height, width)``.

        kernel_size:

            Size of the kernel. Forwarded to the tensorflow convolution layer.

        num_fmaps:

            The number of feature maps to produce with each convolution.

        num_repetitions:

            How many convolutions to apply.

        activation:

            Which activation to use after a convolution. Accepts the name of any
            tensorflow activation function (e.g., ``relu`` for ``tf.nn.relu``),
            or ``None`` for no activation.

        name:

            Base name for the created layers; an index suffix ``_i`` is
            appended per repetition.
    """

    fmaps = fmaps_in
    if activation is not None:
        activation = getattr(tf.nn, activation)

    # choose 2D or 3D convolution from the rank of the input (rank - 2 spatial
    # dimensions)
    conv_layer = getattr(
        tf.layers, {2: "conv2d", 3: "conv3d"}[fmaps_in.get_shape().ndims - 2]
    )

    for i in range(num_repetitions):
        fmaps = conv_layer(
            inputs=fmaps,
            filters=num_fmaps,
            kernel_size=kernel_size,
            padding="valid",
            data_format="channels_first",
            activation=activation,
            name=name + "_%i" % i,
        )

    return fmaps
def downsample(fmaps_in, factors, name="down"):
    """Max-pool ``fmaps_in`` by ``factors`` (channels-first, valid padding).

    Picks 2D or 3D pooling from the rank of the input.
    """

    num_spatial_dims = fmaps_in.get_shape().ndims - 2
    layer_name = {2: "max_pooling2d", 3: "max_pooling3d"}[num_spatial_dims]
    pooling_layer = getattr(tf.layers, layer_name)

    return pooling_layer(
        fmaps_in,
        pool_size=factors,
        strides=factors,
        padding="valid",
        data_format="channels_first",
        name=name,
    )
def upsample(fmaps_in, factors, num_fmaps, activation="relu", name="up"):
    """Upsample ``fmaps_in`` by ``factors`` using a transposed convolution
    with ``num_fmaps`` output feature maps (channels-first).

    ``activation`` names a ``tf.nn`` activation function, or is ``None`` for
    no activation.
    """

    activation_fn = None if activation is None else getattr(tf.nn, activation)

    # choose 2D or 3D transposed convolution from the rank of the input
    num_spatial_dims = fmaps_in.get_shape().ndims - 2
    conv_trans_layer = getattr(
        tf.layers, {2: "conv2d_transpose", 3: "conv3d_transpose"}[num_spatial_dims]
    )

    return conv_trans_layer(
        fmaps_in,
        filters=num_fmaps,
        kernel_size=factors,
        strides=factors,
        padding="valid",
        data_format="channels_first",
        activation=activation_fn,
        name=name,
    )
def crop_spatial(fmaps_in, shape):
    """Center-crop only the spatial dimensions of ``fmaps_in`` to match
    ``shape``.

    Args:

        fmaps_in:

            The input tensor.

        shape:

            A list (not a tensor) with the requested shape [_, _, z, y, x] or
            [_, _, y, x]; the first two entries (batch, channels) are ignored.
    """

    in_shape = fmaps_in.get_shape().as_list()

    # keep batch and channel dimensions, center the crop in each spatial one
    offset = [0, 0]
    size = list(in_shape[0:2])
    for dim in range(2, len(shape)):
        offset.append((in_shape[dim] - shape[dim]) // 2)
        size.append(shape[dim])

    return tf.slice(fmaps_in, offset, size)
def unet(
    fmaps_in, num_fmaps, fmap_inc_factor, downsample_factors, activation="relu", layer=0
):
    """Create a 2D or 3D U-Net::

        f_in --> f_left ------------------->> f_right --> f_out
                    |                            ^
                    v                            |
                  g_in --> g_left -->> g_right --> g_out
                              |           ^
                              v           |
                                   ...

    where each ``-->`` is a convolution pass (see ``conv_pass``), each ``-->>``
    a crop, and down and up arrows are max-pooling and transposed convolutions,
    respectively.

    The U-Net expects tensors to have shape ``(batch=1, channels, depth, height,
    width)`` for 3D or ``(batch=1, channels, height, width)`` for 2D.

    This U-Net performs only "valid" convolutions, i.e., sizes of the feature
    maps decrease after each convolution.

    Args:

        fmaps_in:

            The input tensor.

        num_fmaps:

            The number of feature maps in the first layer. This is also the
            number of output feature maps.

        fmap_inc_factor:

            By how much to multiply the number of feature maps between layers.
            If layer 0 has ``k`` feature maps, layer ``l`` will have
            ``k*fmap_inc_factor**l``.

        downsample_factors:

            List of lists ``[z, y, x]`` or ``[y, x]`` to use to down- and
            up-sample the feature maps between layers.

        activation:

            Which activation to use after a convolution. Accepts the name of any
            tensorflow activation function (e.g., ``relu`` for ``tf.nn.relu``).

        layer:

            Used internally to build the U-Net recursively.
    """

    # indent the debug prints by recursion depth
    prefix = "    " * layer
    print(prefix + "Creating U-Net layer %i" % layer)
    print(prefix + "f_in: " + str(fmaps_in.shape))

    # convolve
    f_left = conv_pass(
        fmaps_in,
        kernel_size=3,
        num_fmaps=num_fmaps,
        num_repetitions=2,
        activation=activation,
        name="unet_layer_%i_left" % layer,
    )

    # last layer does not recurse: one recursion level per downsample factor
    bottom_layer = layer == len(downsample_factors)
    if bottom_layer:
        print(prefix + "bottom layer")
        print(prefix + "f_out: " + str(f_left.shape))
        return f_left

    # downsample
    g_in = downsample(
        f_left, downsample_factors[layer], "unet_down_%i_to_%i" % (layer, layer + 1)
    )

    # recursive U-net
    g_out = unet(
        g_in,
        num_fmaps=num_fmaps * fmap_inc_factor,
        fmap_inc_factor=fmap_inc_factor,
        downsample_factors=downsample_factors,
        activation=activation,
        layer=layer + 1,
    )

    print(prefix + "g_out: " + str(g_out.shape))

    # upsample
    g_out_upsampled = upsample(
        g_out,
        downsample_factors[layer],
        num_fmaps,
        activation=activation,
        name="unet_up_%i_to_%i" % (layer + 1, layer),
    )

    print(prefix + "g_out_upsampled: " + str(g_out_upsampled.shape))

    # copy-crop: "valid" convolutions shrink the deeper path, so f_left has to
    # be center-cropped to the upsampled size before concatenation
    f_left_cropped = crop_spatial(f_left, g_out_upsampled.get_shape().as_list())

    print(prefix + "f_left_cropped: " + str(f_left_cropped.shape))

    # concatenate along channel dimension
    f_right = tf.concat([f_left_cropped, g_out_upsampled], 1)

    print(prefix + "f_right: " + str(f_right.shape))

    # convolve (uses the default "relu" activation of conv_pass)
    f_out = conv_pass(
        f_right,
        kernel_size=3,
        num_fmaps=num_fmaps,
        num_repetitions=2,
        name="unet_layer_%i_right" % layer,
    )

    print(prefix + "f_out: " + str(f_out.shape))

    return f_out
| 6,758 | 24.996154 | 88 | py |
gunpowder | gunpowder-master/gunpowder/zoo/tensorflow/__init__.py | from .unet import unet, conv_pass
| 34 | 16.5 | 33 | py |
gunpowder | gunpowder-master/gunpowder/jax/generic_jax_model.py | class GenericJaxModel:
"""An interface for models to follow in order to train or predict. A model
implementing this interface will need to contain not only the forward
model but also loss and update fn. Some examples can be found in
https://github.com/funkelab/funlib.learn.jax
Args:
is_training (``bool``):
Indicating whether the model will be used for training
or inferencing.
"""
    def __init__(self, is_training):
        # The base class takes no action; per the class docstring,
        # `is_training` indicates whether the model will be used for training
        # or inferencing, and subclasses may act on it.
        pass
    def initialize(self, rng_key, inputs):
        """Initialize parameters for training.

        Args:

            rng_key (jax.random.PRNGKey):

                Seed for parameter initialization

            inputs (``dict``, ``string`` -> jnp.ndarray):

                Dictionary of inputs, provided to initialize parameters
                with the correct dimensions.

        Return:

            params (Any):

                Function should return an object encapsulating different
                parameters of the model.
        """
        # abstract method: concrete models must override this
        raise RuntimeError("Unimplemented")
    def forward(self, params, inputs):
        """Run the forward model.

        Args:

            params (Any):

                Model parameters.

            inputs (``dict``, ``string`` -> jnp.ndarray):

                Dictionary of inputs.

        Return:

            outputs (``dict``, ``string`` -> jnp.ndarray):

                Dictionary of outputs.
        """
        # abstract method: concrete models must override this
        raise RuntimeError("Unimplemented")
    def train_step(self, params, inputs, pmapped):
        """Run one iteration of training on the model.

        Args:

            params (Any):

                Model parameters.

            inputs (``dict``, ``string`` -> jnp.ndarray):

                Dictionary of inputs.

            pmapped (``bool``):

                Whether the function is run with `jax.pmap` or not.
                If pmapped across devices, the function should take care to
                synchronize gradients during the train step.
                The `axis_name` is set to the ``string`` "num_devices"

        Return:

            Tuple(new_params, outputs, loss)

            new_params (Any):

                Updated model parameters.

            outputs (``dict``, ``string`` -> jnp.ndarray):

                Dictionary of outputs.

            loss (Union[``float``, (``dict``, ``string`` -> ``float``)]):

                Loss value of this iteration. Value can either be a single
                ``float`` or a dictionary of multiple losses.
        """
        # abstract method: concrete models must override this
        raise RuntimeError("Unimplemented")
| 2,633 | 25.34 | 78 | py |
gunpowder | gunpowder-master/gunpowder/jax/__init__.py | from .generic_jax_model import GenericJaxModel
from .nodes import *
| 68 | 22 | 46 | py |
gunpowder | gunpowder-master/gunpowder/jax/nodes/__init__.py | from .train import Train
from .predict import Predict
__all__ = ["Train", "Predict"]
| 86 | 16.4 | 30 | py |
gunpowder | gunpowder-master/gunpowder/jax/nodes/predict.py | from gunpowder.array import ArrayKey, Array
from gunpowder.array_spec import ArraySpec
from gunpowder.ext import jax
from gunpowder.nodes.generic_predict import GenericPredict
from gunpowder.jax import GenericJaxModel
import pickle
import logging
from typing import Dict, Union
logger = logging.getLogger(__name__)
class Predict(GenericPredict):
    """JAX implementation of :class:`gunpowder.nodes.Predict`.

    Args:

        model (subclass of ``gunpowder.jax.GenericJaxModel``):

            The model to use for prediction.

        inputs (``dict``, ``string`` -> :class:`ArrayKey`):

            Dictionary from the names of input tensors in the network to
            array keys.

        outputs (``dict``, ``string`` -> :class:`ArrayKey`):

            Dictionary from the names of output tensors in the network to array
            keys. New arrays will be generated by this node for each entry (if
            requested downstream).

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):

            Used to set the specs of generated arrays (``outputs``). This is
            useful to set the ``voxel_size``, for example, if they differ from
            the voxel size of the input arrays. Only fields that are not
            ``None`` in the given :class:`ArraySpec` will be used.

        checkpoint: (``string``, optional):

            An optional path to the saved parameters for your jax module.
            These will be loaded and used for prediction if provided.

        spawn_subprocess (bool, optional): Whether to run ``predict`` in a
            separate process. Default is false.
    """

    def __init__(
        self,
        model: GenericJaxModel,
        inputs: Dict[str, ArrayKey],
        outputs: Dict[Union[str, int], ArrayKey],
        array_specs: Dict[ArrayKey, ArraySpec] = None,
        checkpoint: str = None,
        spawn_subprocess=False,
    ):
        self.array_specs = {} if array_specs is None else array_specs

        super(Predict, self).__init__(
            inputs, outputs, array_specs, spawn_subprocess=spawn_subprocess
        )

        self.model = model
        self.checkpoint = checkpoint
        self.model_params = None

    def start(self):
        # load saved parameters, if a checkpoint was given
        if self.checkpoint is not None:
            with open(self.checkpoint, "rb") as f:
                self.model_params = pickle.load(f)

    def predict(self, batch, request):
        inputs = self.get_inputs(batch)
        if self.model_params is None:
            # no checkpoint given: lazily initialize parameters from the
            # first batch, seeded with the request's random seed
            rng = jax.random.PRNGKey(request.random_seed)
            self.model_params = self.model.initialize(rng, inputs)
        out = jax.jit(self.model.forward)(self.model_params, inputs)
        outputs = self.get_outputs(out, request)
        self.update_batch(batch, request, outputs)

    def get_inputs(self, batch):
        # move each input array onto the default JAX device, keyed by the
        # network's input tensor name
        model_inputs = {}
        for tensor_name, array_key in self.inputs.items():
            model_inputs[tensor_name] = jax.device_put(batch[array_key].data)
        return model_inputs

    def get_outputs(self, module_out, request):
        # only hand back the outputs that were actually requested
        return {
            array_key: module_out[tensor_name]
            for tensor_name, array_key in self.outputs.items()
            if array_key in request
        }

    def update_batch(self, batch, request, requested_outputs):
        # wrap each produced tensor in an Array with the requested ROI
        for array_key, tensor in requested_outputs.items():
            spec = self.spec[array_key].copy()
            spec.roi = request[array_key].roi
            batch.arrays[array_key] = Array(tensor, spec)

    def stop(self):
        pass
| 3,564 | 32.317757 | 88 | py |
gunpowder | gunpowder-master/gunpowder/jax/nodes/train.py | import logging
import numpy as np
from gunpowder.ext import jax
from gunpowder.ext import jnp
import pickle
import os
from gunpowder.array import ArrayKey, Array
from gunpowder.array_spec import ArraySpec
from gunpowder.ext import tensorboardX, NoSuchModule
from gunpowder.nodes.generic_train import GenericTrain
from gunpowder.jax import GenericJaxModel
from typing import Dict, Union, Optional
logger = logging.getLogger(__name__)
class Train(GenericTrain):
"""JAX implementation of :class:`gunpowder.nodes.GenericTrain`.
Args:
model (subclass of ``gunpowder.jax.GenericJaxModel``):
The model to train. This model encapsulates the forward model,
loss, and optimizer.
inputs (``dict``, ``string`` -> Union[np.ndarray, ArrayKey]):
Dictionary from the names of input tensors expected by the
``train_step`` method to array keys or ndarray.
outputs (``dict``, ``string`` -> :class:`ArrayKey`):
Dictionary from the names of tensors in the network to array
keys. If the key is a string, the tensor will be retrieved
by checking the model for an attribute with they key as its name.
If the key is an integer, it is interpreted as a tuple index of
the outputs of the network.
New arrays will be generated by this node for each entry (if
requested downstream).
array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):
Used to set the specs of generated arrays (at the moment only
``output``). This is useful to set the ``voxel_size``, for example,
if they differ from the voxel size of the input arrays. Only fields
that are not ``None`` in the given :class:`ArraySpec` will be used.
checkpoint_basename (``string``, optional):
The basename used for checkpoint files. Defaults to ``model``.
save_every (``int``, optional):
After how many iterations to create a checkpoint to store the
learnt weights.
keep_n_checkpoints (``int``, optional):
Number of checkpoints to keep. Node will attempt to delete older
checkpoints. Default is `None` (no deletion).
log_dir (``string``, optional):
Directory for saving tensorboard summaries.
log_every (``int``, optional):
After how many iterations to write out tensorboard summaries.
spawn_subprocess (``bool``, optional):
Whether to run the ``train_step`` in a separate process. Default is
false.
n_devices (``int``, optional):
Number of GPU devices to train on concurrently using `jax.pmap`. If
`None`, the number of available GPUs will be automatically detected
and used.
validate_fn (function -> Union[``float``, (``dict``, ``string`` -> ``float``)] , optional):
Function to run validation on, which should has the form of
def validate_fn(model, params)
where `model` is the same provided `GenericJaxModel` model and
`params` is the parameter of this model, and returns either a
``float`` (one loss) or a dictionary of losses to record in
tensorboard.
validate_every (``int``, optional):
After how many iterations to run `validate_fn`.
"""
def __init__(
    self,
    model: GenericJaxModel,
    inputs: Dict[str, Union[np.ndarray, ArrayKey]],
    outputs: Dict[Union[int, str], ArrayKey],
    gradients: Dict[Union[int, str], ArrayKey] = {},
    array_specs: Optional[Dict[ArrayKey, ArraySpec]] = None,
    checkpoint_basename: str = "model",
    save_every: int = 2000,
    keep_n_checkpoints: Optional[int] = None,
    log_dir: str = None,
    log_every: int = 1,
    spawn_subprocess: bool = False,
    n_devices: Optional[int] = None,
    validate_fn=None,
    validate_every=None,
):
    # See the class docstring for the meaning of each argument.
    # not yet implemented
    gradients = gradients
    super(Train, self).__init__(
        inputs, outputs, gradients, array_specs, spawn_subprocess=spawn_subprocess
    )

    self.model = model
    self.checkpoint_basename = checkpoint_basename
    self.save_every = save_every

    # default: train on every locally visible device
    if n_devices is None:
        n_devices = jax.local_device_count()  # autodetect available GPUs
    self.n_devices = n_devices
    self.local_devices = jax.local_devices()
    self.keep_n_checkpoints = keep_n_checkpoints

    # current training iteration; may be overwritten in start() when a
    # checkpoint is found on disk
    self.iteration = 0

    # tensorboard summaries are optional: they require both the
    # tensorboardX package and an explicit log_dir
    if not isinstance(tensorboardX, NoSuchModule) and log_dir is not None:
        self.summary_writer = tensorboardX.SummaryWriter(log_dir)
        self.log_every = log_every
    else:
        self.summary_writer = None
        if log_dir is not None:
            logger.warning("log_dir given, but tensorboardX is not installed")

    self.intermediate_layers = {}
    self.validate_fn = validate_fn
    self.validate_every = validate_every
def replicate_params(self, params):
    """Broadcast a parameter pytree so each local device gets its own copy.

    Every leaf gains a new leading axis of length ``self.n_devices``,
    matching the layout expected by ``jax.pmap``.
    """
    n = self.n_devices

    def _tile(leaf):
        # stack n identical copies along a new leading device axis
        return jnp.array([leaf] * n)

    return jax.tree_map(_tile, params)
def start(self):
    # Resume from the most recent checkpoint on disk, if any; otherwise
    # defer parameter creation to the first train_step (lazy init).
    checkpoint, self.iteration = self._get_latest_checkpoint(
        self.checkpoint_basename
    )

    if checkpoint is not None:
        logger.info("Resuming training from iteration %d", self.iteration)
        with open(checkpoint, "rb") as f:
            # checkpoints are pickled parameter pytrees (see train_step)
            self.model_params = pickle.load(f)
            if self.n_devices > 1:
                # only a single copy is saved; replicate across devices
                self.model_params = self.replicate_params(self.model_params)
    else:
        logger.info("Starting training from scratch")
        self.model_params = None
def split_inputs(self, inputs):
    """Reshape each batch array into per-device shards (in place).

    The leading (batch) dimension of every array is divided evenly across
    ``self.n_devices``; the result is stored back as a list of per-device
    sub-arrays suitable for ``jax.device_put_sharded``.
    """
    n = self.n_devices
    for name in inputs:
        data = inputs[name]
        assert data.shape[0] % n == 0, (
            f"Batch size should be evenly divisible by the number of "
            f"devices. Input array shape is {data.shape} but n_device is"
            f" {n}"
        )
        sharded = data.reshape(n, data.shape[0] // n, *data.shape[1:])
        # make a sequence for put_sharded
        inputs[name] = list(sharded)
    return inputs
def unstack_device_outputs(self, outputs):
    """Merge the leading device axis back into the batch axis (in place).

    Inverse of :meth:`split_inputs`: an array of shape
    ``(n_devices, per_device_batch, ...)`` becomes ``(batch, ...)``.
    """
    for name in list(outputs):
        stacked = outputs[name]
        merged_batch = stacked.shape[0] * stacked.shape[1]
        outputs[name] = stacked.reshape(merged_batch, *stacked.shape[2:])
    return outputs
def train_step(self, batch, request):
    """Run one optimization step on ``batch`` and record requested outputs.

    Inputs are placed on the device(s), the model is lazily initialized on
    the first call, and checkpoints/summaries are written according to
    ``save_every``/``log_every``/``validate_every``.

    Fix: the validation branch previously dereferenced
    ``self.summary_writer`` unconditionally, crashing with AttributeError
    when ``validate_fn`` was given but no ``log_dir`` was set (or
    tensorboardX is missing). Validation now runs either way and results
    are only logged when a writer exists.
    """
    inputs = self.__collect_provided_inputs(batch)
    if self.n_devices > 1:
        inputs = self.split_inputs(inputs)

    # put to device for max performance
    if self.n_devices > 1:
        for k, v in inputs.items():
            inputs[k] = jax.device_put_sharded(v, jax.local_devices())
    else:
        for k, v in inputs.items():
            inputs[k] = jax.device_put(v)

    # initialize model if necessary
    if self.model_params is None:
        # Use the random seed of first request to initialize model's weight
        rng = jax.random.PRNGKey(request.random_seed)
        if self.n_devices > 1:
            rng = jnp.broadcast_to(rng, (self.n_devices,) + rng.shape)
            self.model_params = jax.pmap(self.model.initialize)(rng, inputs)
        else:
            self.model_params = self.model.initialize(rng, inputs)

    requested_outputs = self.__collect_requested_outputs(request)

    if self.n_devices > 1:
        # one training step per device; parameters are donated to avoid a
        # copy, the boolean "is pmapped" flag is broadcast statically
        self.model_params, outputs, loss = jax.pmap(
            self.model.train_step,
            axis_name="num_devices",
            donate_argnums=(0,),
            static_broadcasted_argnums=(2,),
        )(self.model_params, inputs, True)
        loss = loss.mean()
        outputs = self.unstack_device_outputs(outputs)  # stack by batch
    else:
        self.model_params, outputs, loss = jax.jit(
            self.model.train_step, donate_argnums=(0,), static_argnums=(2,)
        )(self.model_params, inputs, False)

    logger.debug("model outputs: %s", {k: v.shape for k, v in outputs.items()})

    # add requested model outputs to batch
    for array_key, array_name in requested_outputs.items():
        spec = self.spec[array_key].copy()
        spec.roi = request[array_key].roi
        batch.arrays[array_key] = Array(outputs[array_name], spec)

    # a dict-valued loss is summed into a single scalar for the batch
    if isinstance(loss, dict):
        total = 0.0
        for k, v in loss.items():
            total += v
        batch.loss = total
    else:
        batch.loss = loss
    self.iteration += 1
    batch.iteration = self.iteration

    if batch.iteration % self.save_every == 0:
        checkpoint_name = self._checkpoint_name(
            self.checkpoint_basename, batch.iteration
        )

        logger.info("Creating checkpoint %s", checkpoint_name)

        model_state = self.model_params
        if self.n_devices > 1:
            # get only a single copy of param for saving
            model_state = jax.tree_map(lambda x: x[0], model_state)
        with open(checkpoint_name, "wb") as f:
            pickle.dump(model_state, f)

        if self.keep_n_checkpoints:
            # best-effort deletion of the checkpoint that just fell out of
            # the retention window
            checkpoint_name = self._checkpoint_name(
                self.checkpoint_basename,
                batch.iteration - self.keep_n_checkpoints * self.save_every,
            )
            try:
                os.remove(checkpoint_name)
                logger.info("Removed checkpoint %s", checkpoint_name)
            except FileNotFoundError:
                pass

    if self.summary_writer and batch.iteration % self.log_every == 0:
        if isinstance(loss, dict):
            for k, v in loss.items():
                self.summary_writer.add_scalar(k, v, batch.iteration)
        else:
            self.summary_writer.add_scalar("loss", loss, batch.iteration)

    # run validate
    if self.validate_fn is not None and batch.iteration % self.validate_every == 0:
        val_ret = self.validate_fn(self.model, self.model_params)
        # only log when a summary writer was configured (see docstring)
        if self.summary_writer is not None:
            if isinstance(val_ret, dict):
                for k, v in val_ret.items():
                    self.summary_writer.add_scalar(k, v, batch.iteration)
            else:
                self.summary_writer.add_scalar("validate", val_ret, batch.iteration)
def __collect_requested_outputs(self, request):
    """Map requested array keys to the network output names producing them."""
    return {
        array_key: output_name
        for output_name, array_key in self.outputs.items()
        if array_key in request
    }
def __collect_provided_inputs(self, batch):
    # Thin wrapper: resolve the ``inputs`` name->key mapping against the batch.
    return self.__collect_provided_arrays(self.inputs, batch)
def __collect_provided_arrays(self, reference, batch):
    """Gather the data for each named network array from the batch.

    Args:
        reference: dict mapping array names to either an :class:`ArrayKey`
            (looked up in ``batch``) or a raw ``np.ndarray`` (used as-is).
        batch: the current batch to read ArrayKey entries from.

    Returns:
        dict mapping array names to numpy arrays. Names whose ArrayKey is
        missing from the batch are skipped with a warning.

    Raises:
        Exception: if a reference value is neither an ArrayKey nor an
            ndarray.

    Fix: ``logger.warn`` is a deprecated alias of ``logger.warning``; the
    message is now also passed lazily instead of pre-formatted.
    """
    arrays = {}

    for array_name, array_key in reference.items():
        if isinstance(array_key, ArrayKey):
            if array_key in batch.arrays:
                arrays[array_name] = batch.arrays[array_key].data
            else:
                logger.warning(
                    "batch does not contain %s, array %s will not be set",
                    array_key,
                    array_name,
                )
        elif isinstance(array_key, np.ndarray):
            arrays[array_name] = array_key
        else:
            raise Exception(
                "Unknown network array key {}, can't be given to "
                "network".format(array_key)
            )

    return arrays
| 11,796 | 36.690096 | 99 | py |
gunpowder | gunpowder-master/gunpowder/nodes/unsqueeze.py | import copy
from typing import List
import logging
import numpy as np
from gunpowder.array import ArrayKey
from gunpowder.batch import Batch
from gunpowder.batch_request import BatchRequest
from .batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class Unsqueeze(BatchFilter):
    """Insert a new, single-entry axis into selected arrays of a batch.

    Args:
        arrays (List[ArrayKey]): ArrayKeys to unsqueeze.
        axis: Position where the new axis is placed, defaults to 0. Only
            non-spatial (leading) positions are allowed for spatial arrays.
    """

    def __init__(self, arrays: List[ArrayKey], axis: int = 0):
        self.arrays = arrays
        self.axis = axis

    def setup(self):
        self.enable_autoskip()
        for key in self.arrays:
            self.updates(key, self.spec[key].copy())

    def prepare(self, request):
        # pass the request for each handled array through unchanged
        deps = BatchRequest()
        for key in self.arrays:
            if key in request:
                deps[key] = request[key].copy()
        return deps

    def process(self, batch, request):
        outputs = Batch()
        for key in self.arrays:
            if key not in batch:
                continue
            if not batch[key].spec.nonspatial:
                # a new axis may only be created among the non-spatial
                # (channel-like) leading dimensions
                num_spatial = request[key].roi.dims
                num_channel_dims = batch[key].data.ndim - num_spatial
                if self.axis > num_channel_dims:
                    raise ValueError(
                        (
                            f"Unsqueeze.axis={self.axis} not permitted. "
                            "Unsqueeze only supported for "
                            "non-spatial dimensions of Array."
                        )
                    )
            outputs[key] = batch[key]
            outputs[key].data = np.expand_dims(batch[key].data, self.axis)
        return outputs
| 1,757 | 29.310345 | 82 | py |
gunpowder | gunpowder-master/gunpowder/nodes/squeeze.py | import copy
from typing import List
import logging
import numpy as np
from gunpowder.array import ArrayKey
from gunpowder.batch_request import BatchRequest
from gunpowder.batch import Batch
from .batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class Squeeze(BatchFilter):
    """Remove a single-entry axis from selected arrays of a batch.

    Args:
        arrays (List[ArrayKey]): ArrayKeys to squeeze.
        axis: Position of the single-dimensional axis to remove, defaults
            to 0. Only non-spatial (leading) dimensions may be removed.
    """

    def __init__(self, arrays: List[ArrayKey], axis: int = 0):
        self.arrays = arrays
        self.axis = axis

    def setup(self):
        self.enable_autoskip()
        for key in self.arrays:
            self.updates(key, self.spec[key].copy())

    def prepare(self, request):
        # pass the request for each handled array through unchanged
        deps = BatchRequest()
        for key in self.arrays:
            if key in request:
                deps[key] = request[key].copy()
        return deps

    def process(self, batch, request):
        outputs = Batch()
        for key in self.arrays:
            if key not in batch:
                continue
            if not batch[key].spec.nonspatial:
                # only a non-spatial (channel-like) leading dimension may
                # be squeezed away
                num_spatial = request[key].roi.dims
                num_channel_dims = batch[key].data.ndim - num_spatial
                if self.axis >= num_channel_dims:
                    raise ValueError(
                        (
                            f"Squeeze.axis={self.axis} not permitted. "
                            "Squeeze only supported for "
                            "non-spatial dimensions of Array."
                        )
                    )
            outputs[key] = batch[key]
            outputs[key].data = np.squeeze(batch[key].data, self.axis)
            logger.debug(f"{key} shape: {outputs[key].data.shape}")
        return outputs
| 1,832 | 30.067797 | 79 | py |
gunpowder | gunpowder-master/gunpowder/nodes/intensity_augment.py | import numpy as np
from gunpowder.batch_request import BatchRequest
from .batch_filter import BatchFilter
class IntensityAugment(BatchFilter):
    """Randomly scale and shift the values of an intensity array.

    Args:

        array (:class:`ArrayKey`):

            The intensity array to modify.

        scale_min (``float``):
        scale_max (``float``):
        shift_min (``float``):
        shift_max (``float``):

            The min and max of the uniformly randomly drawn scaling and
            shifting values for the intensity augmentation. Intensities are
            changed as::

                a = a.mean() + (a-a.mean())*scale + shift

        z_section_wise (``bool``):

            Perform the augmentation z-section wise. Requires 3D arrays and
            assumes that z is the first dimension.

        clip (``bool``):

            Set to False if modified values should not be clipped to [0, 1]
            Disables range check!
    """

    def __init__(
        self,
        array,
        scale_min,
        scale_max,
        shift_min,
        shift_max,
        z_section_wise=False,
        clip=True,
    ):
        self.array = array
        self.scale_min = scale_min
        self.scale_max = scale_max
        self.shift_min = shift_min
        self.shift_max = shift_max
        self.z_section_wise = z_section_wise
        self.clip = clip

    def setup(self):
        self.enable_autoskip()
        self.updates(self.array, self.spec[self.array])

    def prepare(self, request):
        # TODO: move all randomness into the prepare method
        # TODO: write a test for this node
        np.random.seed(request.random_seed)
        deps = BatchRequest()
        deps[self.array] = request[self.array].copy()
        return deps

    def _draw_scale(self):
        # one uniform sample from the configured scale interval
        return np.random.uniform(low=self.scale_min, high=self.scale_max)

    def _draw_shift(self):
        # one uniform sample from the configured shift interval
        return np.random.uniform(low=self.shift_min, high=self.shift_max)

    def process(self, batch, request):
        raw = batch.arrays[self.array]

        assert (
            not self.z_section_wise or raw.spec.roi.dims == 3
        ), "If you specify 'z_section_wise', I expect 3D data."
        assert raw.data.dtype == np.float32 or raw.data.dtype == np.float64, (
            "Intensity augmentation requires float types for the raw array (not "
            + str(raw.data.dtype)
            + "). Consider using Normalize before."
        )
        if self.clip:
            assert (
                raw.data.min() >= 0 and raw.data.max() <= 1
            ), "Intensity augmentation expects raw values in [0,1]. Consider using Normalize before."

        if self.z_section_wise:
            # draw an independent scale/shift pair per z section
            num_sections = (raw.spec.roi / self.spec[self.array].voxel_size).shape[0]
            for z in range(num_sections):
                raw.data[z] = self.__augment(
                    raw.data[z], self._draw_scale(), self._draw_shift()
                )
        else:
            raw.data = self.__augment(
                raw.data, self._draw_scale(), self._draw_shift()
            )

        # clip values, we might have pushed them out of [0,1]
        if self.clip:
            np.clip(raw.data, 0.0, 1.0, out=raw.data)

    def __augment(self, a, scale, shift):
        # scale and shift around the mean so the overall brightness is kept
        mean = a.mean()
        return mean + (a - mean) * scale + shift
| 3,352 | 30.632075 | 101 | py |
gunpowder | gunpowder-master/gunpowder/nodes/elastic_augment.py | import logging
import math
import numpy as np
import random
from scipy import ndimage
from .batch_filter import BatchFilter
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from gunpowder.ext import augment
from gunpowder.roi import Roi
from gunpowder.array import ArrayKey
import warnings
logger = logging.getLogger(__name__)
class ElasticAugment(BatchFilter):
    """(DEPRECATED) Elasticly deform a batch. Requests larger batches upstream to avoid data
    loss due to rotation and jitter.

    Args:

        control_point_spacing (``tuple`` of ``int``):

            Distance between control points for the elastic deformation, in
            voxels per dimension.

        jitter_sigma (``tuple`` of ``float``):

            Standard deviation of control point jitter distribution, in voxels
            per dimension.

        rotation_interval (``tuple`` of two ``floats``):

            Interval to randomly sample rotation angles from (0, 2PI).

        scale_interval (``tuple`` of two ``floats``):

            Interval to randomly sample scale factors from.

        prob_slip (``float``):

            Probability of a section to "slip", i.e., be independently moved in
            x-y.

        prob_shift (``float``):

            Probability of a section and all following sections to move in x-y.

        max_misalign (``int``):

            Maximal voxels to shift in x and y. Samples will be drawn
            uniformly. Used if ``prob_slip + prob_shift`` > 0.

        subsample (``int``):

            Instead of creating an elastic transformation on the full
            resolution, create one subsampled by the given factor, and linearly
            interpolate to obtain the full resolution transformation. This can
            significantly speed up this node, at the expense of having visible
            piecewise linear deformations for large factors. Usually, a factor
            of 4 can safely be used without noticeable changes. However, the
            default is 1 (i.e., no subsampling).

        spatial_dims (``int``):

            The number of spatial dimensions in arrays. Spatial dimensions are
            assumed to be the last ones and cannot be more than 3 (default).
            Set this value here to avoid treating channels as spatial
            dimension. If, for example, your array is indexed as ``(c,y,x)``
            (2D plus channels), you would want to set ``spatial_dims=2`` to
            perform the elastic deformation only on x and y.

        use_fast_points_transform (``bool``):

            By solving for all of your points simultaneously with the following
            3 step procedure:
            1) Rasterize nodes into numpy array
            2) Apply elastic transform to array
            3) Read out nodes via center of mass of transformed points
            You can gain substantial speed up as opposed to calculating the
            elastic transform for each point individually. However this may
            lead to nodes being lost during the transform.

        recompute_missing_points (``bool``):

            Whether or not to compute the elastic transform node wise for nodes
            that were lost during the fast elastic transform process.
    """

    def __init__(
        self,
        control_point_spacing,
        jitter_sigma,
        rotation_interval,
        scale_interval=(1.0, 1.0),
        prob_slip=0,
        prob_shift=0,
        max_misalign=0,
        subsample=1,
        spatial_dims=3,
        use_fast_points_transform=False,
        recompute_missing_points=True,
    ):
        warnings.warn(
            "ElasticAugment is deprecated, please use the DeformAugment",
            DeprecationWarning,
        )
        self.control_point_spacing = control_point_spacing
        self.jitter_sigma = jitter_sigma
        self.rotation_start = rotation_interval[0]
        self.rotation_max_amount = rotation_interval[1] - rotation_interval[0]
        self.scale_min = scale_interval[0]
        self.scale_max = scale_interval[1]
        self.prob_slip = prob_slip
        self.prob_shift = prob_shift
        self.max_misalign = max_misalign
        self.subsample = subsample
        self.spatial_dims = spatial_dims
        self.use_fast_points_transform = use_fast_points_transform
        self.recompute_missing_points = recompute_missing_points

    def prepare(self, request):
        seed = request.random_seed
        random.seed(seed)
        # augment uses numpy for its randomness
        np.random.seed(seed)

        # get the voxel size
        self.voxel_size = self.__get_common_voxel_size(request)

        # get the total ROI of all requests
        total_roi = request.get_total_roi()
        logger.debug("total ROI is %s" % total_roi)

        # First, get the total ROI of the request in spatial dimensions only.
        # Channels and time don't matter. This is our master ROI.

        # get master ROI
        master_roi = Roi(
            total_roi.begin[-self.spatial_dims :],
            total_roi.shape[-self.spatial_dims :],
        )
        self.spatial_dims = master_roi.dims
        logger.debug("master ROI is %s" % master_roi)

        # make sure the master ROI aligns with the voxel size
        master_roi = master_roi.snap_to_grid(self.voxel_size, mode="grow")
        logger.debug("master ROI aligned with voxel size is %s" % master_roi)

        # get master roi in voxels
        master_roi_voxels = master_roi / self.voxel_size
        logger.debug("master ROI in voxels is %s" % master_roi_voxels)

        # Second, create a master transformation. This is a transformation that
        # covers all voxels of the all requested ROIs. The master transformation
        # is zero-based.

        # create a transformation with the size of the master ROI in voxels
        self.master_transformation = self.__create_transformation(
            master_roi_voxels.shape
        )

        # Third, crop out parts of the master transformation for each of the
        # smaller requested ROIs. Since these ROIs now have to align with the
        # voxel size (which for points does not have to be the case), we also
        # remember these smaller ROIs as target_rois in global world units.

        # crop the parts corresponding to the requested ROIs
        self.transformations = {}
        self.target_rois = {}
        deps = BatchRequest()
        for key, spec in request.items():
            spec = spec.copy()

            if spec.roi is None:
                continue

            target_roi = Roi(
                spec.roi.begin[-self.spatial_dims :],
                spec.roi.shape[-self.spatial_dims :],
            )
            logger.debug("downstream request spatial ROI for %s is %s", key, target_roi)

            # make sure the target ROI aligns with the voxel grid (which might
            # not be the case for points)
            target_roi = target_roi.snap_to_grid(self.voxel_size, mode="grow")
            logger.debug(
                "downstream request spatial ROI aligned with voxel grid for %s "
                "is %s",
                key,
                target_roi,
            )

            # remember target ROI (this is where the transformation will project
            # to)
            self.target_rois[key] = target_roi

            # get ROI in voxels
            target_roi_voxels = target_roi / self.voxel_size

            # get ROI relative to master ROI
            target_roi_in_master_roi_voxels = (
                target_roi_voxels - master_roi_voxels.begin
            )

            # crop out relevant part of transformation for this request
            transformation = np.copy(
                self.master_transformation[
                    (slice(None),) + target_roi_in_master_roi_voxels.get_bounding_box()
                ]
            )
            self.transformations[key] = transformation

            # get ROI of all voxels necessary to perform transformation
            #
            # for that we follow the same transformations to get from the
            # request ROI to the target ROI in master ROI in voxels, just in
            # reverse
            source_roi_in_master_roi_voxels = self.__get_source_roi(transformation)
            source_roi_voxels = (
                source_roi_in_master_roi_voxels + master_roi_voxels.begin
            )
            source_roi = source_roi_voxels * self.voxel_size

            # transformation is still defined on voxels relative to master ROI
            # in voxels (i.e., lowest source coordinate could be 5, but data
            # array we get later starts at 0).
            #
            # shift transformation to be indexed relative to beginning of
            # source_roi_voxels
            self.__shift_transformation(
                -source_roi_in_master_roi_voxels.begin, transformation
            )

            # update upstream request
            spec.roi = Roi(
                spec.roi.begin[: -self.spatial_dims]
                + source_roi.begin[-self.spatial_dims :],
                spec.roi.shape[: -self.spatial_dims]
                + source_roi.shape[-self.spatial_dims :],
            )

            deps[key] = spec

            logger.debug("upstream request roi for %s = %s" % (key, spec.roi))

        return deps

    def process(self, batch, request):
        for array_key, array in batch.arrays.items():
            if array_key not in self.target_rois:
                continue

            # for arrays, the target ROI and the requested ROI should be the
            # same in spatial coordinates
            assert (
                self.target_rois[array_key].begin
                == request[array_key].roi.begin[-self.spatial_dims :]
            ), "Target roi offset {} does not match request roi offset {}".format(
                self.target_rois[array_key].begin,
                request[array_key].roi.begin[-self.spatial_dims :],
            )

            assert (
                self.target_rois[array_key].shape
                == request[array_key].roi.shape[-self.spatial_dims :]
            ), "Target roi offset {} does not match request roi offset {}".format(
                self.target_rois[array_key].shape,
                request[array_key].roi.shape[-self.spatial_dims :],
            )

            # reshape array data into (channels,) + spatial dims
            shape = array.data.shape
            channel_shape = shape[: -self.spatial_dims]
            data = array.data.reshape((-1,) + shape[-self.spatial_dims :])

            # apply transformation on each channel
            data = np.array(
                [
                    augment.apply_transformation(
                        data[c],
                        self.transformations[array_key],
                        interpolate=self.spec[array_key].interpolatable,
                    )
                    for c in range(data.shape[0])
                ]
            )

            data_roi = request[array_key].roi / self.spec[array_key].voxel_size
            array.data = data.reshape(
                channel_shape + data_roi.shape[-self.spatial_dims :]
            )

            # restore original ROIs
            array.spec.roi = request[array_key].roi

        for graph_key, graph in batch.graphs.items():
            nodes = list(graph.nodes)

            if self.use_fast_points_transform:
                missed_nodes = self.__fast_point_projection(
                    self.transformations[graph_key],
                    nodes,
                    graph.spec.roi,
                    target_roi=self.target_rois[graph_key],
                )
                if not self.recompute_missing_points:
                    for node in set(missed_nodes):
                        graph.remove_node(node, retain_connectivity=True)
                    missed_nodes = []
            else:
                missed_nodes = nodes

            for node in missed_nodes:
                # logger.debug("projecting %s", node.location)

                # get location relative to beginning of upstream ROI
                location = node.location - graph.spec.roi.begin
                logger.debug("relative to upstream ROI: %s", location)

                # get spatial coordinates of node in voxels
                location_voxels = location[-self.spatial_dims :] / self.voxel_size

                # get projected location in transformation data space, this
                # yields voxel coordinates relative to target ROI
                projected_voxels = self.__project(
                    self.transformations[graph_key], location_voxels
                )

                logger.debug(
                    "projected in voxels, relative to target ROI: %s", projected_voxels
                )

                if projected_voxels is None:
                    logger.debug("node outside of target, skipping")
                    graph.remove_node(node, retain_connectivity=True)
                    continue

                # convert to world units (now in float again)
                projected = projected_voxels * np.array(self.voxel_size)

                logger.debug(
                    "projected in world units, relative to target ROI: %s", projected
                )

                # get global coordinates
                projected += np.array(self.target_rois[graph_key].begin)

                # update spatial coordinates of node location
                node.location[-self.spatial_dims :] = projected

                logger.debug("final location: %s", node.location)

                # finally, it can happen that a node no longer is contained in
                # the requested ROI (because larger ROIs than necessary have
                # been requested upstream)
                if not request[graph_key].roi.contains(node.location):
                    logger.debug("node outside of target, skipping")
                    graph.remove_node(node, retain_connectivity=True)
                    continue

            # restore original ROIs
            graph.spec.roi = request[graph_key].roi

    def __get_common_voxel_size(self, request):
        """Return the (spatial) voxel size shared by all requested arrays.

        Raises an assertion if two arrays disagree, and a RuntimeError if no
        array provides a voxel size at all.
        """
        voxel_size = None
        prev = None
        for array_key in request.array_specs.keys():
            if voxel_size is None:
                voxel_size = self.spec[array_key].voxel_size[-self.spatial_dims :]
            elif self.spec[array_key].voxel_size is not None:
                assert (
                    voxel_size == self.spec[array_key].voxel_size[-self.spatial_dims :]
                ), (
                    "ElasticAugment can only be used with arrays of same voxel sizes, "
                    "but %s has %s, and %s has %s."
                    % (
                        array_key,
                        self.spec[array_key].voxel_size,
                        prev,
                        self.spec[prev].voxel_size,
                    )
                )
            prev = array_key

        if voxel_size is None:
            raise RuntimeError("voxel size must not be None")

        return Coordinate(voxel_size)

    def __create_transformation(self, target_shape):
        """Compose identity + elastic + rotation (+ misalignment) fields."""
        scale = self.scale_min + random.random() * (self.scale_max - self.scale_min)

        transformation = augment.create_identity_transformation(
            target_shape, subsample=self.subsample, scale=scale
        )
        if sum(self.jitter_sigma) > 0:
            transformation += augment.create_elastic_transformation(
                target_shape,
                self.control_point_spacing,
                self.jitter_sigma,
                subsample=self.subsample,
            )
        rotation = random.random() * self.rotation_max_amount + self.rotation_start
        if rotation != 0:
            transformation += augment.create_rotation_transformation(
                target_shape, rotation, subsample=self.subsample
            )

        if self.subsample > 1:
            transformation = augment.upscale_transformation(
                transformation, target_shape
            )

        if self.prob_slip + self.prob_shift > 0:
            self.__misalign(transformation)

        return transformation

    def __fast_point_projection(self, transformation, nodes, source_roi, target_roi):
        """Project all nodes at once by rasterizing, warping, and reading back
        centers of mass. Returns the nodes that could not be projected.

        Fix: nodes that were rasterized outside the source ROI (and therefore
        never appear in ``ids``) were previously appended as the *stale* loop
        variable ``point``; they are now correctly appended as ``node``.
        """
        if len(nodes) < 1:
            return []
        # rasterize the points into an array
        ids, locs = zip(
            *[
                (
                    node.id,
                    (np.floor(node.location).astype(int) - source_roi.begin)
                    // self.voxel_size,
                )
                for node in nodes
                if source_roi.contains(node.location)
            ]
        )
        ids, locs = np.array(ids), tuple(zip(*locs))
        points_array = np.zeros(source_roi.shape / self.voxel_size, dtype=np.int64)
        points_array[locs] = ids

        # reshape array data into (channels,) + spatial dims
        shape = points_array.shape
        data = points_array.reshape((-1,) + shape[-self.spatial_dims :])

        # apply transformation on each channel
        data = np.array(
            [
                augment.apply_transformation(
                    data[c], transformation, interpolate="nearest"
                )
                for c in range(data.shape[0])
            ]
        )

        missing_points = []
        # ndimage.measurements is a deprecated alias namespace
        projected_locs = ndimage.center_of_mass(data > 0, data, ids)
        projected_locs = [
            np.array(loc[-self.spatial_dims :]) * self.voxel_size + target_roi.begin
            for loc in projected_locs
        ]
        node_dict = {node.id: node for node in nodes}
        for point_id, proj_loc in zip(ids, projected_locs):
            point = node_dict.pop(point_id)
            if not any([np.isnan(x) for x in proj_loc]):
                assert (
                    len(proj_loc) == self.spatial_dims
                ), "projected location has wrong number of dimensions: {}, expected: {}".format(
                    len(proj_loc), self.spatial_dims
                )
                point.location[-self.spatial_dims :] = proj_loc
            else:
                missing_points.append(point)
        for node in node_dict.values():
            # these nodes were never rasterized (outside source ROI)
            missing_points.append(node)
        logger.debug(
            "{} of {} points lost in fast points projection".format(
                len(missing_points), len(ids)
            )
        )

        return missing_points

    def __project(self, transformation, location):
        """Find the projection of location given by transformation. Returns None
        if projection lies outside of transformation."""

        dims = len(location)

        # subtract location from transformation
        diff = transformation.copy()
        for d in range(dims):
            diff[d] -= location[d]

        # square
        diff2 = diff * diff

        # sum
        dist = diff2.sum(axis=0)

        # find grid point closest to location
        center_grid = Coordinate(np.unravel_index(dist.argmin(), dist.shape))
        center_source = self.__source_at(transformation, center_grid)

        logger.debug("projecting %s onto grid", location)
        logger.debug("grid shape: %s", transformation.shape[1:])
        logger.debug("grid projection: %s", center_grid)
        logger.debug("dist shape: %s", dist.shape)
        logger.debug("dist.argmin(): %s", dist.argmin())
        logger.debug("dist[argmin]: %s", dist[center_grid])
        logger.debug(
            "transform[argmin]: %s", transformation[(slice(None),) + center_grid]
        )
        logger.debug("min dist: %s", dist.min())
        logger.debug("center source: %s", center_source)

        # inspect grid edges incident to center_grid
        for d in range(dims):
            # nothing to do for dimensions without spatial extent
            if transformation.shape[1 + d] == 1:
                continue

            dim_vector = Coordinate(1 if dd == d else 0 for dd in range(dims))
            pos_grid = center_grid + dim_vector
            neg_grid = center_grid - dim_vector
            logger.debug("interpolating along %s", dim_vector)

            pos_u = -1
            neg_u = -1

            if pos_grid[d] < transformation.shape[1 + d]:
                pos_source = self.__source_at(transformation, pos_grid)
                logger.debug("pos source: %s", pos_source)
                pos_dist = pos_source[d] - center_source[d]
                loc_dist = location[d] - center_source[d]
                if pos_dist != 0:
                    pos_u = loc_dist / pos_dist
                else:
                    pos_u = 0

            if neg_grid[d] >= 0:
                neg_source = self.__source_at(transformation, neg_grid)
                logger.debug("neg source: %s", neg_source)
                neg_dist = neg_source[d] - center_source[d]
                loc_dist = location[d] - center_source[d]
                if neg_dist != 0:
                    neg_u = loc_dist / neg_dist
                else:
                    neg_u = 0

            logger.debug("pos u/neg u: %s/%s", pos_u, neg_u)

            # if a point only falls behind edges, it lies outside of the grid
            if pos_u < 0 and neg_u < 0:
                return None

        return np.array(center_grid, dtype=np.float32)

    def __source_at(self, transformation, index):
        """Read the source point of a transformation at index."""

        slices = (slice(None),) + tuple(slice(i, i + 1) for i in index)
        return transformation[slices].flatten()

    def __get_source_roi(self, transformation):
        dims = transformation.shape[0]

        # get bounding box of needed data for transformation
        bb_min = Coordinate(
            int(math.floor(transformation[d].min())) for d in range(dims)
        )
        bb_max = Coordinate(
            int(math.ceil(transformation[d].max())) + 1 for d in range(dims)
        )

        # create roi sufficiently large to feed transformation
        source_roi = Roi(bb_min, bb_max - bb_min)

        return source_roi

    def __shift_transformation(self, shift, transformation):
        for d in range(transformation.shape[0]):
            transformation[d] += shift[d]

    def __misalign(self, transformation):
        assert (
            transformation.shape[0] == 3
        ), "misalign can only be applied to 3D volumes"

        num_sections = transformation[0].shape[0]

        shifts = [Coordinate((0, 0, 0))] * num_sections
        for z in range(num_sections):
            r = random.random()

            if r <= self.prob_slip:
                shifts[z] = self.__random_offset()

            elif r <= self.prob_slip + self.prob_shift:
                offset = self.__random_offset()
                for zp in range(z, num_sections):
                    shifts[zp] += offset

        logger.debug("misaligning sections with " + str(shifts))

        dims = 3
        bb_min = tuple(int(math.floor(transformation[d].min())) for d in range(dims))
        bb_max = tuple(int(math.ceil(transformation[d].max())) + 1 for d in range(dims))
        logger.debug("min/max of transformation: " + str(bb_min) + "/" + str(bb_max))

        for z in range(num_sections):
            transformation[1][z, :, :] += shifts[z][1]
            transformation[2][z, :, :] += shifts[z][2]

        bb_min = tuple(int(math.floor(transformation[d].min())) for d in range(dims))
        bb_max = tuple(int(math.ceil(transformation[d].max())) + 1 for d in range(dims))
        logger.debug(
            "min/max of transformation after misalignment: "
            + str(bb_min)
            + "/"
            + str(bb_max)
        )

    def __random_offset(self):
        return Coordinate(
            (0,)
            + tuple(
                self.max_misalign - random.randint(0, 2 * int(self.max_misalign))
                for d in range(2)
            )
        )
| 24,005 | 36.924171 | 96 | py |
gunpowder | gunpowder-master/gunpowder/nodes/reject.py | import logging
import random
from .batch_filter import BatchFilter
from gunpowder.profiling import Timing
logger = logging.getLogger(__name__)
class Reject(BatchFilter):
    """Reject batches based on the masked-in vs. masked-out ratio.

    If a pipeline also contains a :class:`RandomLocation` node,
    :class:`Reject` needs to be placed downstream of it.

    Args:

        mask (:class:`ArrayKey`, optional):

            The mask to use, if any.

        min_masked (``float``, optional):

            The minimal required ratio of masked-in vs. masked-out voxels.
            Defaults to 0.5.

        ensure_nonempty (:class:`GraphKey`, optional)

            Ensures there is at least one point in the batch.

        reject_probability (``float``, optional):

            The probability by which a batch that is not valid (less than
            min_masked) is actually rejected. Defaults to 1., i.e. strict
            rejection.
    """

    def __init__(
        self, mask=None, min_masked=0.5, ensure_nonempty=None, reject_probability=1.0
    ):
        self.mask = mask
        self.min_masked = min_masked
        self.ensure_nonempty = ensure_nonempty
        self.reject_probability = reject_probability

    def setup(self):
        if self.mask:
            assert self.mask in self.spec, (
                "Reject can only be used if %s is provided" % self.mask
            )
        if self.ensure_nonempty:
            assert self.ensure_nonempty in self.spec, (
                "Reject can only be used if %s is provided" % self.ensure_nonempty
            )
        self.upstream_provider = self.get_upstream_provider()

    def provide(self, request):
        random.seed(request.random_seed)

        report_next_timeout = 10
        num_rejected = 0

        timing = Timing(self)
        timing.start()

        if self.mask:
            assert self.mask in request, (
                "Reject can only be used if %s is provided" % self.mask
            )
        if self.ensure_nonempty:
            assert self.ensure_nonempty in request, (
                "Reject can only be used if %s is provided" % self.ensure_nonempty
            )

        while True:
            batch = self.upstream_provider.request_batch(request)

            # compute the two acceptance criteria (None == "not checked")
            if self.mask:
                mask_ratio = batch.arrays[self.mask].data.mean()
            else:
                mask_ratio = None
            if self.ensure_nonempty:
                num_points = len(list(batch.graphs[self.ensure_nonempty].nodes))
            else:
                num_points = None

            mask_ok = mask_ratio is None or mask_ratio > self.min_masked
            points_ok = num_points is None or num_points > 0
            accept = mask_ok and points_ok

            # soft rejection: a bad batch may still pass with probability
            # 1 - reject_probability
            if not accept and self.reject_probability < 1.0:
                accept = random.random() > self.reject_probability

            if accept:
                if self.mask:
                    logger.debug(
                        "accepted batch with mask ratio %f at %s",
                        mask_ratio,
                        batch.arrays[self.mask].spec.roi,
                    )
                if self.ensure_nonempty:
                    logger.debug(
                        "accepted batch with nonempty points in %s",
                        self.ensure_nonempty,
                    )
                break

            if self.mask:
                logger.debug(
                    "reject batch with mask ratio %f at %s",
                    mask_ratio,
                    batch.arrays[self.mask].spec.roi,
                )
            if self.ensure_nonempty:
                logger.debug(
                    "reject batch with empty points in %s",
                    batch.graphs[self.ensure_nonempty].spec.roi,
                )
            num_rejected += 1

            # periodically warn (with exponential back-off) while stuck
            if timing.elapsed() > report_next_timeout:
                logger.warning(
                    "rejected %d batches, been waiting for a good one since %ds",
                    num_rejected,
                    report_next_timeout,
                )
                report_next_timeout *= 2

        timing.stop()
        batch.profiling_stats.add(timing)

        return batch
| 4,419 | 31.740741 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/astype.py | from .batch_filter import BatchFilter
from gunpowder.array import ArrayKey, Array
from gunpowder.batch import Batch
import logging
logger = logging.getLogger(__name__)
class AsType(BatchFilter):
    """Cast an array to a different datatype (e.g. np.float32 --> np.uint8).

    Args:
        source (:class:`ArrayKey`):
            The key of the array to cast.
        target_dtype (str or dtype):
            The datatype to cast to.
        target (:class:`ArrayKey`, optional):
            Where to store the cast array. If omitted, ``source`` is
            updated in place.
    """

    def __init__(self, source, target_dtype, target=None):
        assert isinstance(source, ArrayKey)
        self.source = source
        self.target_dtype = target_dtype
        if target is None:
            # no separate output key: overwrite the source array
            self.target = source
        else:
            assert isinstance(target, ArrayKey)
            self.target = target

    def setup(self):
        out_spec = self.spec[self.source].copy()
        out_spec.dtype = self.target_dtype
        # announce a new key, or update the existing one when casting in place
        if self.target is self.source:
            self.updates(self.source, out_spec)
        else:
            self.provides(self.target, out_spec)
        self.enable_autoskip()

    def process(self, batch, request):
        source_array = batch.arrays[self.source]
        converted = source_array.data.astype(self.target_dtype)

        out_spec = source_array.spec.copy()
        out_spec.dtype = converted.dtype

        # return only the produced array; the framework merges it into batch
        result = Batch()
        result.arrays[self.target] = Array(converted, out_spec)
        return result
| 1,628 | 24.857143 | 73 | py |
gunpowder | gunpowder-master/gunpowder/nodes/csv_points_source.py | import numpy as np
import logging
from gunpowder.batch import Batch
from gunpowder.coordinate import Coordinate
from gunpowder.nodes.batch_provider import BatchProvider
from gunpowder.graph import Node, Graph
from gunpowder.graph_spec import GraphSpec
from gunpowder.profiling import Timing
from gunpowder.roi import Roi
logger = logging.getLogger(__name__)
class CsvPointsSource(BatchProvider):
    """Read a set of points from a comma-separated-values text file. Each line
    in the file represents one point, e.g. ``z y x (id)``. Values may be
    separated by whitespace, by commas, or by both.
    Args:
        filename (``string``):
            The file to read from.
        points (:class:`GraphKey`):
            The key of the points set to create.
        points_spec (:class:`GraphSpec`, optional):
            An optional :class:`GraphSpec` to overwrite the points specs
            automatically determined from the CSV file. This is useful to set
            the :class:`Roi` manually.
        scale (scalar or array-like):
            An optional scaling to apply to the coordinates of the points read
            from the CSV file. This is useful if the points refer to voxel
            positions to convert them to world units.
        ndims (``int``):
            If ``ndims`` is None, all values in one line are considered as the
            location of the point. If positive, only the first ``ndims`` are used.
            If negative, all but the last ``-ndims`` are used.
        id_dim (``int``):
            Each line may optionally contain an id for each point. This parameter
            specifies its location, has to come after the position values.
    """
    def __init__(
        self, filename, points, points_spec=None, scale=None, ndims=None, id_dim=None
    ):
        self.filename = filename
        self.points = points
        self.points_spec = points_spec
        self.scale = scale
        self.ndims = ndims
        self.id_dim = id_dim
        self.data = None
    def setup(self):
        self._parse_csv()
        if self.points_spec is not None:
            self.provides(self.points, self.points_spec)
            return
        # derive the provided ROI from the bounding box of all parsed
        # locations (ROI end is exclusive, hence the + 1)
        min_bb = Coordinate(np.floor(np.amin(self.data[:, : self.ndims], 0)))
        max_bb = Coordinate(np.ceil(np.amax(self.data[:, : self.ndims], 0)) + 1)
        roi = Roi(min_bb, max_bb - min_bb)
        self.provides(self.points, GraphSpec(roi=roi))
    def provide(self, request):
        timing = Timing(self)
        timing.start()
        min_bb = request[self.points].roi.begin
        max_bb = request[self.points].roi.end
        logger.debug("CSV points source got request for %s", request[self.points].roi)
        # select only the points that fall into the requested ROI
        point_filter = np.ones((self.data.shape[0],), dtype=bool)
        for d in range(self.ndims):
            point_filter = np.logical_and(point_filter, self.data[:, d] >= min_bb[d])
            point_filter = np.logical_and(point_filter, self.data[:, d] < max_bb[d])
        points_data = self._get_points(point_filter)
        points_spec = GraphSpec(roi=request[self.points].roi.copy())
        batch = Batch()
        batch.graphs[self.points] = Graph(points_data, [], points_spec)
        timing.stop()
        batch.profiling_stats.add(timing)
        return batch
    def _get_points(self, point_filter):
        """Convert the filtered rows into :class:`Node` objects."""
        filtered = self.data[point_filter][:, : self.ndims]
        if self.id_dim is not None:
            ids = self.data[point_filter][:, self.id_dim]
        else:
            # no explicit id column: use the row index as id
            ids = np.arange(len(self.data))[point_filter]
        return [Node(id=i, location=p) for i, p in zip(ids, filtered)]
    def _parse_csv(self):
        """Read one point per line, accepting comma- and/or whitespace-
        separated values. If ``ndims`` is None, all values in one line are
        considered as the location of the point. If positive, only the first
        ``ndims`` are used. If negative, all but the last ``-ndims`` are
        used.
        """
        with open(self.filename, "r") as f:
            self.data = np.array(
                [
                    # treat commas as separators (previously a line like
                    # "1,2,3" was a single token and float() raised)
                    [float(t) for t in line.replace(",", " ").split()]
                    for line in f
                    if line.strip()  # skip blank lines (e.g. trailing newline)
                ],
                dtype=np.float32,
            )
        if self.ndims is None:
            self.ndims = self.data.shape[1]
        elif self.ndims < 0:
            # normalize "all but the last -ndims" to a positive column count,
            # so range(self.ndims) in provide() iterates the spatial dims
            self.ndims = self.data.shape[1] + self.ndims
        if self.scale is not None:
            self.data[:, : self.ndims] *= self.scale
| 4,231 | 31.553846 | 86 | py |
gunpowder | gunpowder-master/gunpowder/nodes/klb_source.py | import copy
import logging
import numpy as np
import glob
from gunpowder.batch import Batch
from gunpowder.coordinate import Coordinate
from gunpowder.ext import pyklb
from gunpowder.profiling import Timing
from gunpowder.roi import Roi
from gunpowder.array import Array
from gunpowder.array_spec import ArraySpec
from .batch_provider import BatchProvider
logger = logging.getLogger(__name__)
class KlbSource(BatchProvider):
    """A `KLB <https://bitbucket.org/fernandoamat/keller-lab-block-filetype>`_
    data source.
    Provides a single array from the given KLB dataset.
    Args:
        filename (``string``):
            The name of the KLB file. This string can be a glob expression
            (e.g., ``frame_*.klb``), in which case all files that match are
            sorted and stacked together to form an additional dimension (like
            time). The additional dimension will start at 0 and have a default
            voxel size of 1 (which can be overwritten using the ``array_spec``
            argument).
        array (:class:`ArrayKey`):
            ArrayKey that this source offers.
        array_spec (:class:`ArraySpec`, optional):
            An optional :class:`ArraySpec` to overwrite the array specs
            automatically determined from the KLB file. This is useful to set
            ``voxel_size``, for example. Only fields that are not ``None`` in
            the given :class:`ArraySpec` will be used.
        num_threads (``int``):
            An optional integer to pass to pyklb reader indicating the number
            of threads to use when reading klb files. Entering None causes
            uses the pyklb default, which now is based on the number of cores
            in the machine. This pyklb default is bad for jobs on the cluster that
            are limited to the number of cores requested, and 1 is recommended.
    """
    def __init__(self, filename, array, array_spec=None, num_threads=1):
        self.filename = filename
        self.array = array
        self.array_spec = array_spec
        self.num_threads = num_threads
        self.files = None
        self.ndims = None
    def setup(self):
        # expand the glob and fix the file order, so that the stacked
        # dimension is deterministic
        self.files = glob.glob(self.filename)
        self.files.sort()
        logger.info("Reading KLB headers of %d files...", len(self.files))
        headers = [pyklb.readheader(f) for f in self.files]
        spec = self.__read_spec(headers)
        self.provides(self.array, spec)
    def provide(self, request):
        timing = Timing(self)
        timing.start()
        batch = Batch()
        request_spec = request[self.array]
        logger.debug("Reading %s in %s...", self.array, request_spec.roi)
        voxel_size = self.spec[self.array].voxel_size
        # scale request roi to voxel units
        dataset_roi = request_spec.roi / voxel_size
        # shift request roi into dataset
        dataset_roi = dataset_roi - self.spec[self.array].roi.offset / voxel_size
        # create array spec
        array_spec = self.spec[self.array].copy()
        array_spec.roi = request_spec.roi
        # add array to batch
        batch.arrays[self.array] = Array(self.__read(dataset_roi), array_spec)
        logger.debug("done")
        timing.stop()
        batch.profiling_stats.add(timing)
        return batch
    def __read_spec(self, headers):
        """Build the provided :class:`ArraySpec` from the KLB headers,
        overridden by any non-``None`` fields of ``self.array_spec``."""
        num_files = len(headers)
        assert num_files > 0
        common_header = headers[0]
        # all files must agree on size, spacing, and dtype
        for header in headers:
            for attr in ["imagesize_tczyx", "pixelspacing_tczyx"]:
                assert (common_header[attr] == header[attr]).all(), (
                    "Headers of provided KLB files differ in attribute %s" % attr
                )
            assert (
                common_header["datatype"] == header["datatype"]
            ), "Headers of provided KLB files differ in attribute datatype"
        size = Coordinate(common_header["imagesize_tczyx"])
        voxel_size = Coordinate(common_header["pixelspacing_tczyx"])
        dtype = common_header["datatype"]
        # strip leading 1 dimensions
        while size[0] == 1 and len(size) > 1:
            size = size[1:]
            voxel_size = voxel_size[1:]
        # append num_files dimension
        if num_files > 1:
            size = (num_files,) + size
            voxel_size = (1,) + voxel_size
        dims = Coordinate(size)
        self.ndims = len(dims)
        # NOTE: when an array_spec is given, its missing fields are filled in
        # in place below
        if self.array_spec is not None:
            spec = self.array_spec
        else:
            spec = ArraySpec()
        if spec.voxel_size is None:
            spec.voxel_size = Coordinate(voxel_size)
        if spec.roi is None:
            offset = Coordinate((0,) * self.ndims)
            spec.roi = Roi(offset, dims * spec.voxel_size)
        if spec.dtype is not None:
            # fixed: the previous message referenced undefined names
            # (self.array_specs, dataset), raising NameError on mismatch
            assert spec.dtype == dtype, (
                "dtype %s provided in array_specs for %s, but differs from "
                "dataset dtype %s" % (spec.dtype, self.array, dtype)
            )
        else:
            spec.dtype = dtype
        if spec.interpolatable is None:
            # np.float128 is platform-dependent (absent e.g. on Windows);
            # kept for backward compatibility with existing deployments
            spec.interpolatable = spec.dtype in [
                np.float32,
                np.float64,
                np.float128,
                np.uint8,  # assuming this is not used for labels
            ]
            logger.warning(
                "WARNING: You didn't set 'interpolatable' for %s. "
                "Based on the dtype %s, it has been set to %s. "
                "This might not be what you want.",
                self.array,
                spec.dtype,
                spec.interpolatable,
            )
        return spec
    def __read(self, roi):
        """Read ``roi`` (in voxel units), stacking over files if needed."""
        if len(self.files) == 1:
            return self.__read_file(self.files[0], roi)
        else:
            # first dimension indexes into the sorted file list
            file_indices = range(roi.begin[0], roi.end[0])
            file_roi = Roi(roi.begin[1:], roi.shape[1:])
            return np.array(
                [self.__read_file(self.files[i], file_roi) for i in file_indices]
            )
    def __read_file(self, filename, roi):
        # pyklb reads max-inclusive, gunpowder rois are max exclusive ->
        # subtract (1, 1, ...) from max coordinate
        if self.num_threads:
            return pyklb.readroi(
                filename,
                roi.begin,
                roi.end - (1,) * roi.dims,
                numthreads=self.num_threads,
            )
        else:
            return pyklb.readroi(filename, roi.begin, roi.end - (1,) * roi.dims)
    def __repr__(self):
        return self.filename
| 6,603 | 31.372549 | 82 | py |
gunpowder | gunpowder-master/gunpowder/nodes/resample.py | from .batch_filter import BatchFilter
from gunpowder.array import ArrayKey, Array
from gunpowder.batch_request import BatchRequest
from gunpowder.batch import Batch
from gunpowder.coordinate import Coordinate
from gunpowder.roi import Roi
from skimage.transform import rescale
import numpy as np
import logging
logger = logging.getLogger(__name__)
class Resample(BatchFilter):
    """Up- or downsample arrays in a batch to match a given voxel size. Note: Behavior is not a pixel-perfect copy of down/upsample nodes, because this node relies on skimage.transform.rescale to perform non-integer scaling factors.
    Args:
        source (:class:`ArrayKey`):
            The key of the array to resample.
        target_voxel_size (:class:`Coordinate`):
            The voxel size of the target.
        target (:class:`ArrayKey`):
            The key of the array to store the resampled ``source``.
        ndim (``int``, optional):
            Dimensionality of upsampling. This allows users to, for instance, specify against
            resampling in unused z-dimension when processing slices of anisotropic data.
            Default is to use the dimensionality of ``target_voxel_size``.
        interp_order (``int``, optional):
            The order of interpolation. The order has to be in the range 0-5:
            0: Nearest-neighbor
            1: Bi-linear (default)
            2: Bi-quadratic
            3: Bi-cubic
            4: Bi-quartic
            5: Bi-quintic
            Default is 0 if image.dtype is bool or interpolatable is False, and 1 otherwise.
    """
    def __init__(self, source, target_voxel_size, target, ndim=None, interp_order=None):
        assert isinstance(source, ArrayKey)
        assert isinstance(target, ArrayKey)
        self.source = source
        self.target_voxel_size = Coordinate(target_voxel_size)
        self.target = target
        if ndim is None:
            self.ndim = len(target_voxel_size)
        else:
            self.ndim = ndim
        self.interp_order = interp_order
    def setup(self):
        spec = self.spec[self.source].copy()
        source_voxel_size = self.spec[self.source].voxel_size
        spec.voxel_size = self.target_voxel_size
        # one source voxel of padding on each side, but only along the last
        # self.ndim dimensions (leading dimensions get 0 padding)
        self.pad = Coordinate(
            (0,) * (len(source_voxel_size) - self.ndim)
            + source_voxel_size[-self.ndim :]
        )
        spec.roi = spec.roi.grow(
            -self.pad, -self.pad
        ) # Pad w/ 1 voxel per side for interpolation to avoid edge effects
        # shrink to a ROI aligned with both source and target voxel grids
        spec.roi = spec.roi.snap_to_grid(
            np.lcm(source_voxel_size, self.target_voxel_size), mode="shrink"
        )
        self.provides(self.target, spec)
        self.enable_autoskip()
    def prepare(self, request):
        source_voxel_size = self.spec[self.source].voxel_size
        # request the source at its native voxel size, enlarged by the
        # interpolation padding and grid-aligned
        source_request = request[self.target].copy()
        source_request.voxel_size = source_voxel_size
        source_request.roi = source_request.roi.grow(
            self.pad, self.pad
        )  # Pad w/ 1 voxel per side for interpolation to avoid edge effects
        source_request.roi = source_request.roi.snap_to_grid(
            np.lcm(source_voxel_size, self.target_voxel_size), mode="grow"
        )
        # clip to what the upstream provider can actually deliver, then
        # re-align to the common grid
        source_request.roi = source_request.roi.intersect(
            self.spec[self.source].roi
        ).snap_to_grid(np.lcm(source_voxel_size, self.target_voxel_size), mode="shrink")
        deps = BatchRequest()
        deps[self.source] = source_request
        return deps
    def process(self, batch, request):
        source = batch.arrays[self.source]
        source_data = source.data
        source_voxel_size = self.spec[self.source].voxel_size
        # per-dimension scale factors; leading (non-spatial) dimensions of
        # the data, e.g. channels, are kept at scale 1
        scales = np.array(source_voxel_size) / np.array(self.target_voxel_size)
        scales = (1,) * (source_data.ndim - source_voxel_size.dims) + tuple(scales)
        if self.interp_order != 0 and (
            self.spec[self.source].interpolatable
            or self.spec[self.source].interpolatable is None
        ):
            resampled_data = rescale(
                source_data.astype(np.float32), scales, order=self.interp_order
            ).astype(source_data.dtype)
        else:  # Force nearest-neighbor interpolation for non-interpolatable arrays
            if self.interp_order is not None and self.interp_order != 0:
                logger.warning(
                    "Interpolation other than nearest-neighbor requested for non-interpolatable array. Using nearest-neighbor instead."
                )
            resampled_data = rescale(
                source_data.astype(np.float32), scales, order=0, anti_aliasing=False
            ).astype(source_data.dtype)
        # reconstruct the ROI from the actual output shape (rescale may round)
        target_spec = source.spec.copy()
        target_spec.roi = Roi(
            source.spec.roi.get_begin(),
            self.target_voxel_size
            * Coordinate(resampled_data.shape[-self.target_voxel_size.dims :]),
        )
        target_spec.voxel_size = self.target_voxel_size
        target_spec.dtype = resampled_data.dtype
        target_array = Array(resampled_data, target_spec)
        # remove the interpolation padding again
        target_array.crop(request[self.target].roi)
        # create output array
        outputs = Batch()
        outputs.arrays[self.target] = target_array
        return outputs
| 5,299 | 36.323944 | 232 | py |
gunpowder | gunpowder-master/gunpowder/nodes/noise_augment.py | import numpy as np
import skimage
from gunpowder.batch_request import BatchRequest
from .batch_filter import BatchFilter
class NoiseAugment(BatchFilter):
    """Add random noise to an array via ``skimage.util.random_noise``.

    See the scikit-image documentation for details on modes and extra kwargs.

    Args:
        array (:class:`ArrayKey`):
            The intensity array to modify. Must be float-typed and within
            range [-1, 1] or [0, 1].
        mode (``string``):
            Type of noise to add, see scikit-image documentation.
        seed (``int``):
            Optionally set a random seed, see scikit-image documentation.
        clip (``bool``):
            Whether to preserve the image range (either [-1, 1] or [0, 1]) by
            clipping values in the end, see scikit-image documentation.
    """

    def __init__(self, array, mode="gaussian", seed=None, clip=True, **kwargs):
        self.array = array
        self.mode = mode
        self.seed = seed
        self.clip = clip
        self.kwargs = kwargs

    def setup(self):
        self.enable_autoskip()
        self.updates(self.array, self.spec[self.array])

    def prepare(self, request):
        deps = BatchRequest()
        deps[self.array] = request[self.array].copy()
        return deps

    def process(self, batch, request):
        array = batch.arrays[self.array]
        dtype = array.data.dtype

        assert dtype in (np.float32, np.float64), (
            "Noise augmentation requires float types for the raw array (not "
            + str(dtype)
            + "). Consider using Normalize before."
        )

        if self.clip:
            within_range = array.data.min() >= -1 and array.data.max() <= 1
            assert (
                within_range
            ), "Noise augmentation expects raw values in [-1,1] or [0,1]. Consider using Normalize before."

        noisy = skimage.util.random_noise(
            array.data,
            mode=self.mode,
            seed=self.seed,
            clip=self.clip,
            **self.kwargs,
        )
        array.data = noisy.astype(dtype)
| 2,041 | 30.90625 | 109 | py |
gunpowder | gunpowder-master/gunpowder/nodes/renumber_connected_components.py | from .batch_filter import BatchFilter
from gunpowder.ext import malis
class RenumberConnectedComponents(BatchFilter):
    """Find connected components of the same value, and replace each component
    with a new label.

    Args:
        labels (:class:`ArrayKey`):
            The label array to modify.
    """

    def __init__(self, labels):
        self.labels = labels

    def process(self, batch, request):
        labels_array = batch.arrays[self.labels]
        original_dtype = labels_array.data.dtype

        # derive affinities from the current segmentation, then recompute
        # connected components from those affinities to get fresh labels
        neighborhood = malis.mknhood3d()
        affinities = malis.seg_to_affgraph(labels_array.data, neighborhood)
        relabeled, _ = malis.connected_components_affgraph(affinities, neighborhood)

        # write back, preserving the original dtype
        labels_array.data = relabeled.astype(original_dtype)
| 863 | 27.8 | 78 | py |
gunpowder | gunpowder-master/gunpowder/nodes/batch_provider.py | import numpy as np
import copy
import logging
import random
from gunpowder.coordinate import Coordinate
from gunpowder.provider_spec import ProviderSpec
from gunpowder.array import ArrayKey
from gunpowder.array_spec import ArraySpec
from gunpowder.graph import GraphKey
from gunpowder.graph_spec import GraphSpec
logger = logging.getLogger(__name__)
class BatchRequestError(Exception):
    """Raised when a node fails while processing a batch request.

    Carries the offending provider, the request being processed, and the
    (possibly partial) batch assembled so far, so the failure can be
    diagnosed from the exception message alone.
    """

    def __init__(self, provider, request, batch):
        self.provider = provider
        self.request = request
        self.batch = batch

    def __str__(self):
        # The previous implicit string concatenation joined "request" and the
        # request repr without any separator ("...requestBatchRequest(...)");
        # a newline now separates them.
        return (
            f"Exception in {self.provider.name()} while processing request\n"
            f"{self.request}\n"
            "Batch returned so far:\n"
            f"{self.batch}"
        )
class BatchProvider(object):
    """Superclass for all nodes in a `gunpowder` graph.
    A :class:`BatchProvider` provides :class:`Batches<Batch>` containing
    :class:`Arrays<Array>` and/or :class:`Graph`. The available data is
    specified in a :class:`ProviderSpec` instance, accessible via :attr:`spec`.
    To create a new node, subclass this class and implement (at least)
    :func:`setup` and :func:`provide`.
    A :class:`BatchProvider` can be linked to any number of other
    :class:`BatchProviders<BatchProvider>` upstream. If your node accepts
    exactly one upstream provider, consider subclassing :class:`BatchFilter`
    instead.
    """
    def add_upstream_provider(self, provider):
        """Append ``provider`` to the list of upstream providers and return it."""
        self.get_upstream_providers().append(provider)
        return provider
    def remove_upstream_providers(self):
        """Drop all upstream providers of this node."""
        self.upstream_providers = []
    def get_upstream_providers(self):
        """Return the list of upstream providers, creating it lazily."""
        if not hasattr(self, "upstream_providers"):
            self.upstream_providers = []
        return self.upstream_providers
    @property
    def remove_placeholders(self):
        # defaults to True unless enable_placeholders() was called
        if not hasattr(self, "_remove_placeholders"):
            return True
        return self._remove_placeholders
    def setup(self):
        """To be implemented in subclasses.
        Called during initialization of the DAG. Callees can assume that all
        upstream providers are set up already.
        In setup, call :func:`provides` to announce the arrays and points
        provided by this node.
        """
        raise NotImplementedError("Class %s does not implement 'setup'" % self.name())
    def teardown(self):
        """To be implemented in subclasses.
        Called during destruction of the DAG. Subclasses should use this to
        stop worker processes, if they used some.
        """
        pass
    def provides(self, key, spec):
        """Introduce a new output provided by this :class:`BatchProvider`.
        Implementations should call this in their :func:`setup` method, which
        will be called when the pipeline is build.
        Args:
            key (:class:`ArrayKey` or :class:`GraphKey`):
                The array or point set key provided.
            spec (:class:`ArraySpec` or :class:`GraphSpec`):
                The spec of the array or point set provided.
        """
        logger.debug("Current spec of %s:\n%s", self.name(), self.spec)
        if self.spec is None:
            self._spec = ProviderSpec()
        assert (
            key not in self.spec
        ), "Node %s is trying to add spec for %s, but is already " "provided." % (
            type(self).__name__,
            key,
        )
        # deep-copy so the stored spec is independent of the caller's object
        self.spec[key] = copy.deepcopy(spec)
        self.provided_items.append(key)
        logger.debug("%s provides %s with spec %s", self.name(), key, spec)
    def _init_spec(self):
        # lazily create the backing attribute; stays None until provides()
        # is called for the first time
        if not hasattr(self, "_spec"):
            self._spec = None
    def internal_teardown(self):
        """Reset the spec and provided items, then call the subclass'
        :func:`teardown`."""
        logger.debug("Resetting spec of %s", self.name())
        self._spec = None
        self._provided_items = []
        self.teardown()
    @property
    def spec(self):
        """Get the :class:`ProviderSpec` of this :class:`BatchProvider`.
        Note that the spec is only available after the pipeline has been build.
        Before that, it is ``None``.
        """
        self._init_spec()
        return self._spec
    @property
    def provided_items(self):
        """Get a list of the keys provided by this :class:`BatchProvider`.
        This list is only available after the pipeline has been build. Before
        that, it is empty.
        """
        if not hasattr(self, "_provided_items"):
            self._provided_items = []
        return self._provided_items
    def remove_provided(self, request):
        """Remove keys from `request` that are provided by this
        :class:`BatchProvider`.
        """
        for key in self.provided_items:
            if key in request:
                del request[key]
    def request_batch(self, request):
        """Request a batch from this provider.
        Args:
            request (:class:`BatchRequest`):
                A request containing (possibly partial)
                :class:`ArraySpecs<ArraySpec>` and
                :class:`GraphSpecs<GraphSpec>`.
        """
        batch = None
        try:
            # refresh and apply the request's random seed before any work
            request._update_random_seed()
            self.set_seeds(request)
            logger.debug("%s got request %s", self.name(), request)
            self.check_request_consistency(request)
            # pass a copy upstream so the caller's request is not mutated
            upstream_request = request.copy()
            if self.remove_placeholders:
                upstream_request.remove_placeholders()
            batch = self.provide(upstream_request)
            request.remove_placeholders()
            self.check_batch_consistency(batch, request)
            self.remove_unneeded(batch, request)
            logger.debug("%s provides %s", self.name(), batch)
        except Exception as e:
            # wrap any failure with context: provider, request, partial batch
            raise BatchRequestError(self, request, batch) from e
        return batch
    def set_seeds(self, request):
        """Seed both Python's and numpy's RNGs from the request seed."""
        seed = request.random_seed
        random.seed(seed)
        # augment uses numpy for its randomness
        np.random.seed(seed)
    def check_request_consistency(self, request):
        """Assert that ``request`` only asks for things this node provides,
        with compatible ROIs, voxel sizes, and graph directedness."""
        for key, request_spec in request.items():
            assert (
                key in self.spec
            ), "%s: Asked for %s which this node does not provide" % (self.name(), key)
            # NOTE(review): the GraphSpec check is duplicated below — one of
            # the two was likely meant for a different spec type; confirm
            assert (
                isinstance(request_spec, ArraySpec)
                or isinstance(request_spec, GraphSpec)
                or isinstance(request_spec, GraphSpec)
            ), "spec for %s is of type" "%s" % (key, type(request_spec))
            provided_spec = self.spec[key]
            provided_roi = provided_spec.roi
            request_roi = request_spec.roi
            if provided_roi is not None:
                assert provided_roi.contains(
                    request_roi
                ), "%s: %s's ROI %s outside of my ROI %s" % (
                    self.name(),
                    key,
                    request_roi,
                    provided_roi,
                )
            if isinstance(key, ArrayKey):
                if request_spec.voxel_size is not None:
                    assert provided_spec.voxel_size == request_spec.voxel_size, (
                        "%s: voxel size %s requested for %s, but this node provides %s"
                        % (
                            self.name(),
                            request_spec.voxel_size,
                            key,
                            provided_spec.voxel_size,
                        )
                    )
                # requested ROI shape must be a whole number of voxels
                if request_roi is not None and provided_spec.voxel_size is not None:
                    for d in range(request_roi.dims):
                        assert (
                            request_roi.shape[d] % provided_spec.voxel_size[d] == 0
                        ), (
                            "in request %s, dimension %d of request %s is not a multiple of voxel_size %d"
                            % (request, d, key, provided_spec.voxel_size[d])
                        )
            if isinstance(key, GraphKey):
                if request_spec.directed is not None:
                    assert request_spec.directed == provided_spec.directed, (
                        f"asked for {key}: directed={request_spec.directed} but "
                        f"{self.name()} provides directed={provided_spec.directed}"
                    )
    def check_batch_consistency(self, batch, request):
        """Assert that ``batch`` delivers exactly what ``request`` asked for
        (ROIs, voxel sizes, dtypes, graph directedness, node containment)."""
        for array_key, request_spec in request.array_specs.items():
            assert (
                array_key in batch.arrays
            ), "%s requested, but %s did not provide it." % (array_key, self.name())
            array = batch.arrays[array_key]
            assert (
                array.spec.roi == request_spec.roi
            ), "%s ROI %s requested, but ROI %s provided by %s." % (
                array_key,
                request_spec.roi,
                array.spec.roi,
                self.name(),
            )
            assert (
                array.spec.voxel_size == self.spec[array_key].voxel_size
            ), "voxel size of %s announced, but %s " "delivered for %s" % (
                self.spec[array_key].voxel_size,
                array.spec.voxel_size,
                array_key,
            )
            # ensure that the spatial dimensions are the same (other dimensions
            # on top are okay, e.g., for affinities)
            if request_spec.roi is not None:
                dims = request_spec.roi.dims
                data_shape = Coordinate(array.data.shape[-dims:])
                voxel_size = self.spec[array_key].voxel_size
                assert data_shape == request_spec.roi.shape / voxel_size, (
                    "%s ROI %s requested, but size of array is %s*%s=%s provided by %s."
                    % (
                        array_key,
                        request_spec.roi,
                        data_shape,
                        voxel_size,
                        data_shape * voxel_size,
                        self.name(),
                    )
                )
            if request_spec.dtype is not None:
                assert (
                    batch[array_key].data.dtype == request_spec.dtype
                ), "dtype of array %s (%s) does not match requested dtype %s by %s" % (
                    array_key,
                    batch[array_key].data.dtype,
                    request_spec.dtype,
                    self.name(),
                )
        for graph_key, request_spec in request.graph_specs.items():
            assert (
                graph_key in batch.graphs
            ), "%s requested, but %s did not provide it." % (graph_key, self.name())
            graph = batch.graphs[graph_key]
            assert (
                graph.spec.roi == request_spec.roi
            ), "%s ROI %s requested, but ROI %s provided by %s." % (
                graph_key,
                request_spec.roi,
                graph.spec.roi,
                self.name(),
            )
            if request_spec.directed is not None:
                assert request_spec.directed == graph.directed, (
                    f"Recieved {graph_key}: directed={graph.directed} but "
                    f"{self.name()} should provide directed={request_spec.directed}"
                )
            # every node must lie in the ROI, or be a "dangling" node whose
            # neighbors are all inside the ROI
            for node in graph.nodes:
                contained = graph.spec.roi.contains(node.location)
                dangling = not contained and all(
                    [graph.spec.roi.contains(v.location) for v in graph.neighbors(node)]
                )
                assert contained or dangling, (
                    f"graph {graph_key} provided by {self.name()} with ROI {graph.spec.roi} "
                    f"contain point at {node.location} which is neither contained nor "
                    f"'dangling'"
                )
    def remove_unneeded(self, batch, request):
        """Delete arrays and graphs from ``batch`` that were not requested."""
        batch_keys = set(list(batch.arrays.keys()) + list(batch.graphs.keys()))
        for key in batch_keys:
            if key not in request:
                del batch[key]
    def enable_placeholders(self):
        """Keep placeholder specs in upstream requests (see
        :attr:`remove_placeholders`)."""
        self._remove_placeholders = False
    def provide(self, request):
        """To be implemented in subclasses.
        This function takes a :class:`BatchRequest` and should return the
        corresponding :class:`Batch`.
        Args:
            request(:class:`BatchRequest`):
                The request to process.
        """
        raise NotImplementedError("Class %s does not implement 'provide'" % self.name())
    def name(self):
        """Return the class name of this node (used in log/error messages)."""
        return type(self).__name__
    def __repr__(self):
        return self.name() + ", providing: " + str(self.spec)
    def __add__(self, other):
        """Support ``self + other`` operator. Return a :class:`Pipeline`."""
        # imported here to avoid a circular import at module load time
        from gunpowder import Pipeline
        if isinstance(other, BatchProvider):
            other = Pipeline(other)
        if not isinstance(other, Pipeline):
            raise RuntimeError(
                f"Don't know how to add {type(other)} to BatchProvider "
                f"{self.name()}"
            )
        return Pipeline(self) + other
    def __radd__(self, other):
        """Support ``other + self`` operator. Return a :class:`Pipeline`."""
        # imported here to avoid a circular import at module load time
        from gunpowder import Pipeline
        if isinstance(other, BatchProvider):
            return Pipeline(other) + Pipeline(self)
        if isinstance(other, tuple):
            return other + Pipeline(self)
        raise RuntimeError(
            f"Don't know how to radd {type(other)} to BatchProvider" f"{self.name()}"
        )
| 13,607 | 33.105263 | 106 | py |
gunpowder | gunpowder-master/gunpowder/nodes/iterate_locations.py | import logging
import multiprocessing as mp
from random import randrange
from .batch_filter import BatchFilter
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from gunpowder.array import Array
from gunpowder.array_spec import ArraySpec
logger = logging.getLogger(__name__)
class IterateLocations(BatchFilter):
"""Iterates over the nodes in a graph and centers
batches at their locations. The iteration is thread safe.
Args:
graph (:class:`GraphKey`): Key of graph to read nodes from
roi (:class:`Roi`): Roi within which to read and iterate over nodes.
Defaults to None, which queries the whole Roi of the upstream graph
source
node_id (:class:`ArrayKey`, optional): Nonspatial array key in which to
store the id of the "current" node in graph. Default is None, in
which case no attribute is stored and there is no way to tell which
node is being considered.
choose_randomly (bool): If true, choose nodes randomly with
replacement. Default is false, which loops over the list.
"""
    # shared across worker processes: index of the last node handed out, and
    # a flag set once the full list has been traversed
    __global_index = mp.Value("i", -1)
    visited_all = mp.Value("b", False)
    def __init__(self, graph, roi=None, node_id=None, choose_randomly=False):
        self.graph = graph
        self.roi = roi
        self.node_id = node_id
        self.choose_randomly = choose_randomly
        # filled in by setup() (node list) and prepare() (current position)
        self.nodes = None
        self.coordinates = None
        self.local_index = None
        self.shift = None
    def setup(self):
        """Fetch all nodes of ``self.graph`` (within ``self.roi``, if given)
        from upstream and announce unbounded ROIs downstream."""
        upstream = self.get_upstream_provider()
        self.upstream_spec = upstream.spec
        assert self.graph in self.upstream_spec, (
            "Upstream provider does not have graph %s" % self.graph
        )
        query_spec = self.upstream_spec.graph_specs[self.graph].copy()
        if self.roi:
            query_spec.roi = query_spec.roi.intersect(self.roi)
        # TODO: For scalability, scan upstream roi in blocks instead of
        # storing all nodes in memory
        logger.info("Requesting all %s points in roi %s", self.graph, query_spec.roi)
        upstream_request = BatchRequest({self.graph: query_spec})
        upstream_batch = upstream.request_batch(upstream_request)
        # cache nodes and their locations for the iteration in prepare()
        self.nodes = list(upstream_batch[self.graph].nodes)
        self.coordinates = [node.location for node in self.nodes]
        assert (
            len(self.coordinates) > 0
        ), "Graph %s doesn't have nodes to iterate over in roi %s" % (
            self.graph,
            self.roi,
        )
        # clear bounding boxes of all provided arrays and points
        for key, spec in self.spec.items():
            if spec.roi is not None:
                spec.roi.shape = Coordinate((None,) * spec.roi.dims)
                self.updates(key, spec)
        if self.node_id is not None:
            # nonspatial array carrying the id of the current node
            self.provides(self.node_id, ArraySpec(nonspatial=True))
def prepare(self, request):
    """Pick the next node and shift the request to be centered on it.

    Chooses the next node (sequentially or randomly, depending on
    ``choose_randomly``), computes the shift that moves the center of the
    total request ROI onto that node location (rounded to the least common
    multiple of the requested voxel sizes), validates the shift against the
    upstream ROIs, and applies it to every spatial spec in the request.

    Raises:
        AssertionError: if no valid shift is found after 15 attempts.
    """
    logger.debug("request: %s", request.array_specs)
    logger.debug("my spec: %s", self.spec)
    lcm_voxel_size = self.spec.get_lcm_voxel_size(request.array_specs.keys())
    if lcm_voxel_size is None:
        # no arrays requested: fall back to unit voxels in the
        # dimensionality of the stored node locations
        ndims = len(self.coordinates[0])
        lcm_voxel_size = Coordinate((1,) * ndims)
    # shift to center
    total_roi = request.get_total_roi()
    request_center = total_roi.shape / 2 + total_roi.offset
    self.shift = self._get_next_shift(request_center, lcm_voxel_size)
    max_tries = 15
    tries = 0
    while not self.__check_shift(request):
        logger.warning(
            "Location %s (shift %s) skipped"
            % (self.coordinates[self.local_index], self.shift)
        )
        # fix: the failure message was a (format-string, value) tuple that
        # was never interpolated; format it properly
        assert tries < max_tries, (
            "Unable to find valid shift after %d tries" % tries
        )
        self.shift = self._get_next_shift(request_center, lcm_voxel_size)
        tries += 1
    # Set shift for all requests
    for specs_type in [request.array_specs, request.graph_specs]:
        for key, spec in specs_type.items():
            if isinstance(spec, ArraySpec) and spec.nonspatial:
                continue
            roi = spec.roi.shift(self.shift)
            specs_type[key].roi = roi
    logger.debug(
        "{}'th ({}) shift selected: {}".format(
            self.local_index, self.coordinates[self.local_index], self.shift
        )
    )
def process(self, batch, request):
    """Undo the shift so the batch matches the originally requested ROIs."""
    # optionally report which node this batch was centered on
    if self.node_id:
        current = self.nodes[self.local_index]
        id_spec = self.spec[self.node_id].copy()
        batch[self.node_id] = Array([current.id], id_spec)
    # restore the ROIs the downstream request asked for
    for key, requested in request.array_specs.items():
        batch.arrays[key].spec.roi = requested.roi
    for key, requested in request.graph_specs.items():
        batch.graphs[key].spec.roi = requested.roi
    # move node locations back into the requested roi
    for key in request.graph_specs.keys():
        batch.graphs[key].shift(-self.shift)
def _get_next_shift(self, center_shift, voxel_size):
    """Return the shift from ``center_shift`` to the next node location,
    rounded to a multiple of ``voxel_size``.

    Sequential mode advances a process-shared counter under its lock, so
    multiple workers visit distinct nodes; random mode samples with
    replacement.
    """
    # gets next coordinate from list
    if self.choose_randomly:
        self.local_index = randrange(len(self.coordinates))
    else:
        # the index is shared across worker processes; advance it while
        # holding its lock
        with IterateLocations.__global_index.get_lock():
            IterateLocations.__global_index.value += 1
            if IterateLocations.__global_index.value == len(self.coordinates) - 1:
                logger.info("After this request, all points have been visited")
                with IterateLocations.visited_all.get_lock():
                    IterateLocations.visited_all.value = True
            if IterateLocations.__global_index.value == len(self.coordinates):
                logger.warning("Ran out of locations, looping list")
            # wrap around once the list is exhausted
            self.local_index = IterateLocations.__global_index.value % len(
                self.coordinates
            )
    next_shift = Coordinate(self.coordinates[self.local_index]) - center_shift
    logger.debug("Shift before rounding: %s" % str(next_shift))
    # make sure shift is a multiple of voxel size (round to nearest)
    next_shift = Coordinate(
        [
            int(vs * round(float(shift) / vs))
            for vs, shift in zip(voxel_size, next_shift)
        ]
    )
    logger.debug("Shift after rounding: %s" % str(next_shift))
    return next_shift
def __check_shift(self, request):
    """Return True iff every spatial requested ROI, shifted by
    ``self.shift``, is contained in the corresponding upstream ROI.

    Raises:
        Exception: if a requested key is not provided upstream.
    """
    for key, spec in request.items():
        # nonspatial arrays have no ROI to validate
        if isinstance(spec, ArraySpec) and spec.nonspatial:
            continue
        request_roi = spec.roi
        if key in self.upstream_spec:
            provided_roi = self.upstream_spec[key].roi
        else:
            raise Exception("Requested %s, but upstream does not provide it." % key)
        shifted_roi = request_roi.shift(self.shift)
        if not provided_roi.contains(shifted_roi):
            # fix: message previously read "does notcontain" (missing space)
            logger.warning(
                ("Provided roi %s for key %s does not contain" " shifted roi %s"),
                provided_roi,
                key,
                shifted_roi,
            )
            return False
    return True
| 7,416 | 39.091892 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/crop.py | import copy
import logging
from .batch_filter import BatchFilter
from gunpowder.coordinate import Coordinate
logger = logging.getLogger(__name__)
class Crop(BatchFilter):
    """Limits provided ROIs by either giving a new :class:`Roi` or crop
    fractions from either face of the provided ROI.

    Args:

        key (:class:`ArrayKey` or :class:`GraphKey`):

            The key of the array or points set to modify.

        roi (:class:`Roi` or ``None``):

            The ROI to crop to.

        fraction_negative (``tuple`` of ``float``):

            Relative crop starting from the negative end of the provided ROI.

        fraction_positive (``tuple`` of ``float``):

            Relative crop starting from the positive end of the provided ROI.
    """

    def __init__(self, key, roi=None, fraction_negative=None, fraction_positive=None):
        # an absolute ROI and relative fractions are mutually exclusive
        if roi is not None and (
            fraction_positive is not None or fraction_negative is not None
        ):
            raise RuntimeError(
                "'roi' and 'fraction_...' arguments can not be given together"
            )
        if (roi, fraction_positive, fraction_negative) == (None, None, None):
            raise RuntimeError("One of 'roi' and 'fraction_...' has to be given")
        # if only one fraction is given, default the other side to zero
        if fraction_negative is not None and fraction_positive is None:
            fraction_positive = (0.0,) * len(fraction_negative)
        if fraction_positive is not None and fraction_negative is None:
            fraction_negative = (0.0,) * len(fraction_positive)
        self.key = key
        self.roi = roi
        self.fraction_negative = fraction_negative
        self.fraction_positive = fraction_positive

    def setup(self):
        spec = self.spec[self.key]
        if self.roi is not None:
            assert spec.roi.contains(
                self.roi
            ), "Crop ROI is not contained in upstream ROI."
            cropped_roi = self.roi
        else:
            # fractions removed from both faces must leave a non-empty ROI
            total_fraction = tuple(
                n + p for n, p in zip(self.fraction_negative, self.fraction_positive)
            )
            if max(total_fraction) >= 1:
                # fix: error message previously misspelled "exeeds"
                raise RuntimeError("Sum of crop fractions exceeds 1")
            crop_positive = Coordinate(
                a * b for a, b in zip(spec.roi.shape, self.fraction_positive)
            )
            crop_negative = Coordinate(
                a * b for a, b in zip(spec.roi.shape, self.fraction_negative)
            )
            # negative growth shrinks the ROI from both ends
            cropped_roi = spec.roi.grow(-crop_positive, -crop_negative)
        spec.roi = cropped_roi
        self.updates(self.key, spec)

    def process(self, batch, request):
        # cropping only changes the declared specs; data passes through
        pass
| 2,655 | 30.619048 | 86 | py |
gunpowder | gunpowder-master/gunpowder/nodes/scan.py | import logging
import multiprocessing
import numpy as np
import tqdm
from gunpowder.array import Array
from gunpowder.batch import Batch
from gunpowder.coordinate import Coordinate
from gunpowder.graph import Graph
from gunpowder.producer_pool import ProducerPool
from gunpowder.roi import Roi
from .batch_filter import BatchFilter
logger = logging.getLogger(__name__)
class Scan(BatchFilter):
    """Iteratively requests batches of size ``reference`` from upstream
    providers in a scanning fashion, until all requested ROIs are covered. If
    the batch request to this node is empty, it will scan the complete upstream
    ROIs (and return nothing). Otherwise, it scans only the requested ROIs and
    returns a batch assembled of the smaller requests. In either case, the
    upstream requests will be contained in the downstream requested ROI or
    upstream ROIs.

    See also :class:`Hdf5Write`.

    Args:

        reference (:class:`BatchRequest`):

            A reference :class:`BatchRequest`. This request will be shifted in
            a scanning fashion over the upstream ROIs of the requested arrays
            or points.

        num_workers (``int``, optional):

            If set to >1, upstream requests are made in parallel with that
            number of workers.

        cache_size (``int``, optional):

            If multiple workers are used, how many batches to hold at most.
    """

    def __init__(self, reference, num_workers=1, cache_size=50):
        # copy so later mutation by the caller cannot change our template
        self.reference = reference.copy()
        self.num_workers = num_workers
        self.cache_size = cache_size
        self.workers = None
        self.batch = None

    def setup(self):
        """Start the worker pool if parallel scanning was requested."""
        if self.num_workers > 1:
            self.request_queue = multiprocessing.Queue(maxsize=0)
            self.workers = ProducerPool(
                [self._worker_get_chunk for _ in range(self.num_workers)],
                queue_size=self.cache_size,
            )
            self.workers.start()

    def teardown(self):
        """Stop the worker pool, if one was started."""
        if self.num_workers > 1:
            self.workers.stop()

    def provide(self, request):
        """Tile the scanned ROI with shifted copies of ``reference``,
        request each tile upstream, and stitch the chunks into one batch
        (empty requests scan everything but return an empty batch)."""
        empty_request = len(request) == 0
        if empty_request:
            # scan the full upstream ROIs
            scan_spec = self.spec
        else:
            scan_spec = request
        stride = self._get_stride()
        shift_roi = self._get_shift_roi(scan_spec)
        shifts = self._enumerate_shifts(shift_roi, stride)
        num_chunks = len(shifts)
        logger.info("scanning over %d chunks", num_chunks)
        # the batch to return
        self.batch = Batch()
        if self.num_workers > 1:
            # enqueue all shifted requests, then collect results as they come
            for shift in shifts:
                shifted_reference = self._shift_request(self.reference, shift)
                self.request_queue.put(shifted_reference)
            for i in tqdm.tqdm(range(num_chunks)):
                chunk = self.workers.get()
                if not empty_request:
                    self._add_to_batch(request, chunk)
                logger.debug("processed chunk %d/%d", i + 1, num_chunks)
        else:
            for i, shift in enumerate(tqdm.tqdm(shifts)):
                shifted_reference = self._shift_request(self.reference, shift)
                chunk = self._get_chunk(shifted_reference)
                if not empty_request:
                    self._add_to_batch(request, chunk)
                logger.debug("processed chunk %d/%d", i + 1, num_chunks)
        batch = self.batch
        self.batch = None
        logger.debug("returning batch %s", batch)
        return batch

    def _get_stride(self):
        """Get the maximal amount by which ``reference`` can be moved, such
        that it tiles the space."""
        stride = None
        # get the least common multiple of all voxel sizes, we have to stride
        # at least that far
        lcm_voxel_size = self.spec.get_lcm_voxel_size(self.reference.array_specs.keys())
        # that's just the minimal size in each dimension
        for key, reference_spec in self.reference.items():
            shape = reference_spec.roi.shape
            for d in range(len(lcm_voxel_size)):
                assert shape[d] >= lcm_voxel_size[d], (
                    "Shape of reference "
                    "ROI %s for %s is "
                    "smaller than least "
                    "common multiple of "
                    "voxel size "
                    "%s" % (reference_spec.roi, key, lcm_voxel_size)
                )
            if stride is None:
                stride = shape
            else:
                stride = Coordinate((min(a, b) for a, b in zip(stride, shape)))
        return stride

    def _get_shift_roi(self, spec):
        """Get the minimal and maximal shift (as a ROI) to apply to
        ``self.reference``, such that it is still fully contained in ``spec``.
        """
        total_shift_roi = None
        # get individual shift ROIs and intersect them
        for key, reference_spec in self.reference.items():
            logger.debug("getting shift roi for %s with spec %s", key, reference_spec)
            if key not in spec:
                logger.debug("skipping, %s not in upstream spec", key)
                continue
            if spec[key].roi is None:
                logger.debug("skipping, %s has not ROI", key)
                continue
            logger.debug("upstream ROI is %s", spec[key].roi)
            for r, s in zip(reference_spec.roi.shape, spec[key].roi.shape):
                assert s is None or r <= s, (
                    "reference %s with ROI %s does not fit into provided "
                    "upstream %s" % (key, reference_spec.roi, spec[key].roi)
                )
            # we have a reference ROI
            #
            #    [--------) [9]
            #    3        12
            #
            # and a spec ROI
            #
            #                 [---------------) [16]
            #                 16              32
            #
            # min and max shifts of reference are
            #
            #                 [--------) [9]
            #                 16       25
            #                        [--------) [9]
            #                        23       32
            #
            # therefore, all possible ways to shift the reference such that it
            # is contained in the spec are at least 16-3=13 and at most 23-3=20
            # (inclusive)
            #
            #              [-------) [8]
            #              13      21
            #
            # 1. the starting point is beginning of spec - beginning of reference
            # 2. the length is length of spec - length of reference + 1

            # 1. get the starting point of the shift ROI
            shift_begin = spec[key].roi.begin - reference_spec.roi.begin
            # 2. get the shape of the shift ROI
            shift_shape = spec[key].roi.shape - reference_spec.roi.shape + 1
            # create a ROI...
            shift_roi = Roi(shift_begin, shift_shape)
            logger.debug("shift ROI for %s is %s", key, shift_roi)
            # ...and intersect it with previous shift ROIs
            if total_shift_roi is None:
                total_shift_roi = shift_roi
            else:
                total_shift_roi = total_shift_roi.intersect(shift_roi)
                if total_shift_roi.empty:
                    raise RuntimeError(
                        "There is no location where the ROIs "
                        "the reference %s are contained in the "
                        "request/upstream ROIs "
                        "%s." % (self.reference, spec)
                    )
            logger.debug(
                "intersected with total shift ROI this yields %s", total_shift_roi
            )
        if total_shift_roi is None:
            raise RuntimeError(
                "None of the upstream ROIs are bounded (all "
                "ROIs are None). Scan needs at least one "
                "bounded upstream ROI."
            )
        return total_shift_roi

    def _enumerate_shifts(self, shift_roi, stride):
        """Produces a sequence of shift coordinates starting at the beginning
        of ``shift_roi``, progressing with ``stride``. The maximum shift
        coordinate in any dimension will be the last point inside the shift roi
        in this dimension."""
        min_shift = shift_roi.offset
        max_shift = max(min_shift, Coordinate(m - 1 for m in shift_roi.end))
        shift = np.array(min_shift)
        shifts = []
        dims = len(min_shift)
        logger.debug("enumerating possible shifts of %s in %s", stride, shift_roi)
        while True:
            logger.debug("adding %s", shift)
            shifts.append(Coordinate(shift))
            if (shift == max_shift).all():
                break
            # count up dimensions
            for d in range(dims):
                if shift[d] >= max_shift[d]:
                    if d == dims - 1:
                        break
                    # carry over into the next dimension
                    shift[d] = min_shift[d]
                else:
                    shift[d] += stride[d]
                    # snap to last possible shift, don't overshoot
                    if shift[d] > max_shift[d]:
                        shift[d] = max_shift[d]
                    break
        return shifts

    def _shift_request(self, request, shift):
        """Return a copy of ``request`` with every ROI shifted by ``shift``."""
        shifted = request.copy()
        for _, spec in shifted.items():
            spec.roi = spec.roi.shift(shift)
        return shifted

    def _worker_get_chunk(self):
        """Worker entry point: pop one shifted request and fetch its chunk."""
        request = self.request_queue.get()
        return self._get_chunk(request)

    def _get_chunk(self, request):
        """Fetch one chunk from the upstream provider."""
        return self.get_upstream_provider().request_batch(request)

    def _add_to_batch(self, spec, chunk):
        """Copy the arrays and points of ``chunk`` into the output batch,
        allocating the output batch on first use."""
        if self.batch.get_total_roi() is None:
            self.batch = self._setup_batch(spec, chunk)
        self.batch.profiling_stats.merge_with(chunk.profiling_stats)
        for array_key, array in chunk.arrays.items():
            if array_key not in spec:
                continue
            self._fill(
                self.batch.arrays[array_key].data,
                array.data,
                spec.array_specs[array_key].roi,
                array.spec.roi,
                self.spec[array_key].voxel_size,
            )
        for graph_key, graphs in chunk.graphs.items():
            if graph_key not in spec:
                continue
            self._fill_points(
                self.batch.graphs[graph_key],
                graphs,
                spec.graph_specs[graph_key].roi,
                graphs.spec.roi,
            )

    def _setup_batch(self, batch_spec, chunk):
        """Allocate a batch matching the sizes of ``batch_spec``, using
        ``chunk`` as template."""
        batch = Batch()
        for array_key, spec in batch_spec.array_specs.items():
            roi = spec.roi
            voxel_size = self.spec[array_key].voxel_size
            # get the 'non-spatial' shape of the chunk-batch
            # and append the shape of the request to it
            array = chunk.arrays[array_key]
            shape = array.data.shape[: -roi.dims]
            shape += roi.shape // voxel_size
            spec = self.spec[array_key].copy()
            spec.roi = roi
            logger.info("allocating array of shape %s for %s", shape, array_key)
            batch.arrays[array_key] = Array(
                data=np.zeros(shape, dtype=spec.dtype), spec=spec
            )
        for graph_key, spec in batch_spec.graph_specs.items():
            roi = spec.roi
            spec = self.spec[graph_key].copy()
            spec.roi = roi
            batch.graphs[graph_key] = Graph(nodes=[], edges=[], spec=spec)
        logger.debug("setup batch to fill %s", batch)
        return batch

    def _fill(self, a, b, roi_a, roi_b, voxel_size):
        """Copy the overlap of array ``b`` (covering ``roi_b``) into array
        ``a`` (covering ``roi_a``), working in voxel coordinates."""
        logger.debug("filling " + str(roi_b) + " into " + str(roi_a))
        roi_a = roi_a // voxel_size
        roi_b = roi_b // voxel_size
        common_roi = roi_a.intersect(roi_b)
        if common_roi.empty:
            return
        common_in_a_roi = common_roi - roi_a.offset
        common_in_b_roi = common_roi - roi_b.offset
        slices_a = common_in_a_roi.get_bounding_box()
        slices_b = common_in_b_roi.get_bounding_box()
        if len(a.shape) > len(slices_a):
            # prepend full slices for non-spatial (e.g. channel) dimensions
            slices_a = (slice(None),) * (len(a.shape) - len(slices_a)) + slices_a
            slices_b = (slice(None),) * (len(b.shape) - len(slices_b)) + slices_b
        a[slices_a] = b[slices_b]

    def _fill_points(self, a, b, roi_a, roi_b):
        """
        Take points from b and add them to a.

        Nodes marked temporary must be ignored. Temporary nodes are nodes
        that were created during processing. Since it is impossible to know
        in general, that a node created during processing of a subgraph was
        not assigned an id that is already used by the full graph, we cannot
        include temporary nodes and assume there will not be ambiguous node
        id's that correspond to multiple distinct nodes.
        """
        logger.debug("filling points of " + str(roi_b) + " into points of" + str(roi_a))
        common_roi = roi_a.intersect(roi_b)
        if common_roi is None:
            return
        for node in b.nodes:
            if not node.temporary and roi_a.contains(node.location):
                a.add_node(node)
        for e in b.edges:
            bu = b.node(e.u)
            bv = b.node(e.v)
            # only keep edges whose endpoints both made it into a
            if (
                not bu.temporary
                and not bv.temporary
                and a.contains(bu.id)
                and a.contains(bv.id)
            ):
                a.add_edge(e)
| 13,722 | 33.65404 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/exclude_labels.py | import logging
import numpy as np
from scipy.ndimage.morphology import distance_transform_edt
from .batch_filter import BatchFilter
from gunpowder.array import Array
logger = logging.getLogger(__name__)
class ExcludeLabels(BatchFilter):
    """Excludes several labels from the ground-truth.

    The labels will be replaced by background_value. An optional ignore mask
    will be created and set to 0 for the excluded locations that are further
    than a threshold away from not excluded locations.

    Args:

        labels (:class:`ArrayKey`):

            The array containing the labels.

        exclude (``list`` of ``int``):

            The labels to exclude from ``labels``.

        ignore_mask (:class:`ArrayKey`, optional):

            The ignore mask to create.

        ignore_mask_erode (``float``, optional):

            By how much (in world units) to erode the ignore mask.

        background_value (``int``, optional):

            Value to replace excluded IDs, defaults to 0.
    """

    def __init__(
        self, labels, exclude, ignore_mask=None, ignore_mask_erode=0, background_value=0
    ):
        self.labels = labels
        # a set gives O(1) membership tests per encountered label
        self.exclude = set(exclude)
        self.ignore_mask = ignore_mask
        self.ignore_mask_erode = ignore_mask_erode
        self.background_value = background_value

    def setup(self):
        assert (
            self.labels in self.spec
        ), "ExcludeLabels can only be used if GT_LABELS is provided upstream."
        if self.ignore_mask:
            self.provides(self.ignore_mask, self.spec[self.labels])

    def process(self, batch, request):
        gt = batch.arrays[self.labels]
        # 0 marks included regions (to be used directly with distance transform
        # later)
        include_mask = np.ones(gt.data.shape)
        gt_labels = np.unique(gt.data)
        logger.debug("batch contains GT labels: " + str(gt_labels))
        for label in gt_labels:
            if label in self.exclude:
                logger.debug("excluding label " + str(label))
                gt.data[gt.data == label] = self.background_value
            else:
                include_mask[gt.data == label] = 0
        # if no ignore mask is provided or requested, we are done
        # (fix: idiomatic 'not in' instead of 'not ... in ...')
        if not self.ignore_mask or self.ignore_mask not in request:
            return
        voxel_size = self.spec[self.labels].voxel_size
        distance_to_include = distance_transform_edt(include_mask, sampling=voxel_size)
        logger.debug("max distance to foreground is " + str(distance_to_include.max()))
        # 1 marks included regions, plus a context area around them
        include_mask = distance_to_include < self.ignore_mask_erode
        # include mask was computed on labels ROI, we need to copy it to
        # the requested ignore_mask ROI
        gt_ignore_roi = request[self.ignore_mask].roi
        intersection = gt.spec.roi.intersect(gt_ignore_roi)
        intersection_in_gt = intersection - gt.spec.roi.offset
        intersection_in_gt_ignore = intersection - gt_ignore_roi.offset
        # to voxel coordinates
        intersection_in_gt //= voxel_size
        intersection_in_gt_ignore //= voxel_size
        gt_ignore = np.zeros((gt_ignore_roi // voxel_size).get_shape(), dtype=np.uint8)
        gt_ignore[intersection_in_gt_ignore.get_bounding_box()] = include_mask[
            intersection_in_gt.get_bounding_box()
        ]
        spec = self.spec[self.labels].copy()
        spec.roi = gt_ignore_roi
        spec.dtype = np.uint8
        batch.arrays[self.ignore_mask] = Array(gt_ignore, spec)
| 3,596 | 33.257143 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/dvid_source.py | import logging
import numpy as np
from gunpowder.batch import Batch
from gunpowder.coordinate import Coordinate
from gunpowder.ext import dvision
from gunpowder.profiling import Timing
from gunpowder.roi import Roi
from gunpowder.array import Array
from gunpowder.array_spec import ArraySpec
from .batch_provider import BatchProvider
logger = logging.getLogger(__name__)
class DvidSource(BatchProvider):
    """A DVID array source.

    Provides arrays from DVID servers for each array key given.

    Args:

        hostname (``string``):

            The name of the DVID server.

        port (``int``):

            The port of the DVID server.

        uuid (``string``):

            The UUID of the DVID node to use.

        datasets (``dict``, :class:`ArrayKey` -> ``string``):

            Dictionary mapping array keys to DVID data instance names that this
            source offers.

        masks (``dict``, :class:`ArrayKey` -> ``string``, optional):

            Dictionary of array keys to DVID ROI instance names. This will
            create binary masks from DVID ROIs.

        array_specs (``dict``, :class:`ArrayKey` -> :class:`ArraySpec`, optional):

            An optional dictionary of array keys to specs to overwrite the
            array specs automatically determined from the DVID server. This is
            useful to set ``voxel_size``, for example. Only fields that are not
            ``None`` in the given :class:`ArraySpec` will be used.
    """

    def __init__(self, hostname, port, uuid, datasets, masks=None, array_specs=None):
        self.hostname = hostname
        self.port = port
        self.url = "http://{}:{}".format(self.hostname, self.port)
        self.uuid = uuid
        self.datasets = datasets
        self.masks = masks if masks is not None else {}
        self.array_specs = array_specs if array_specs is not None else {}
        # dimensionality, inferred from the first dataset in setup()
        self.ndims = None

    def setup(self):
        # declare all datasets and masks with specs queried from the server
        for array_key, _ in self.datasets.items():
            spec = self.__get_spec(array_key)
            self.provides(array_key, spec)
        for array_key, _ in self.masks.items():
            spec = self.__get_mask_spec(array_key)
            self.provides(array_key, spec)
        logger.info("DvidSource.spec:\n%s", self.spec)

    def provide(self, request):
        """Read every requested array (or mask) from the DVID server."""
        timing = Timing(self)
        timing.start()
        batch = Batch()
        for array_key, request_spec in request.array_specs.items():
            logger.debug("Reading %s in %s...", array_key, request_spec.roi)
            voxel_size = self.spec[array_key].voxel_size
            # scale request roi to voxel units
            dataset_roi = request_spec.roi / voxel_size
            # shift request roi into dataset
            dataset_roi = dataset_roi - self.spec[array_key].roi.offset / voxel_size
            # create array spec
            array_spec = self.spec[array_key].copy()
            array_spec.roi = request_spec.roi
            # read the data
            if array_key in self.datasets:
                data = self.__read_array(self.datasets[array_key], dataset_roi)
            elif array_key in self.masks:
                data = self.__read_mask(self.masks[array_key], dataset_roi)
            else:
                assert False, (
                    "Encountered a request for %s that is neither a volume "
                    "nor a mask." % array_key
                )
            # add array to batch
            batch.arrays[array_key] = Array(data, array_spec)
        logger.debug("done")
        timing.stop()
        batch.profiling_stats.add(timing)
        return batch

    def __get_info(self, array_key):
        """Return the DVID instance info dict for ``array_key``."""
        if array_key in self.datasets:
            data = dvision.DVIDDataInstance(
                self.hostname, self.port, self.uuid, self.datasets[array_key]
            )
        elif array_key in self.masks:
            data = dvision.DVIDRegionOfInterest(
                self.hostname, self.port, self.uuid, self.masks[array_key]
            )
        else:
            assert False, (
                "Encountered a request that is neither a volume " "nor a mask."
            )
        return data.info

    def __get_spec(self, array_key):
        """Build an :class:`ArraySpec` for a dataset, merging server metadata
        with any user-supplied overrides from ``array_specs``."""
        info = self.__get_info(array_key)
        roi_min = info["Extended"]["MinPoint"]
        if roi_min is not None:
            # DVID reports points in x, y, z; gunpowder uses z, y, x
            roi_min = Coordinate(roi_min[::-1])
        roi_max = info["Extended"]["MaxPoint"]
        if roi_max is not None:
            roi_max = Coordinate(roi_max[::-1])
        data_roi = Roi(offset=roi_min, shape=(roi_max - roi_min))
        data_dims = Coordinate(data_roi.shape)
        if self.ndims is None:
            self.ndims = len(data_dims)
        else:
            # all datasets of one source must agree in dimensionality
            assert self.ndims == len(data_dims)
        if array_key in self.array_specs:
            spec = self.array_specs[array_key].copy()
        else:
            spec = ArraySpec()
        if spec.voxel_size is None:
            spec.voxel_size = Coordinate(info["Extended"]["VoxelSize"])
        if spec.roi is None:
            spec.roi = data_roi * spec.voxel_size
        data_dtype = dvision.DVIDDataInstance(
            self.hostname, self.port, self.uuid, self.datasets[array_key]
        ).dtype
        if spec.dtype is not None:
            assert spec.dtype == data_dtype, (
                "dtype %s provided in array_specs for %s, "
                "but differs from instance %s dtype %s"
                % (
                    self.array_specs[array_key].dtype,
                    array_key,
                    self.datasets[array_key],
                    data_dtype,
                )
            )
        else:
            spec.dtype = data_dtype
        if spec.interpolatable is None:
            # NOTE(review): np.float128 is unavailable on some platforms
            # (e.g. Windows builds of numpy) — TODO confirm
            spec.interpolatable = spec.dtype in [
                np.float32,
                np.float64,
                np.float128,
                np.uint8,  # assuming this is not used for labels
            ]
            logger.warning(
                "WARNING: You didn't set 'interpolatable' for %s. "
                "Based on the dtype %s, it has been set to %s. "
                "This might not be what you want.",
                array_key,
                spec.dtype,
                spec.interpolatable,
            )
        return spec

    def __get_mask_spec(self, mask_key):
        """Build an :class:`ArraySpec` for a mask derived from a DVID ROI."""
        # create initial array spec
        if mask_key in self.array_specs:
            spec = self.array_specs[mask_key].copy()
        else:
            spec = ArraySpec()
        # get voxel size
        if spec.voxel_size is None:
            voxel_size = None
            for array_key in self.datasets:
                if voxel_size is None:
                    voxel_size = self.spec[array_key].voxel_size
                else:
                    assert voxel_size == self.spec[array_key].voxel_size, (
                        "No voxel size was given for mask %s, and the voxel "
                        "sizes of the volumes %s are not all the same. I don't "
                        "know what voxel size to use to create the mask."
                        % (mask_key, self.datasets.keys())
                    )
            spec.voxel_size = voxel_size
        # get ROI
        if spec.roi is None:
            for array_key in self.datasets:
                roi = self.spec[array_key].roi
                if spec.roi is None:
                    spec.roi = roi.copy()
                else:
                    spec.roi = roi.union(spec.roi)
        # set interpolatable
        if spec.interpolatable is None:
            spec.interpolatable = False
        # set datatype
        if spec.dtype is not None and spec.dtype != np.uint8:
            # fix: use logging.warning (Logger.warn is deprecated)
            logger.warning(
                "Ignoring dtype in array_spec for %s, only np.uint8 "
                "is allowed for masks.",
                mask_key,
            )
        spec.dtype = np.uint8
        return spec

    def __read_array(self, instance, roi):
        """Read raw array data for ``roi`` (in voxels) from a data instance."""
        data_instance = dvision.DVIDDataInstance(
            self.hostname, self.port, self.uuid, instance
        )
        return data_instance[roi.get_bounding_box()]

    def __read_mask(self, instance, roi):
        """Read a binary mask for ``roi`` (in voxels) from a DVID ROI."""
        dvid_roi = dvision.DVIDRegionOfInterest(
            self.hostname, self.port, self.uuid, instance
        )
        return dvid_roi[roi.get_bounding_box()]

    def __repr__(self):
        # fix: format string previously lacked the closing parenthesis
        return "DvidSource(hostname={}, port={}, uuid={})".format(
            self.hostname, self.port, self.uuid
        )
| 8,505 | 30.157509 | 85 | py |
gunpowder | gunpowder-master/gunpowder/nodes/merge_provider.py | from gunpowder.provider_spec import ProviderSpec
from gunpowder.batch import Batch
from gunpowder.batch_request import BatchRequest
from .batch_provider import BatchProvider
import random
class MergeProvider(BatchProvider):
    """Merges different providers::

        (a, b, c) + MergeProvider()

    will create a provider that combines the arrays and points offered by
    ``a``, ``b``, and ``c``. Array and point keys of ``a``, ``b``, and ``c`` should be
    the disjoint.
    """

    def __init__(self):
        # maps each provided key to the upstream provider that offers it
        self.key_to_provider = {}

    def setup(self):
        """Collect the specs of all upstream providers, requiring that no
        key is offered by more than one of them."""
        self.enable_placeholders()
        upstream = self.get_upstream_providers()
        assert (
            len(upstream) > 0
        ), "at least one batch provider needs to be added to the MergeProvider"
        # Only allow merging if no two upstream_providers have the same
        # array/points keys
        error_message = (
            "Key {} provided by more than one upstream provider. Node MergeProvider only allows to merge "
            "providers with different keys."
        )
        for provider in upstream:
            for key, spec in provider.spec.items():
                assert self.spec is None or key not in self.spec, error_message.format(
                    key
                )
                self.provides(key, spec)
                self.key_to_provider[key] = provider

    def provide(self, request):
        """Split the request by responsible provider, fetch each part, and
        merge the resulting batches into one."""
        # group requested specs by the provider responsible for each key
        upstream_requests = {}
        for key, spec in request.items():
            provider = self.key_to_provider[key]
            if provider not in upstream_requests:
                # use new random seeds per upstream request.
                # seeds picked by random should be deterministic since
                # the provided request already has a random seed.
                seed = random.randint(0, 2**32)
                upstream_requests[provider] = BatchRequest(random_seed=seed)
            upstream_requests[provider][key] = spec
        # execute requests, merge batches
        merged_batch = Batch()
        for provider, upstream_request in upstream_requests.items():
            batch = provider.request_batch(upstream_request)
            merged_batch.arrays.update(batch.arrays)
            merged_batch.graphs.update(batch.graphs)
            merged_batch.profiling_stats.merge_with(batch.profiling_stats)
        return merged_batch
| 2,498 | 36.298507 | 106 | py |
gunpowder | gunpowder-master/gunpowder/nodes/add_affinities.py | import logging
import numpy as np
from .batch_filter import BatchFilter
from gunpowder.array import Array
from gunpowder.batch_request import BatchRequest
from gunpowder.batch import Batch
from gunpowder.coordinate import Coordinate
logger = logging.getLogger(__name__)
def seg_to_affgraph(seg, nhood):
    """Construct an affinity graph from a segmentation.

    For each voxel and each offset in ``nhood``, the affinity is 1 iff the
    label of the voxel and the label of its neighbor (voxel + offset) are
    equal and both non-zero, and 0 otherwise.

    The affinity graph is represented as shape ``(e,) + seg.shape`` with
    ``nhood.shape == (e, d)``, one row per edge. This generalizes the former
    hard-coded 2D/3D branches to arbitrary dimensionality with identical
    results for ``d`` in {2, 3}.

    Args:
        seg: label array of dimensionality ``d``.
        nhood (array-like): ``(e, d)`` offsets, one row per affinity edge.

    Returns:
        ``np.int32`` array of shape ``(e,) + seg.shape``.
    """
    nhood = np.array(nhood)
    shape = seg.shape
    nEdge = nhood.shape[0]
    dims = nhood.shape[1]
    aff = np.zeros((nEdge,) + shape, dtype=np.int32)
    for e in range(nEdge):
        offset = nhood[e]
        # slices selecting the voxels for which the neighbor at `offset`
        # still lies inside the array ("source"), and the corresponding
        # neighbor voxels ("target")
        src = tuple(
            slice(max(0, -offset[d]), min(shape[d], shape[d] - offset[d]))
            for d in range(dims)
        )
        tgt = tuple(
            slice(max(0, offset[d]), min(shape[d], shape[d] + offset[d]))
            for d in range(dims)
        )
        # affinity = labels equal AND both foreground
        aff[(e,) + src] = (seg[src] == seg[tgt]) * (seg[src] > 0) * (seg[tgt] > 0)
    return aff
class AddAffinities(BatchFilter):
"""Add an array with affinities for a given label array and neighborhood to
the batch. Affinity values are created one for each voxel and entry in the
neighborhood list, i.e., for each voxel and each neighbor of this voxel.
Values are 1 iff both labels (of the voxel and the neighbor) are equal and
non-zero.
Args:
affinity_neighborhood (``list`` of array-like):
List of offsets for the affinities to consider for each voxel.
labels (:class:`ArrayKey`):
The array to read the labels from.
affinities (:class:`ArrayKey`):
The array to generate containing the affinities.
labels_mask (:class:`ArrayKey`, optional):
The array to use as a mask for ``labels``. Affinities connecting at
least one masked out label will be masked out in
``affinities_mask``. If not given, ``affinities_mask`` will contain
ones everywhere (if requested).
unlabelled (:class:`ArrayKey`, optional):
A binary array to indicate unlabelled areas with 0. Affinities from
labelled to unlabelled voxels are set to 0, affinities between
unlabelled voxels are masked out (they will not be used for
training).
affinities_mask (:class:`ArrayKey`, optional):
The array to generate containing the affinitiy mask, as derived
from parameter ``labels_mask``.
"""
def __init__(
    self,
    affinity_neighborhood,
    labels,
    affinities,
    labels_mask=None,
    unlabelled=None,
    affinities_mask=None,
    dtype=np.uint8,
):
    """Store configuration for affinity generation."""
    # normalize the neighborhood to an (edges, dims) array once
    self.affinity_neighborhood = np.array(affinity_neighborhood)
    # input arrays
    self.labels = labels
    self.labels_mask = labels_mask
    self.unlabelled = unlabelled
    # output arrays
    self.affinities = affinities
    self.affinities_mask = affinities_mask
    self.dtype = dtype
def setup(self):
    """Declare the provided affinity (and affinity-mask) arrays.

    Affinities can only be computed where the whole neighborhood is
    available, so the provided ROI is the labels ROI shrunk by the
    negative/positive extent of the neighborhood offsets (in world units).
    """
    assert self.labels in self.spec, (
        "Upstream does not provide %s needed by " "AddAffinities" % self.labels
    )
    voxel_size = self.spec[self.labels].voxel_size
    dims = self.affinity_neighborhood.shape[1]
    # most negative neighborhood offset per dimension (<= 0), world units
    self.padding_neg = (
        Coordinate(
            min([0] + [a[d] for a in self.affinity_neighborhood])
            for d in range(dims)
        )
        * voxel_size
    )
    # most positive neighborhood offset per dimension (>= 0), world units
    self.padding_pos = (
        Coordinate(
            max([0] + [a[d] for a in self.affinity_neighborhood])
            for d in range(dims)
        )
        * voxel_size
    )
    logger.debug("padding neg: " + str(self.padding_neg))
    logger.debug("padding pos: " + str(self.padding_pos))
    spec = self.spec[self.labels].copy()
    if spec.roi is not None:
        # shrink the provided ROI so every affinity has its neighbor inside
        spec.roi = spec.roi.grow(self.padding_neg, -self.padding_pos)
    spec.dtype = self.dtype
    self.provides(self.affinities, spec)
    if self.affinities_mask:
        self.provides(self.affinities_mask, spec)
    self.enable_autoskip()
def prepare(self, request):
deps = BatchRequest()
# grow labels ROI to accomodate padding
labels_roi = request[self.affinities].roi.grow(
-self.padding_neg, self.padding_pos
)
deps[self.labels] = request[self.affinities].copy()
deps[self.labels].dtype = None
deps[self.labels].roi = labels_roi
if self.labels_mask:
deps[self.labels_mask] = deps[self.labels].copy()
if self.unlabelled:
deps[self.unlabelled] = deps[self.labels].copy()
return deps
    def process(self, batch, request):
        # Compute affinities (and, if requested, an affinity mask) from the
        # padded labels provided upstream, crop them to the requested ROI, and
        # return them in a new batch.
        outputs = Batch()
        affinities_roi = request[self.affinities].roi
        logger.debug("computing ground-truth affinities from labels")
        affinities = seg_to_affgraph(
            batch.arrays[self.labels].data.astype(np.int32), self.affinity_neighborhood
        ).astype(self.dtype)
        # crop affinities to requested ROI
        # shift into the coordinate frame of the (padded) labels array
        offset = affinities_roi.offset
        shift = -offset - self.padding_neg
        crop_roi = affinities_roi.shift(shift)
        # convert from world units to voxel coordinates
        crop_roi /= self.spec[self.labels].voxel_size
        crop = crop_roi.get_bounding_box()
        logger.debug("cropping with " + str(crop))
        # first axis enumerates the neighborhood offsets, keep it whole
        affinities = affinities[(slice(None),) + crop]
        spec = self.spec[self.affinities].copy()
        spec.roi = affinities_roi
        outputs.arrays[self.affinities] = Array(affinities, spec)
        if self.affinities_mask and self.affinities_mask in request:
            if self.labels_mask:
                logger.debug(
                    "computing ground-truth affinities mask from " "labels mask"
                )
                affinities_mask = seg_to_affgraph(
                    batch.arrays[self.labels_mask].data.astype(np.int32),
                    self.affinity_neighborhood,
                )
                affinities_mask = affinities_mask[(slice(None),) + crop]
            else:
                # no labels mask given: start from an all-ones mask
                affinities_mask = np.ones_like(affinities)
            if self.unlabelled:
                # 1 for all affinities between unlabelled voxels
                unlabelled = 1 - batch.arrays[self.unlabelled].data
                unlabelled_mask = seg_to_affgraph(
                    unlabelled.astype(np.int32), self.affinity_neighborhood
                )
                unlabelled_mask = unlabelled_mask[(slice(None),) + crop]
                # 0 for all affinities between unlabelled voxels
                unlabelled_mask = 1 - unlabelled_mask
                # combine with mask
                affinities_mask = affinities_mask * unlabelled_mask
            affinities_mask = affinities_mask.astype(affinities.dtype)
            outputs.arrays[self.affinities_mask] = Array(affinities_mask, spec)
        else:
            if self.labels_mask is not None:
                logger.warning(
                    "GT labels does have a mask, but affinities "
                    "mask is not requested."
                )
        # Should probably have a better way of handling arbitrary batch attributes
        batch.affinity_neighborhood = self.affinity_neighborhood
        return outputs
| 10,113 | 34.738516 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/daisy_request_blocks.py | from gunpowder.batch import Batch
from gunpowder.ext import daisy
from gunpowder.nodes.batch_filter import BatchFilter
from gunpowder.roi import Roi
import logging
import multiprocessing
import time
logger = logging.getLogger(__name__)
class DaisyRequestBlocks(BatchFilter):
    """Iteratively requests batches similar to ``reference`` from upstream
    providers, with their ROIs set to blocks distributed by ``daisy``.

    The ROIs of the array or point specs in the reference can be set to either
    the block's ``read_roi`` or ``write_roi``, see parameter ``roi_map``.

    The batch request to this node has to be empty, as there is no guarantee
    that this node will get to process all chunks required to fulfill a
    particular batch request.

    Args:

        reference (:class:`BatchRequest`):

            A reference :class:`BatchRequest`. This request will be shifted
            according to blocks distributed by ``daisy``.

        roi_map (``dict`` from :class:`ArrayKey` or :class:`GraphKey` to
        ``string``):

            A map indicating which daisy block ROI (``read_roi`` or
            ``write_roi``) to use for which item in the reference request.

        num_workers (``int``, optional):

            If set to >1, upstream requests are made in parallel with that
            number of workers.

        block_done_callback (function, optional):

            If given, will be called with arguments ``(block, start,
            duration)`` for each block that was processed. ``start`` and
            ``duration`` will be given in seconds, as in ``start =
            time.time()`` and ``duration = time.time() - start``, right before
            and after a block gets processed.

            This callback can be used to log blocks that have successfully
            finished processing, which can be used in ``check_function`` of
            ``daisy.run_blockwise`` to skip already processed blocks in
            repeated runs.
    """

    def __init__(self, reference, roi_map, num_workers=1, block_done_callback=None):
        self.reference = reference
        self.roi_map = roi_map
        self.num_workers = num_workers
        self.block_done_callback = block_done_callback
        if num_workers > 1:
            # NOTE(review): this queue is never read or written within this
            # class; kept for backwards compatibility — confirm before removal
            self.request_queue = multiprocessing.Queue(maxsize=0)

    def provide(self, request):
        # this node does not fulfill requests itself; it drives one upstream
        # request per daisy block and returns an empty batch
        if len(request) != 0:
            raise RuntimeError("requests made to DaisyRequestBlocks have to be empty")
        if self.num_workers > 1:
            self.workers = [
                multiprocessing.Process(target=self.__get_chunks)
                for _ in range(self.num_workers)
            ]
            for worker in self.workers:
                worker.start()
            for worker in self.workers:
                worker.join()
        else:
            self.__get_chunks()
        return Batch()

    def __get_chunks(self):
        """Acquire blocks from the daisy scheduler and issue one upstream
        request per block until no blocks are left."""
        daisy_client = daisy.Client()
        while True:
            with daisy_client.acquire_block() as block:
                if block is None:
                    return
                logger.info("Processing block %s", block)
                start = time.time()
                # shift the reference request to this block's ROIs
                chunk_request = self.reference.copy()
                for key, _ in self.reference.items():
                    roi_type = self.roi_map.get(key, None)
                    if roi_type is None:
                        raise RuntimeError(
                            "roi_map does not map item %s to either 'read_roi' "
                            "or 'write_roi'" % key
                        )
                    if roi_type == "read_roi":
                        chunk_request[key].roi = Roi(
                            block.read_roi.offset, block.read_roi.shape
                        )
                    elif roi_type == "write_roi":
                        chunk_request[key].roi = Roi(
                            block.write_roi.offset, block.write_roi.shape
                        )
                    else:
                        # bug fix: the offending value was never interpolated
                        # into the message (the "%s" was left unformatted)
                        raise RuntimeError(
                            "%s is not a valid ROI type (read_roi or write_roi)"
                            % roi_type
                        )
                self.get_upstream_provider().request_batch(chunk_request)
                end = time.time()
                if self.block_done_callback:
                    self.block_done_callback(block, start, end - start)
| 4,453 | 34.349206 | 86 | py |
gunpowder | gunpowder-master/gunpowder/nodes/generic_predict.py | import logging
import multiprocessing
import time
from gunpowder.nodes.batch_filter import BatchFilter
from gunpowder.producer_pool import ProducerPool, WorkersDied, NoResult
from gunpowder.array import ArrayKey
from gunpowder.array_spec import ArraySpec
from gunpowder.batch_request import BatchRequest
from queue import Full
logger = logging.getLogger(__name__)
class PredictProcessDied(Exception):
    """Raised by :class:`GenericPredict` when the subprocess running
    ``predict`` died unexpectedly (its worker pool reported dead workers)."""

    pass
class GenericPredict(BatchFilter):
    """Generic predict node to add predictions of a trained network to each
    batch that passes through. This node alone does nothing and should be
    subclassed for concrete implementations.

    Args:

        inputs (dict): Dictionary from names of input layers in the network to
            :class:``ArrayKey`` or batch attribute name as string.

        outputs (dict): Dictionary from the names of output layers in the
            network to :class:``ArrayKey``. New arrays will be generated by
            this node for each entry (if requested downstream).

        array_specs (dict, optional): An optional dictionary of
            :class:`ArrayKey` to :class:`ArraySpec` to set the array specs
            generated arrays (``outputs`` and ``gradients``). This is useful
            to set the ``voxel_size``, for example, if they differ from the
            voxel size of the input arrays. Only fields that are not ``None``
            in the given :class:`ArraySpec` will be used.

        spawn_subprocess (bool, optional): Whether to run ``predict`` in a
            separate process. Default is false.
    """

    def __init__(self, inputs, outputs, array_specs=None, spawn_subprocess=False):
        # deferred initialization: start() is called lazily on first prepare()
        # (or inside the worker subprocess) so that model/hardware setup
        # happens in the process that will actually run predict()
        self.initialized = False
        self.inputs = inputs
        self.outputs = outputs
        self.array_specs = {} if array_specs is None else array_specs
        self.spawn_subprocess = spawn_subprocess
        # wall-clock reference used by __produce_predict_batch to measure time
        # spent outside that method between successive batches
        self.timer_start = None

    def setup(self):
        # get common voxel size of inputs, or None if they differ
        common_voxel_size = None
        for key in self.inputs.values():
            if not isinstance(key, ArrayKey):
                continue
            voxel_size = self.spec[key].voxel_size
            if common_voxel_size is None:
                common_voxel_size = voxel_size
            elif common_voxel_size != voxel_size:
                common_voxel_size = None
                break
        # announce provided outputs
        for key in self.outputs.values():
            if key in self.array_specs:
                spec = self.array_specs[key].copy()
            else:
                spec = ArraySpec()
            if spec.voxel_size is None and not spec.nonspatial:
                assert common_voxel_size is not None, (
                    "There is no common voxel size of the inputs, and no "
                    "ArraySpec has been given for %s that defines "
                    "voxel_size." % key
                )
                spec.voxel_size = common_voxel_size
            if spec.interpolatable is None:
                # default for predictions
                spec.interpolatable = False
            self.provides(key, spec)
        if self.spawn_subprocess:
            # start prediction as a producer pool, so that we can gracefully
            # exit if anything goes wrong
            self.worker = ProducerPool([self.__produce_predict_batch], queue_size=1)
            # single-slot queue: batches are handed to the worker one at a time
            self.batch_in = multiprocessing.Queue(maxsize=1)
            # the two locks serialize the put/get handshake in process()
            self.batch_in_lock = multiprocessing.Lock()
            self.batch_out_lock = multiprocessing.Lock()
            self.worker.start()

    def teardown(self):
        if self.spawn_subprocess:
            # signal "stop"
            try:
                self.batch_in.put((None, None), timeout=2)
            except Full:
                # worker process might be stopped already
                pass
            # drain a possibly pending result so the worker can exit cleanly
            try:
                self.worker.get(timeout=2)
            except NoResult:
                pass
            self.worker.stop()
        else:
            self.stop()

    def prepare(self, request):
        # lazy start() in the in-process case; in the subprocess case start()
        # is called by __produce_predict_batch inside the worker
        if not self.initialized and not self.spawn_subprocess:
            self.start()
            self.initialized = True
        deps = BatchRequest()
        for key in self.inputs.values():
            deps[key] = request[key]
        return deps

    def process(self, batch, request):
        if self.spawn_subprocess:
            # hand the batch to the worker; the lock ordering below ensures
            # only one batch is in flight and results are matched to inputs
            start = time.time()
            self.batch_in_lock.acquire()
            logger.debug("waited for batch in lock for %.3fs", time.time() - start)
            start = time.time()
            self.batch_in.put((batch, request))
            logger.debug("queued batch for %.3fs", time.time() - start)
            start = time.time()
            with self.batch_out_lock:
                logger.debug("waited for batch out lock for %.3fs", time.time() - start)
                start = time.time()
                self.batch_in_lock.release()
                logger.debug("released batch in lock for %.3fs", time.time() - start)
                try:
                    start = time.time()
                    out = self.worker.get()
                    logger.debug("retreived batch for %.3fs", time.time() - start)
                except WorkersDied:
                    raise PredictProcessDied()
                # copy only the requested output arrays back into the batch
                for array_key in self.outputs.values():
                    if array_key in request:
                        batch.arrays[array_key] = out.arrays[array_key]
        else:
            self.predict(batch, request)

    def start(self):
        """To be implemented in subclasses.

        This method will be called before the first call to :fun:`predict`,
        from the same process that :fun:`predict` will be called from. Use
        this to initialize your model and hardware.
        """
        pass

    def predict(self, batch, request):
        """To be implemented in subclasses.

        In this method, an implementation should predict arrays on the given
        batch. Output arrays should be created according to the given request
        and added to ``batch``."""
        raise NotImplementedError("Class %s does not implement 'predict'" % self.name())

    def stop(self):
        """To be implemented in subclasses.

        This method will be called after the last call to :fun:`predict`,
        from the same process that :fun:`predict` will be called from. Use
        this to tear down your model and free training hardware.
        """
        pass

    def __produce_predict_batch(self):
        """Process one batch."""
        # runs inside the worker subprocess; start() is therefore called here
        if not self.initialized:
            self.start()
            self.initialized = True
        # time spent outside this method since the previous batch was returned
        self.time_out = 0
        if self.timer_start is not None:
            self.time_out = time.time() - self.timer_start
        self.timer_start = time.time()
        batch, request = self.batch_in.get()
        self.time_in = time.time() - self.timer_start
        # stop signal
        if batch is None:
            self.stop()
            return None
        self.timer_start = time.time()
        self.predict(batch, request)
        self.time_predict = time.time() - self.timer_start
        self.timer_start = time.time()
        logger.debug(
            "batch in: %.3fs, predict: %.3fs, batch out: %.3fs",
            self.time_in,
            self.time_predict,
            self.time_out,
        )
        return batch
| 7,420 | 33.840376 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/rasterize_graph.py | import copy
import logging
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from skimage import draw
from .batch_filter import BatchFilter
from gunpowder.array import Array
from gunpowder.array_spec import ArraySpec
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from gunpowder.freezable import Freezable
from gunpowder.morphology import enlarge_binary_map, create_ball_kernel
from gunpowder.ndarray import replace
from gunpowder.graph import GraphKey
from gunpowder.graph_spec import GraphSpec
from gunpowder.roi import Roi
logger = logging.getLogger(__name__)
class RasterizationSettings(Freezable):
    r"""Data structure to store parameters for rasterization of graph.

    Args:

        radius (``float`` or ``tuple`` of ``float``):

            The radius (for balls or tubes) or sigma (for peaks) in world units.

        mode (``string``):

            One of ``ball`` or ``peak``. If ``ball`` (the default), a ball with the
            given ``radius`` will be drawn. If ``peak``, the point will be
            rasterized as a peak with values :math:`\exp(-|x-p|^2/\sigma)` with
            sigma set by ``radius``.

        mask (:class:`ArrayKey`, optional):

            Used to mask the rasterization of points. The array is assumed to
            contain discrete labels. The object id at the specific point being
            rasterized is used to intersect the rasterization to keep it inside
            the specific object.

        inner_radius_fraction (``float``, optional):

            Only for mode ``ball``.

            If set, instead of a ball, a hollow sphere is rastered. The radius
            of the whole sphere corresponds to the radius specified with
            ``radius``. This parameter sets the radius of the hollow area, as a
            fraction of ``radius``.

        fg_value (``int``, optional):

            Only for mode ``ball``.

            The value to use to rasterize points, defaults to 1.

        bg_value (``int``, optional):

            Only for mode ``ball``.

            The value to use to for the background in the output array,
            defaults to 0.

        edges (``bool``, optional):

            Whether to rasterize edges by linearly interpolating between Nodes.
            Default is True.

        color_attr (``str``, optional):

            Which graph attribute to use for coloring nodes and edges. One
            useful example might be `component` which would color your graph
            based on the component labels.

            Notes:
            - Only available in "ball" mode
            - Nodes and Edges missing the attribute will be skipped.
            - color_attr must be populated for nodes and edges upstream of this node
    """

    # NOTE: the docstring above is a raw string (r"""...""") on purpose —
    # "\exp" and "\sigma" are invalid escape sequences in a regular string.
    def __init__(
        self,
        radius,
        mode="ball",
        mask=None,
        inner_radius_fraction=None,
        fg_value=1,
        bg_value=0,
        edges=True,
        color_attr=None,
    ):
        # normalize scalar or per-dimension radii to a 1-D float64 array
        radius = np.array([radius]).flatten().astype(np.float64)
        if inner_radius_fraction is not None:
            assert (
                inner_radius_fraction > 0.0 and inner_radius_fraction < 1.0
            ), "Inner radius fraction has to be between (excluding) 0 and 1"
            # store the complementary fraction; downstream code consumes this
            # value directly
            inner_radius_fraction = 1.0 - inner_radius_fraction
        self.radius = radius
        self.mode = mode
        self.mask = mask
        self.inner_radius_fraction = inner_radius_fraction
        self.fg_value = fg_value
        self.bg_value = bg_value
        self.edges = edges
        self.color_attr = color_attr
        # disallow adding further attributes (Freezable)
        self.freeze()
class RasterizeGraph(BatchFilter):
    """Draw graphs into a binary array as balls/tubes of a given radius.

    Args:

        graph (:class:`GraphKey`):
            The key of the graph to rasterize.

        array (:class:`ArrayKey`):
            The key of the binary array to create.

        array_spec (:class:`ArraySpec`, optional):

            The spec of the array to create. Use this to set the datatype and
            voxel size.

        settings (:class:`RasterizationSettings`, optional):
            Which settings to use to rasterize the graph.
    """

    def __init__(self, graph, array, array_spec=None, settings=None):
        self.graph = graph
        self.array = array
        if array_spec is None:
            self.array_spec = ArraySpec()
        else:
            self.array_spec = array_spec
        if settings is None:
            # default: single-voxel balls (radius 1)
            self.settings = RasterizationSettings(1)
        else:
            self.settings = settings

    def setup(self):
        graph_roi = self.spec[self.graph].roi
        # fill in defaults for unset spec fields
        if self.array_spec.voxel_size is None:
            self.array_spec.voxel_size = Coordinate((1,) * graph_roi.dims)
        if self.array_spec.dtype is None:
            if self.settings.mode == "ball":
                self.array_spec.dtype = np.uint8
            else:
                # "peak" mode produces continuous values
                self.array_spec.dtype = np.float32
        self.array_spec.roi = graph_roi.copy()
        self.provides(self.array, self.array_spec)
        self.enable_autoskip()

    def prepare(self, request):
        # context (in world units) by which points outside the requested ROI
        # can still influence the rasterization inside it
        if self.settings.mode == "ball":
            context = np.ceil(self.settings.radius).astype(int)
        elif self.settings.mode == "peak":
            context = np.ceil(2 * self.settings.radius).astype(int)
        else:
            raise RuntimeError("unknown raster mode %s" % self.settings.mode)
        dims = self.array_spec.roi.dims
        if len(context) == 1:
            # scalar radius: same context in every dimension
            context = context.repeat(dims)
        # request graph in a larger area to get rasterization from outside
        # graph
        graph_roi = request[self.array].roi.grow(
            Coordinate(context), Coordinate(context)
        )
        # however, restrict the request to the graph actually provided
        graph_roi = graph_roi.intersect(self.spec[self.graph].roi)
        deps = BatchRequest()
        deps[self.graph] = GraphSpec(roi=graph_roi)
        if self.settings.mask is not None:
            mask_voxel_size = self.spec[self.settings.mask].voxel_size
            assert (
                self.spec[self.array].voxel_size == mask_voxel_size
            ), "Voxel size of mask and rasterized volume need to be equal"
            new_mask_roi = graph_roi.snap_to_grid(mask_voxel_size)
            deps[self.settings.mask] = ArraySpec(roi=new_mask_roi)
        return deps

    def process(self, batch, request):
        graph = batch.graphs[self.graph]
        mask = self.settings.mask
        voxel_size = self.spec[self.array].voxel_size
        # get roi used for creating the new array (graph_roi does not
        # necessarily align with voxel size)
        enlarged_vol_roi = graph.spec.roi.snap_to_grid(voxel_size)
        offset = enlarged_vol_roi.begin / voxel_size
        shape = enlarged_vol_roi.shape / voxel_size
        data_roi = Roi(offset, shape)
        logger.debug("Graph in %s", graph.spec.roi)
        for node in graph.nodes:
            logger.debug("%d, %s", node.id, node.location)
        logger.debug("Data roi in voxels: %s", data_roi)
        logger.debug("Data roi in world units: %s", data_roi * voxel_size)
        if graph.num_vertices == 0:
            # If there are no nodes at all, just create an empty matrix.
            rasterized_graph_data = np.zeros(
                data_roi.shape, dtype=self.spec[self.array].dtype
            )
        elif mask is not None:
            mask_array = batch.arrays[mask].crop(enlarged_vol_roi)
            # get those component labels in the mask, that contain graph
            labels = []
            for i, point in graph.data.items():
                v = Coordinate(point.location / voxel_size)
                v -= data_roi.begin
                labels.append(mask_array.data[v])
            # Make list unique
            labels = list(set(labels))
            # zero label should be ignored
            if 0 in labels:
                labels.remove(0)
            if len(labels) == 0:
                logger.debug(
                    "Graph and provided object mask do not overlap. No graph to rasterize."
                )
                rasterized_graph_data = np.zeros(
                    data_roi.shape, dtype=self.spec[self.array].dtype
                )
            else:
                # create data for the whole graph ROI, "or"ed together over
                # individual object masks
                rasterized_graph_data = np.sum(
                    [
                        self.__rasterize(
                            graph,
                            data_roi,
                            voxel_size,
                            self.spec[self.array].dtype,
                            self.settings,
                            Array(data=mask_array.data == label, spec=mask_array.spec),
                        )
                        for label in labels
                    ],
                    axis=0,
                )
        else:
            # create data for the whole graph ROI without mask
            rasterized_graph_data = self.__rasterize(
                graph, data_roi, voxel_size, self.spec[self.array].dtype, self.settings
            )
        # fix bg/fg labelling if requested
        if self.settings.bg_value != 0 or self.settings.fg_value != 1:
            replaced = replace(
                rasterized_graph_data,
                [0, 1],
                [self.settings.bg_value, self.settings.fg_value],
            )
            rasterized_graph_data = replaced.astype(self.spec[self.array].dtype)
        # create array and crop it to requested roi
        spec = self.spec[self.array].copy()
        spec.roi = data_roi * voxel_size
        rasterized_points = Array(data=rasterized_graph_data, spec=spec)
        batch[self.array] = rasterized_points.crop(request[self.array].roi)

    def __rasterize(
        self, graph, data_roi, voxel_size, dtype, settings, mask_array=None
    ):
        """Rasterize 'graph' into an array with the given 'voxel_size'."""
        mask = mask_array.data if mask_array is not None else None
        logger.debug("Rasterizing graph in %s", graph.spec.roi)
        # prepare output array
        rasterized_graph = np.zeros(data_roi.shape, dtype=dtype)
        # Fast rasterization currently only implemented for mode ball without
        # inner radius set
        use_fast_rasterization = (
            settings.mode == "ball"
            and settings.inner_radius_fraction is None
            and len(list(graph.edges)) == 0
        )
        if use_fast_rasterization:
            dims = len(rasterized_graph.shape)
            # get structuring element for mode ball
            ball_kernel = create_ball_kernel(settings.radius, voxel_size)
            radius_voxel = Coordinate(np.array(ball_kernel.shape) / 2)
            data_roi_base = Roi(
                offset=Coordinate((0,) * dims), shape=Coordinate(rasterized_graph.shape)
            )
            kernel_roi_base = Roi(
                offset=Coordinate((0,) * dims), shape=Coordinate(ball_kernel.shape)
            )
        # Rasterize volume either with single voxel or with defined struct element
        for node in graph.nodes:
            # get the voxel coordinate, 'Coordinate' ensures integer
            v = Coordinate(node.location / voxel_size)
            # get the voxel coordinate relative to output array start
            v -= data_roi.begin
            # skip graph outside of mask
            if mask is not None and not mask[v]:
                continue
            logger.debug(
                "Rasterizing node %s at %s",
                node.location,
                node.location / voxel_size - data_roi.begin,
            )
            if use_fast_rasterization:
                # Calculate where to crop the kernel mask and the rasterized array
                shifted_kernel = kernel_roi_base.shift(v - radius_voxel)
                shifted_data = data_roi_base.shift(-(v - radius_voxel))
                arr_crop = data_roi_base.intersect(shifted_kernel)
                kernel_crop = kernel_roi_base.intersect(shifted_data)
                arr_crop_ind = arr_crop.get_bounding_box()
                kernel_crop_ind = kernel_crop.get_bounding_box()
                rasterized_graph[arr_crop_ind] = np.logical_or(
                    ball_kernel[kernel_crop_ind], rasterized_graph[arr_crop_ind]
                )
            else:
                if settings.color_attr is not None:
                    c = graph.nodes[node].get(settings.color_attr)
                    if c is None:
                        logger.debug(f"Skipping node: {node}")
                        continue
                    elif np.isclose(c, 1) and not np.isclose(settings.fg_value, 1):
                        logger.warning(
                            f"Node {node} is being colored with color {c} according to "
                            f"attribute {settings.color_attr} "
                            f"but color 1 will be replaced with fg_value: {settings.fg_value}"
                        )
                else:
                    c = 1
                rasterized_graph[v] = c
        if settings.edges:
            # rasterize each edge as a discrete line between its end nodes
            for e in graph.edges:
                if settings.color_attr is not None:
                    c = graph.edges[e].get(settings.color_attr)
                    if c is None:
                        continue
                    elif np.isclose(c, 1) and not np.isclose(settings.fg_value, 1):
                        logger.warning(
                            f"Edge {e} is being colored with color {c} according to "
                            f"attribute {settings.color_attr} "
                            f"but color 1 will be replaced with fg_value: {settings.fg_value}"
                        )
                u = graph.node(e.u)
                v = graph.node(e.v)
                u_coord = Coordinate(u.location / voxel_size)
                v_coord = Coordinate(v.location / voxel_size)
                line = draw.line_nd(u_coord, v_coord, endpoint=True)
                rasterized_graph[line] = 1
        # grow graph
        if not use_fast_rasterization:
            if settings.mode == "ball":
                enlarge_binary_map(
                    rasterized_graph,
                    settings.radius,
                    voxel_size,
                    settings.inner_radius_fraction,
                    in_place=True,
                )
            else:
                # "peak" mode: blur single voxels into Gaussian peaks
                sigmas = settings.radius / voxel_size
                gaussian_filter(
                    rasterized_graph, sigmas, output=rasterized_graph, mode="constant"
                )
                # renormalize to have 1 be the highest value
                max_value = np.max(rasterized_graph)
                if max_value > 0:
                    rasterized_graph /= max_value
        if mask_array is not None:
            # use more efficient bitwise operation when possible
            if settings.mode == "ball":
                rasterized_graph &= mask
            else:
                rasterized_graph *= mask
        return rasterized_graph
| 15,235 | 36.07056 | 94 | py |
gunpowder | gunpowder-master/gunpowder/nodes/deform_augment.py | from .batch_filter import BatchFilter
from gunpowder.batch import Batch
from gunpowder.batch_request import BatchRequest
from gunpowder.coordinate import Coordinate
from gunpowder.roi import Roi
from gunpowder.array import ArrayKey, Array
from gunpowder.array_spec import ArraySpec
from augment.transform import (
create_3D_rotation_transformation,
create_elastic_transformation,
create_identity_transformation,
create_rotation_transformation,
)
from augment.augment import apply_transformation, upscale_transformation
import numpy as np
from scipy import ndimage
from scipy.spatial.transform import Rotation
import logging
import math
import random
logger = logging.getLogger(__name__)
# TODO: Add half voxel to points
class DeformAugment(BatchFilter):
"""Elasticly deform a batch. Requests larger batches upstream to avoid data
loss due to rotation and jitter.
Args:
control_point_spacing (``tuple`` of ``int``):
Distance between control points for the elastic deformation, in
physical units per dimension.
jitter_sigma (``tuple`` of ``float``):
Standard deviation of control point jitter distribution, in physical units
per dimension.
scale_interval (``tuple`` of two ``floats``):
Interval to randomly sample scale factors from.
subsample (``int``):
Instead of creating an elastic transformation on the full
resolution, create one subsampled by the given factor, and linearly
interpolate to obtain the full resolution transformation. This can
significantly speed up this node, at the expense of having visible
piecewise linear deformations for large factors. Usually, a factor
of 4 can savely by used without noticable changes. However, the
default is 1 (i.e., no subsampling).
spatial_dims (``int``):
The number of spatial dimensions in arrays. Spatial dimensions are
assumed to be the last ones and cannot be more than 3 (default).
Set this value here to avoid treating channels as spacial
dimension. If, for example, your array is indexed as ``(c,y,x)``
(2D plus channels), you would want to set ``spatial_dims=2`` to
perform the elastic deformation only on x and y.
use_fast_points_transform (``bool``):
By solving for all of your points simultaneously with the following
3 step proceedure:
1) Rasterize nodes into numpy array
2) Apply elastic transform to array
3) Read out nodes via center of mass of transformed points
You can gain substantial speed up as opposed to calculating the
elastic transform for each point individually. However this may
lead to nodes being lost during the transform.
recompute_missing_points (``bool``):
Whether or not to compute the elastic transform node wise for nodes
that were lossed during the fast elastic transform process.
"""
def __init__(
self,
control_point_spacing: Coordinate,
jitter_sigma: Coordinate,
scale_interval=(1.0, 1.0),
rotate: bool = True,
subsample=1,
spatial_dims=3,
use_fast_points_transform=False,
recompute_missing_points=True,
transform_key: ArrayKey = None,
graph_raster_voxel_size: Coordinate = None,
):
self.control_point_spacing = Coordinate(control_point_spacing)
self.jitter_sigma = Coordinate(jitter_sigma)
self.scale_min = scale_interval[0]
self.scale_max = scale_interval[1]
self.rotate = rotate
self.subsample = subsample
self.spatial_dims = spatial_dims
self.use_fast_points_transform = use_fast_points_transform
self.recompute_missing_points = recompute_missing_points
self.transform_key = transform_key
self.graph_raster_voxel_size = Coordinate(graph_raster_voxel_size)
assert (
self.control_point_spacing.dims
== self.jitter_sigma.dims
== self.graph_raster_voxel_size.dims
)
def setup(self):
if self.transform_key is not None:
upstream_roi = self.spec.get_total_roi()
upstream_roi = Roi(
upstream_roi.offset[-self.spatial_dims :],
upstream_roi.shape[-self.spatial_dims :],
).snap_to_grid(self.control_point_spacing, mode="shrink")
spec = ArraySpec(
roi=upstream_roi,
voxel_size=self.control_point_spacing,
interpolatable=True,
)
self.provides(self.transform_key, spec)
    def prepare(self, request):
        # Build the master deformation field once per request, then sample it
        # per requested key at that key's voxel size, and grow the upstream
        # ROIs to cover everything the transformation pulls from.
        seed = request.random_seed
        random.seed(seed)
        np.random.seed(seed)
        # get the total ROI of all requests
        total_roi = request.get_total_roi()
        logger.debug("total ROI is %s" % total_roi)
        # First, get the total ROI of the request in spatial dimensions only.
        # Channels and time don't matter. This is our master ROI.
        # get master ROI
        master_roi = Roi(
            total_roi.begin[-self.spatial_dims :],
            total_roi.shape[-self.spatial_dims :],
        )
        self.spatial_dims = master_roi.dims
        logger.debug("master ROI is %s" % master_roi)
        # make sure the master ROI aligns with the control point spacing
        master_roi_snapped = master_roi.snap_to_grid(
            self.control_point_spacing, mode="grow"
        )
        logger.debug(
            "master ROI aligned with control points is %s" % master_roi_snapped
        )
        # grow by 1 control point spacing
        master_roi_snapped = master_roi_snapped.grow(
            self.control_point_spacing, self.control_point_spacing
        )
        # get master roi in control point spacing
        # NOTE(review): master_roi_sampled is computed but never used below —
        # confirm whether it can be removed
        master_roi_sampled = master_roi_snapped / self.control_point_spacing
        logger.debug("master ROI in control point spacing is %s" % master_roi_sampled)
        # Second, create a master transformation. This is a transformation that
        # covers all voxels of the all requested ROIs. The master transformation
        # is zero-based, all transformations are relative to the origin of master_roi_snapped
        self.master_transformation_spec = ArraySpec(
            master_roi_snapped, self.control_point_spacing, interpolatable=True
        )
        (
            self.master_transformation,
            self.local_transformation,
        ) = self.__create_transformation(self.master_transformation_spec)
        # Third, sample the master transformation for each of the
        # smaller requested ROIs at their respective voxel resolution.
        # crop the parts corresponding to the requested ROIs
        self.transformations = {}
        deps = BatchRequest()
        for key, spec in request.items():
            if key == self.transform_key:
                continue
            spec = spec.copy()
            if spec.roi is None:
                continue
            # get target roi and target spacing (voxel size for arrays or just control point
            # spacing for graphs)
            target_roi = Roi(
                spec.roi.begin[-self.spatial_dims :],
                spec.roi.shape[-self.spatial_dims :],
            )
            # get voxel size of arrays or use graph_raster_voxel_size for graphs
            if isinstance(key, ArrayKey):
                voxel_size = Coordinate(self.spec[key].voxel_size)
            else:
                # must select voxel size for the graph spec because otherwise we would
                # interpolate the transformation onto a spacing of 1 which may be
                # way too large
                voxel_size = self.graph_raster_voxel_size
            # grow target_roi by 1 voxel, this allows us catch nodes that project
            # outside our bounds
            # NOTE(review): if voxel_size is None this grow() already fails
            # before the assert below fires — the assert is effectively too
            # late; confirm intended behavior
            target_roi = target_roi.grow(voxel_size, voxel_size)
            assert (
                voxel_size is not None
            ), "Please provide a graph_raster_voxel_size when deforming graphs"
            # use only spatial dims for transformations
            voxel_size = Coordinate(voxel_size[-self.spatial_dims :])
            target_spatial_roi = Roi(
                target_roi.offset[-self.spatial_dims :],
                target_roi.shape[-self.spatial_dims :],
            )
            transform_spec = ArraySpec(
                target_spatial_roi.snap_to_grid(voxel_size), voxel_size
            )
            # we save transformations that have been sampled for specific ROI's and voxel sizes,
            # no need to recompute. This can save time if you are requesting multiple arrays of
            # the same voxel size and shape
            if (
                target_spatial_roi.offset,
                target_spatial_roi.shape,
                voxel_size,
            ) in self.transformations:
                transformation = self.transformations[
                    (target_spatial_roi.offset, target_spatial_roi.shape, voxel_size)
                ]
            else:
                # sample the master transformation at the voxel spacing of each array
                transformation = self.__sample_transform(
                    self.master_transformation, transform_spec
                )
                self.transformations[
                    (target_spatial_roi.offset, target_spatial_roi.shape, voxel_size)
                ] = transformation
            # get ROI of all control points necessary to perform transformation
            #
            # for that we follow the same transformations to get from the
            # request ROI to the target ROI in master ROI in control points, just in
            # reverse
            source_roi = self.__get_source_roi(transformation)
            # update upstream request
            spec.roi = Roi(
                spec.roi.begin[: -self.spatial_dims]
                + source_roi.begin[-self.spatial_dims :],
                spec.roi.shape[: -self.spatial_dims]
                + source_roi.shape[-self.spatial_dims :],
            )
            deps[key] = spec
            logger.debug("upstream request roi for %s = %s" % (key, spec.roi))
        return deps
    def process(self, batch, request):
        """Apply the previously sampled deformations to the batch.

        Arrays are warped with the transformation cached in
        ``self.transformations`` for their (offset, shape, voxel_size);
        graph nodes are projected to their deformed locations, either via
        the fast rasterized projection or per-node. If ``transform_key``
        is set, the local (jitter/rotation) transformation is attached to
        the output batch as well.

        Args:
            batch: the upstream :class:`Batch` to deform.
            request: the downstream request (defines the target ROIs).

        Returns:
            A new :class:`Batch` with deformed arrays and graphs.
        """
        out_batch = Batch()
        for array_key, array in batch.arrays.items():
            # spatial part of the requested ROI; this must match a key of a
            # transformation sampled in prepare()
            request_roi = Roi(
                request[array_key].roi.offset[-self.spatial_dims :],
                request[array_key].roi.shape[-self.spatial_dims :],
            )
            voxel_size = Coordinate(array.spec.voxel_size[-self.spatial_dims :])
            assert (
                request_roi.offset,
                request_roi.shape,
                voxel_size,
            ) in self.transformations, f"{(request_roi.offset, request_roi.shape, voxel_size)} not in {list(self.transformations.keys())}"
            # reshape array data into (channels,) + spatial dims
            transformed_array = self.__apply_transform(
                array,
                self.transformations[
                    (request_roi.offset, request_roi.shape, voxel_size)
                ],
            )
            out_batch[array_key] = transformed_array
        for graph_key, graph in batch.graphs.items():
            # spatial part of the requested graph ROI
            target_roi = Roi(
                request[graph_key].roi.offset[-self.spatial_dims :],
                request[graph_key].roi.shape[-self.spatial_dims :],
            )
            # transformation was sampled on a ROI grown by one raster voxel
            # (see prepare()), so look it up with the same grown ROI
            transform_roi = target_roi.grow(
                self.graph_raster_voxel_size, self.graph_raster_voxel_size
            )
            source_roi = Roi(
                graph.spec.roi.offset[-self.spatial_dims :],
                graph.spec.roi.shape[-self.spatial_dims :],
            )
            nodes = list(graph.nodes)
            if self.use_fast_points_transform:
                # project all nodes at once via rasterization; returns the
                # nodes that could not be projected this way
                missed_nodes = self.__fast_point_projection(
                    self.transformations[
                        transform_roi.offset,
                        transform_roi.shape,
                        self.graph_raster_voxel_size,
                    ],
                    nodes,
                    source_roi,
                    target_roi=transform_roi,
                )
                if not self.recompute_missing_points:
                    # drop nodes the fast projection missed instead of
                    # falling back to the exact (slow) projection
                    for node in set(missed_nodes):
                        graph.remove_node(node, retain_connectivity=True)
                    missed_nodes = []
            else:
                missed_nodes = nodes
            for node in missed_nodes:
                # logger.debug("projecting %s", node.location)
                # get location relative to beginning of upstream ROI
                location = node.location
                # get spatial coordinates of node
                location_spatial = location[-self.spatial_dims :]
                # get projected location in transformation data space, this
                # yields voxel coordinates relative to target ROI
                projected = self.__project(
                    self.transformations[
                        transform_roi.offset,
                        transform_roi.shape,
                        self.graph_raster_voxel_size,
                    ],
                    location_spatial,
                )
                logger.debug("projected: %s", projected)
                # update spatial coordinates of node location
                node.location[-self.spatial_dims :] = projected
                logger.debug("final location: %s", node.location)
            # crop away nodes that were projected outside the requested ROI
            out_batch[graph_key] = graph.crop(target_roi)
        if self.transform_key is not None:
            out_batch[self.transform_key] = self.local_transformation
        return out_batch
    def __apply_transform(self, array: Array, transformation: Array) -> Array:
        """Warp ``array`` according to ``transformation``.

        ``transformation`` stores, per output voxel, the source point in
        world units. It is converted here into voxel coordinates relative
        to ``array``'s ROI before sampling. The warp is applied to each
        channel independently; the output ROI is taken from the
        transformation's ROI (non-spatial leading dimensions keep their
        original offset/shape).

        Returns:
            A new :class:`Array` with the warped data and updated ROI.
        """
        input_shape = array.data.shape
        output_shape = transformation.data.shape
        # leading, non-spatial (channel) dimensions are preserved as-is
        channel_shape = input_shape[: -self.spatial_dims]
        data = array.data.reshape((-1,) + input_shape[-self.spatial_dims :])
        offset = array.spec.roi.offset[-self.spatial_dims :]
        voxel_size = array.spec.voxel_size[-self.spatial_dims :]
        # apply transformation on each channel
        # convert world-unit source points into voxel coordinates relative
        # to the input array (in-place on a copy)
        transform = transformation.data.copy()
        transform -= np.array(offset).reshape((-1,) + (1,) * self.spatial_dims)
        transform /= np.array(voxel_size).reshape((-1,) + (1,) * self.spatial_dims)
        data = np.array(
            [
                apply_transformation(
                    data[c],
                    transform,
                    interpolate=array.spec.interpolatable,
                )
                for c in range(data.shape[0])
            ]
        )
        spec = array.spec.copy()
        # output ROI: non-spatial dims from the input, spatial dims from the
        # transformation
        spec.roi = Roi(
            spec.roi.offset[: -self.spatial_dims] + transformation.spec.roi.offset[:],
            spec.roi.shape[: -self.spatial_dims] + transformation.spec.roi.shape[:],
        )
        return Array(
            data.reshape(channel_shape + output_shape[-self.spatial_dims :]), spec
        )
def __sample_transform(
self,
transformation: Array,
output_spec: ArraySpec,
interpolate_order=1,
) -> Array:
if output_spec.voxel_size == transformation.spec.voxel_size:
# if voxel_size == control_point_spacing we can simply slice into the master roi
relative_output_roi = (
output_spec.roi - transformation.spec.roi.offset
).snap_to_grid(output_spec.voxel_size) / output_spec.voxel_size
sampled = np.copy(
transformation.data[
(slice(None),) + relative_output_roi.get_bounding_box()
]
)
return Array(
sampled,
ArraySpec(
output_spec.roi.snap_to_grid(output_spec.voxel_size),
output_spec.voxel_size,
interpolatable=True,
),
)
dims = len(output_spec.voxel_size)
output_shape = output_spec.roi.shape / output_spec.voxel_size
offset = np.array(
[
o / s
for o, s in zip(
output_spec.roi.offset - transformation.spec.roi.offset,
transformation.spec.voxel_size,
)
]
)
step = np.array(
[
o / i
for o, i in zip(output_spec.voxel_size, transformation.spec.voxel_size)
]
)
coordinates = np.meshgrid(
range(dims),
*[
np.linspace(o, (shape - 1) * step + o, shape)
for o, shape, step in zip(offset, output_shape, step)
],
indexing="ij",
)
coordinates = np.stack(coordinates)
sampled = ndimage.map_coordinates(
transformation.data,
coordinates=coordinates,
order=3,
mode="nearest",
)
return Array(sampled, ArraySpec(output_spec.roi, output_spec.voxel_size))
    def __create_transformation(self, target_spec: ArraySpec):
        """Sample a new random transformation for ``target_spec``.

        The transformation is the sum of a (scaled) identity map, an
        optional elastic jitter, and an optional random rotation. All
        components are created in voxel space first and converted to world
        units at the end.

        Returns:
            ``(total, local)`` tuple of :class:`Array`\\ s: the full
            transformation (identity + local) and the local
            (jitter/rotation) part alone, both with ``target_spec``.
        """
        # uniform random global scale factor in [scale_min, scale_max)
        scale = self.scale_min + random.random() * (self.scale_max - self.scale_min)
        target_shape = target_spec.roi.shape / target_spec.voxel_size
        global_transformation = create_identity_transformation(
            target_shape,
            subsample=self.subsample,
            scale=scale,
        )
        # accumulate jitter and rotation separately from the identity part
        local_transformation = np.zeros_like(global_transformation)
        if sum(self.jitter_sigma) > 0:
            # sigma is given in world units; convert to control points
            # (second positional arg is presumably the control point
            # spacing -- TODO confirm against create_elastic_transformation)
            el_transformation = create_elastic_transformation(
                target_shape,
                1,
                np.array(self.jitter_sigma) / self.control_point_spacing,
                subsample=self.subsample,
            )
            local_transformation += el_transformation
        if self.rotate:
            assert min(target_spec.voxel_size) == max(
                target_spec.voxel_size
            ), "Only isotropic control point spacing supported when rotating"
            if self.spatial_dims == 2:
                # random 2D rotation angle in [0, pi)
                rot_transformation = create_rotation_transformation(
                    target_shape,
                    random.random() * math.pi,
                )
            else:
                # uniformly random 3D rotation
                angle = Rotation.random()
                rot_transformation = create_3D_rotation_transformation(
                    target_shape, angle
                )
            local_transformation += rot_transformation
        if self.subsample > 1:
            # jitter/rotation were created on a subsampled grid; bring them
            # back to full resolution
            local_transformation = upscale_transformation(
                local_transformation, target_shape
            )
        # transform into world units
        global_transformation *= np.array(target_spec.voxel_size).reshape(
            (len(target_spec.voxel_size),) + (1,) * self.spatial_dims
        )
        global_transformation += np.array(target_spec.roi.offset).reshape(
            (len(target_spec.roi.offset),) + (1,) * self.spatial_dims
        )
        local_transformation *= np.array(target_spec.voxel_size).reshape(
            (len(target_spec.voxel_size),) + (1,) * self.spatial_dims
        )
        return (
            Array(global_transformation + local_transformation, target_spec),
            Array(local_transformation, target_spec),
        )
    def __fast_point_projection(self, transformation, nodes, source_roi, target_roi):
        """Project many nodes at once by rasterizing them into an array.

        Node ids are painted into an id array at ``graph_raster_voxel_size``
        resolution, the array is warped with ``transformation``, and each
        node's new location is taken as the center of mass of its id in the
        warped array. Nodes that collide in one raster voxel, fall outside
        ``source_roi``, or vanish under the warp cannot be projected this
        way.

        Returns:
            The list of nodes that could not be projected (to be handled
            by the exact per-node projection, or dropped).
        """
        if len(nodes) < 1:
            return []
        # rasterize the points into an array
        ids, locs = zip(
            *[
                (
                    node.id,
                    (
                        np.floor(node.location[-self.spatial_dims :]).astype(int)
                        - source_roi.begin
                    )
                    // self.graph_raster_voxel_size,
                )
                for node in nodes
                if source_roi.contains(node.location)
            ]
        )
        ids, locs = np.array(ids), tuple(zip(*locs))
        points_array = np.zeros(
            source_roi.shape / self.graph_raster_voxel_size, dtype=np.int64
        )
        # NOTE: nodes sharing a raster voxel overwrite each other here; the
        # overwritten ones end up in node_dict below and are reported missing
        points_array[locs] = ids
        # reshape array data into (channels,) + spatial dims
        shape = points_array.shape
        data = points_array.reshape((-1,) + shape[-self.spatial_dims :])
        array = Array(
            data,
            ArraySpec(
                Roi(
                    source_roi.begin[-self.spatial_dims :],
                    Coordinate(shape) * self.graph_raster_voxel_size,
                ),
                self.graph_raster_voxel_size,
            ),
        )
        transformed = self.__apply_transform(array, transformation)
        data = transformed.data
        missing_points = []
        # center of mass (in voxels) of each id in the warped id array;
        # NaN for ids that do not appear in the warped array
        projected_locs = ndimage.center_of_mass(data > 0, data, ids)
        # convert voxel centers back to world units
        projected_locs = [
            (np.array(loc[-self.spatial_dims :]) + 0.5) * self.graph_raster_voxel_size
            + transformation.spec.roi.begin
            for loc in projected_locs
        ]
        node_dict = {node.id: node for node in nodes}
        for point_id, proj_loc in zip(ids, projected_locs):
            point = node_dict.pop(point_id)
            if not any([np.isnan(x) for x in proj_loc]):
                assert (
                    len(proj_loc) == self.spatial_dims
                ), "projected location has wrong number of dimensions: {}, expected: {}".format(
                    len(proj_loc), self.spatial_dims
                )
                point.location[-self.spatial_dims :] = proj_loc
            else:
                missing_points.append(point)
        # everything left in node_dict was never rasterized (outside ROI or
        # overwritten by a colliding node)
        for node in node_dict.values():
            missing_points.append(node)
        logger.debug(
            "{} of {} points lost in fast points projection".format(
                len(missing_points), len(ids)
            )
        )
        return missing_points
    def __project(self, transformation: Array, location: np.ndarray) -> np.ndarray:
        """Find the projection of ``location`` under ``transformation``.

        Searches the transformation grid for the voxel whose stored source
        point is closest to ``location`` (exhaustive argmin over the
        squared distance field) and returns the world-unit center of that
        voxel. Note that the closest grid point is returned even if
        ``location`` lies outside the range of the transformation.
        """
        dims = len(location)
        # subtract location from transformation
        diff = transformation.data.copy()
        for d in range(dims):
            diff[d] -= location[d]
        # square
        diff2 = diff * diff
        # sum
        dist = diff2.sum(axis=0)
        # find grid point closest to location
        center_grid = Coordinate(np.unravel_index(dist.argmin(), dist.shape))
        # source point stored at that grid position (only used for logging)
        center_source = self.__source_at(transformation, center_grid)
        logger.debug("projecting %s onto grid", location)
        logger.debug("grid shape: %s", transformation.data.shape[1:])
        logger.debug("grid projection: %s", center_grid)
        logger.debug("dist shape: %s", dist.shape)
        logger.debug("dist.argmin(): %s", dist.argmin())
        logger.debug("dist[argmin]: %s", dist[center_grid])
        logger.debug(
            "transform[argmin]: %s", transformation.data[(slice(None),) + center_grid]
        )
        logger.debug("min dist: %s", dist.min())
        logger.debug("center source: %s", center_source)
        # add a half voxel step to localize each transformed point to the center of the
        # closest voxel
        return (
            np.array(center_grid, dtype=np.float32) + 0.5
        ) * transformation.spec.voxel_size + transformation.spec.roi.offset
def __source_at(self, transformation, index):
"""Read the source point of a transformation at index."""
slices = (slice(None),) + tuple(slice(i, i + 1) for i in index)
return transformation.data[slices].flatten()
    def __get_source_roi(self, transformation):
        """Compute the upstream ROI needed to evaluate ``transformation``.

        The transformation stores source points in world units; the needed
        source ROI is the bounding box of those points, padded by one voxel
        per dimension and snapped to the transformation's voxel grid.
        """
        # this gets you the source_roi in offset space. We need to add 1 voxel
        # to the shape to get the closed interval ROI
        # get bounding box of needed data for transformation
        bb_min = Coordinate(
            int(math.floor(transformation.data[d].min()))
            for d in range(transformation.spec.voxel_size.dims)
        )
        # upper bound, padded by one voxel size per dimension
        bb_max = Coordinate(
            int(math.ceil(transformation.data[d].max())) + s
            for d, s in zip(
                range(transformation.spec.voxel_size.dims),
                transformation.spec.voxel_size,
            )
        )
        # create roi sufficiently large to feed transformation
        source_roi = Roi(bb_min, bb_max - bb_min).snap_to_grid(
            transformation.spec.voxel_size
        )
        return source_roi
def __shift_transformation(self, shift, transformation):
for d in range(transformation.shape[0]):
transformation[d] += shift[d]
| 24,947 | 37.205207 | 138 | py |
gunpowder | gunpowder-master/gunpowder/nodes/snapshot.py | import logging
import numpy as np
import os
from .batch_filter import BatchFilter
from gunpowder.batch_request import BatchRequest
from gunpowder.ext import h5py
from gunpowder.ext import ZarrFile
logger = logging.getLogger(__name__)
class Snapshot(BatchFilter):
    """Save a passing batch in an HDF file.
    The default behaviour is to periodically save a snapshot after
    ``every`` iterations.
    Data-dependent criteria for saving can be implemented by subclassing and
    overwriting :func:`write_if`. This method is applied as an additional
    filter to the batches picked for periodic saving. It should return ``True``
    if a batch meets the criteria for saving.
    Args:
        dataset_names (``dict``, :class:`ArrayKey` -> ``string``):
            A dictionary from array keys to names of the datasets to store them
            in.
        output_dir (``string``):
            The directory to save the snapshots. Will be created, if it does
            not exist.
        output_filename (``string``):
            Template for output filenames. ``{id}`` in the string will be
            replaced with the ID of the batch. ``{iteration}`` with the training
            iteration (if training was performed on this batch).
        every (``int``):
            How often to save a batch. ``every=1`` indicates that every batch
            will be stored, ``every=2`` every second and so on. By default,
            every batch will be stored.
        additional_request (:class:`BatchRequest`):
            An additional batch request to merge with the passing request, if a
            snapshot is to be made. If not given, only the arrays that are in
            the batch anyway are recorded. This is useful to request additional
            arrays like loss gradients for visualization that are otherwise not
            needed.
        compression_type (``string`` or ``int``):
            Compression strategy.  Legal values are ``gzip``, ``szip``,
            ``lzf``. If an integer between 1 and 10, this indicates ``gzip``
            compression level.
        dataset_dtypes (``dict``, :class:`ArrayKey` -> data type):
            A dictionary from array keys to datatype (eg. ``np.int8``). If
            given, arrays are stored using this type. The original arrays
            within the pipeline remain unchanged.
        store_value_range (``bool``):
            If set to ``True``, store range of values in data set attributes.
    """

    def __init__(
        self,
        dataset_names,
        output_dir="snapshots",
        output_filename="{id}.zarr",
        every=1,
        additional_request=None,
        compression_type=None,
        dataset_dtypes=None,
        store_value_range=False,
    ):
        self.dataset_names = dataset_names
        self.output_dir = output_dir
        self.output_filename = output_filename
        # clamp to >= 1 so the modulo test in prepare() is always valid
        self.every = max(1, every)
        self.additional_request = (
            BatchRequest() if additional_request is None else additional_request
        )
        # number of batches seen so far, drives the periodic saving
        self.n = 0
        self.compression_type = compression_type
        self.store_value_range = store_value_range
        if dataset_dtypes is None:
            self.dataset_dtypes = {}
        else:
            self.dataset_dtypes = dataset_dtypes
        # file open mode; "w" overwrites an existing snapshot of the same name
        self.mode = "w"

    def write_if(self, batch):
        """To be implemented in subclasses.
        This function is run in :func:`process` and acts as a data-dependent
        filter for saving snapshots.
        Args:
            batch (:class:`Batch`):
                The batch received from upstream.
        Returns:
            ``True`` if ``batch`` should be written to snapshot, ``False``
            otherwise.
        """
        return True

    def setup(self):
        """Check the additional request and announce the keys it provides."""
        # every additionally requested key needs a dataset name to be stored
        for key, _ in self.additional_request.items():
            assert key in self.dataset_names, (
                "%s requested but not in dataset_names" % key
            )
        for array_key in self.additional_request.array_specs.keys():
            spec = self.spec[array_key]
            self.updates(array_key, spec)
        for graph_key in self.additional_request.graph_specs.keys():
            spec = self.spec[graph_key]
            self.updates(graph_key, spec)

    def prepare(self, request):
        """Forward the request upstream, merged with the additional request
        when this batch is due for a snapshot."""
        deps = BatchRequest()
        for key, spec in request.items():
            if key in self.dataset_names:
                deps[key] = spec
        self.record_snapshot = self.n % self.every == 0
        if self.record_snapshot:
            # append additional array requests, don't overwrite existing ones
            for array_key, spec in self.additional_request.array_specs.items():
                if array_key not in deps:
                    deps[array_key] = spec
            for graph_key, spec in self.additional_request.graph_specs.items():
                if graph_key not in deps:
                    deps[graph_key] = spec
            for key in self.dataset_names.keys():
                assert key in deps, "%s wanted for %s, but not in request." % (
                    key,
                    self.name(),
                )
        return deps

    def process(self, batch, request):
        """Write the batch to an HDF5/zarr snapshot file if it is due."""
        if self.record_snapshot and self.write_if(batch):
            # create the output directory if it does not exist yet; do not
            # swallow unrelated OS errors (previously a bare ``except``)
            os.makedirs(self.output_dir, exist_ok=True)
            snapshot_name = os.path.join(
                self.output_dir,
                self.output_filename.format(
                    id=str(batch.id).zfill(8), iteration=int(batch.iteration or 0)
                ),
            )
            logger.info("saving to %s", snapshot_name)
            # pick the file backend from the filename extension
            if snapshot_name.endswith(".hdf"):
                open_func = h5py.File
            elif snapshot_name.endswith(".zarr"):
                open_func = ZarrFile
            else:
                logger.warning("ambiguous file type")
                open_func = h5py.File
            with open_func(snapshot_name, self.mode) as f:
                for array_key, array in batch.arrays.items():
                    if array_key not in self.dataset_names:
                        continue
                    ds_name = self.dataset_names[array_key]
                    if array_key in self.dataset_dtypes:
                        # cast only for storage, the pipeline array is untouched
                        dtype = self.dataset_dtypes[array_key]
                        dataset = f.create_dataset(
                            name=ds_name,
                            data=array.data.astype(dtype),
                            compression=self.compression_type,
                        )
                    else:
                        dataset = f.create_dataset(
                            name=ds_name,
                            data=array.data,
                            compression=self.compression_type,
                        )
                    if not array.spec.nonspatial:
                        if array.spec.roi is not None:
                            dataset.attrs["offset"] = array.spec.roi.offset
                        dataset.attrs["resolution"] = self.spec[array_key].voxel_size
                    if self.store_value_range:
                        # ``.item()`` converts the numpy scalar to a Python
                        # scalar (np.asscalar was removed in numpy >= 1.23)
                        dataset.attrs["value_range"] = (
                            array.data.min().item(),
                            array.data.max().item(),
                        )
                    # if array has attributes, add them to the dataset
                    for attribute_name, attribute in array.attrs.items():
                        dataset.attrs[attribute_name] = attribute
                for graph_key, graph in batch.graphs.items():
                    if graph_key not in self.dataset_names:
                        continue
                    ds_name = self.dataset_names[graph_key]
                    # store graphs as three parallel datasets:
                    # node ids, node locations, and edges (id pairs)
                    node_ids = []
                    locations = []
                    edges = []
                    for node in graph.nodes:
                        node_ids.append(node.id)
                        locations.append(node.location)
                    for edge in graph.edges:
                        edges.append((edge.u, edge.v))
                    f.create_dataset(
                        name=f"{ds_name}-ids",
                        data=np.array(node_ids, dtype=int),
                        compression=self.compression_type,
                    )
                    f.create_dataset(
                        name=f"{ds_name}-locations",
                        data=np.array(locations),
                        compression=self.compression_type,
                    )
                    f.create_dataset(
                        name=f"{ds_name}-edges",
                        data=np.array(edges),
                        compression=self.compression_type,
                    )
                if batch.loss is not None:
                    f["/"].attrs["loss"] = float(batch.loss)
        # count every batch, not only the saved ones
        self.n += 1
| 8,947 | 34.367589 | 85 | py |
gunpowder | gunpowder-master/gunpowder/nodes/simple_augment.py | import logging
import random
import itertools
import numpy as np
from .batch_filter import BatchFilter
from gunpowder.coordinate import Coordinate
logger = logging.getLogger(__name__)
class SimpleAugment(BatchFilter):
    """Randomly mirror and transpose all :class:`Arrays<Array>` and
    :class:`Graph` in a batch.
    Args:
        mirror_only (``list`` of ``int``, optional):
            If set, only mirror between the given axes. This is useful to
            exclude channels that have a set direction, like time.
        transpose_only (``list`` of ``int``, optional):
            If set, only transpose between the given axes. This is useful to
            limit the transpose to axes with the same resolution or to exclude
            non-spatial dimensions.
        mirror_probs (``list`` of ``float``, optional):
            If set, provides the probability for mirroring given axes. Default
            is 0.5 per axis. If given, must be given for every axis. i.e.
            [0,1,0] for 100% chance of mirroring axis 1 an no others.
        transpose_probs (``dict`` of ``tuple`` -> ``float``
                         or ``list`` of ``float``, optional):
            The probability of transposing. If None, each transpose is equally
            likely.
            Can also be a dictionary of for ``tuple`` -> ``float``. For example
            {(0,1,2):0.5, (1,0,2):0.5} to define a 50% chance of transposing axes
            0 and 1. Note that if a provided option violates the `transpose_only`
            arg it will be dropped and remaining options will be reweighted.
            Can also be provided as a list of ``float``. i.e. [0.3, 0.5, 0.7].
            This will automatically generate a list of possible permutations
            and attempt to weight them appropriately. A weight of 0 means
            this axis will never be transposed, a weight of 1 means this axis
            will always be transposed.
    """

    def __init__(
        self,
        mirror_only=None,
        transpose_only=None,
        mirror_probs=None,
        transpose_probs=None,
    ):
        self.mirror_only = mirror_only
        self.mirror_probs = mirror_probs
        self.transpose_only = transpose_only
        self.transpose_probs = transpose_probs
        # filled in in setup(), once the pipeline's dimensionality is known
        self.mirror_mask = None
        self.dims = None
        self.transpose_dims = None

    def setup(self):
        """Derive per-axis mirror masks and transpose permutation weights."""
        self.dims = self.spec.get_total_roi().dims
        # mirror_mask and transpose_dims refer to the indices of the spatial
        # dimensions only, starting counting at 0 for the first spatial
        # dimension
        if self.mirror_only is None:
            self.mirror_mask = [True] * self.dims
        else:
            self.mirror_mask = [d in self.mirror_only for d in range(self.dims)]
        if self.mirror_probs is None:
            self.mirror_probs = [0.5] * self.dims
        if self.transpose_only is None:
            self.transpose_dims = list(range(self.dims))
        else:
            self.transpose_dims = self.transpose_only
        if self.transpose_probs is None:
            self.permutation_dict = None
        elif isinstance(self.transpose_probs, list):
            # per-axis probabilities: enumerate all permutations and weight
            # each by the product of its per-axis (not-)transposed probs
            self.permutation_dict = {}
            for permutation in itertools.permutations(range(self.dims), self.dims):
                total_prob = 1
                for i, j, p in zip(range(self.dims), permutation, self.transpose_probs):
                    if i not in self.transpose_dims and i != j:
                        # permutation moves an axis that must stay fixed
                        total_prob = 0
                    else:
                        total_prob *= (1 - p) if i == j else p
                if total_prob > 0:
                    self.permutation_dict[permutation] = total_prob
        elif isinstance(self.transpose_probs, dict):
            # explicit permutation -> weight mapping: drop entries that move
            # axes outside transpose_dims. (Previously this checked
            # ``self.transpose_only``, which crashes with a TypeError when
            # transpose_only is None and disagrees with the list branch.)
            self.permutation_dict = {}
            for k, v in self.transpose_probs.items():
                valid = True
                for i, j in enumerate(k):
                    if i not in self.transpose_dims and i != j:
                        valid = False
                if valid:
                    self.permutation_dict[k] = v

    def prepare(self, request):
        """Sample mirror/transpose for this batch and adapt the upstream
        request accordingly (by applying the inverse augmentation)."""
        random.seed(request.random_seed)
        self.mirror = [
            random.random() < self.mirror_probs[d] if self.mirror_mask[d] else 0
            for d in range(self.dims)
        ]
        if self.permutation_dict is not None:
            t = random.choices(
                list(self.permutation_dict.keys()),
                weights=list(self.permutation_dict.values()),
                k=1,
            )[0]
        else:
            t = random.sample(self.transpose_dims, k=len(self.transpose_dims))
        self.transpose = list(range(self.dims))
        for o, n in zip(self.transpose_dims, t):
            self.transpose[o] = n
        logger.debug("mirror = %s", self.mirror)
        logger.debug("transpose = %s", self.transpose)
        # the upstream request has to be transposed with the inverse
        # permutation
        reverse_transpose = [0] * self.dims
        for d in range(self.dims):
            reverse_transpose[self.transpose[d]] = d
        logger.debug("downstream request = %s", request)
        self.__transpose_request(request, reverse_transpose)
        self.__mirror_request(request, self.mirror)
        logger.debug("upstream request = %s", request)
        return request

    def process(self, batch, request):
        """Apply the sampled mirror and transpose to arrays and graphs."""
        # mirror and transpose ROIs of arrays & points in batch
        total_roi = batch.get_total_roi().copy()
        requested_keys = request.array_specs.keys()
        lcm_voxel_size = self.spec.get_lcm_voxel_size(requested_keys)
        for collection_type in [batch.arrays, batch.graphs]:
            for key, collector in collection_type.items():
                if key not in request:
                    continue
                if collector.spec.roi is None:
                    continue
                logger.debug("total ROI = %s", batch.get_total_roi())
                logger.debug("upstream %s ROI = %s", key, collector.spec.roi)
                self.__mirror_roi(collector.spec.roi, total_roi, self.mirror)
                logger.debug("mirrored %s ROI = %s", key, collector.spec.roi)
                self.__transpose_roi(
                    collector.spec.roi, total_roi, self.transpose, lcm_voxel_size
                )
                logger.debug("transposed %s ROI = %s", key, collector.spec.roi)
        mirror = tuple(slice(None, None, -1 if m else 1) for m in self.mirror)
        # arrays
        for array_key, array in batch.arrays.items():
            if array_key not in request:
                continue
            if array.spec.nonspatial:
                continue
            # leading non-spatial (channel) dimensions are left untouched
            num_channels = len(array.data.shape) - self.dims
            channel_slices = (slice(None, None),) * num_channels
            array.data = array.data[channel_slices + mirror]
            transpose = [t + num_channels for t in self.transpose]
            # (fixed a duplicated ``array.data = array.data = ...`` assignment)
            array.data = array.data.transpose(list(range(num_channels)) + transpose)
        # graphs
        total_roi_offset = total_roi.offset
        total_roi_center = total_roi.center
        if lcm_voxel_size is not None:
            # align the rotation/transposition center with the voxel grid
            nearest_voxel_shift = Coordinate(
                (d % v) for d, v in zip(total_roi_center, lcm_voxel_size)
            )
            total_roi_center = total_roi_center - nearest_voxel_shift
        total_roi_end = total_roi.end
        logger.debug("augmenting in %s and center %s", total_roi, total_roi_center)
        for graph_key, graph in batch.graphs.items():
            if graph_key not in request:
                continue
            logger.debug("converting nodes in graph %s", graph_key)
            for node in list(graph.nodes):
                logger.debug("old location: %s, %s", node.id, node.location)
                # mirror
                location_in_total_offset = np.asarray(node.location) - total_roi_offset
                node.location = np.asarray(
                    [
                        total_roi_end[dim] - location_in_total_offset[dim]
                        if m
                        else node.location[dim]
                        for dim, m in enumerate(self.mirror)
                    ],
                    dtype=graph.spec.dtype,
                )
                logger.debug("after mirror: %s, %s", node.id, node.location)
                # transpose
                location_in_total_center = np.asarray(node.location) - total_roi_center
                if self.transpose != list(range(self.dims)):
                    for d in range(self.dims):
                        node.location[d] = (
                            location_in_total_center[self.transpose[d]]
                            + total_roi_center[d]
                        )
                logger.debug("after transpose: %s, %s", node.id, node.location)
                # due to the mirroring, points at the lower boundary of the ROI
                # could fall on the upper one, which excludes them from the ROI
                if not graph.spec.roi.contains(node.location):
                    graph.remove_node(node)

    def __mirror_request(self, request, mirror):
        """Mirror the ROIs of all keys in ``request`` in place."""
        total_roi = request.get_total_roi().copy()
        for key, spec in request.items():
            if spec.roi is not None:
                self.__mirror_roi(spec.roi, total_roi, mirror)

    def __transpose_request(self, request, transpose):
        """Transpose the ROIs of all keys in ``request`` in place."""
        total_roi = request.get_total_roi().copy()
        requested_keys = request.array_specs.keys()
        lcm_voxel_size = self.spec.get_lcm_voxel_size(requested_keys)
        for key, spec in request.items():
            if spec.roi is not None:
                self.__transpose_roi(spec.roi, total_roi, transpose, lcm_voxel_size)

    def __mirror_roi(self, roi, total_roi, mirror):
        """Mirror ``roi`` within ``total_roi`` along the flagged axes."""
        total_roi_offset = total_roi.offset
        total_roi_shape = total_roi.shape
        roi_offset = roi.offset
        roi_shape = roi.shape
        roi_in_total_offset = roi_offset - total_roi_offset
        end_of_roi_in_total = roi_in_total_offset + roi_shape
        roi_in_total_offset_mirrored = total_roi_shape - end_of_roi_in_total
        roi_offset = Coordinate(
            total_roi_offset[d] + roi_in_total_offset_mirrored[d]
            if mirror[d]
            else roi_offset[d]
            for d in range(self.dims)
        )
        roi.offset = roi_offset

    def __transpose_roi(self, roi, total_roi, transpose, lcm_voxel_size):
        """Transpose ``roi`` around the (grid-aligned) center of ``total_roi``."""
        logger.debug("original roi = %s", roi)
        center = total_roi.center
        if lcm_voxel_size is not None:
            nearest_voxel_shift = Coordinate(
                (d % v) for d, v in zip(center, lcm_voxel_size)
            )
            center = center - nearest_voxel_shift
        logger.debug("center = %s", center)
        # Get distance from center, then transpose
        dist_to_center = center - roi.offset
        dist_to_center = Coordinate(
            dist_to_center[transpose[d]] for d in range(self.dims)
        )
        logger.debug("dist_to_center = %s", dist_to_center)
        # Using the tranposed distance to center, get the correct offset.
        new_offset = center - dist_to_center
        logger.debug("new_offset = %s", new_offset)
        shape = tuple(roi.shape[transpose[d]] for d in range(self.dims))
        roi.offset = new_offset
        roi.shape = shape
        logger.debug("tranposed roi = %s", roi)
| 11,423 | 37.857143 | 88 | py |
gunpowder | gunpowder-master/gunpowder/nodes/grow_boundary.py | import numpy as np
from scipy import ndimage
from .batch_filter import BatchFilter
from gunpowder.array import Array
class GrowBoundary(BatchFilter):
    """Grow a boundary between regions in a label array. Does not grow at the
    border of the batch or an optionally provided mask.
    Args:
        labels (:class:`ArrayKey`):
            The array containing labels.
        mask (:class:`ArrayKey`, optional):
            A mask indicating unknown regions. This is to avoid boundaries to
            grow between labelled and unknown regions.
        steps (``int``, optional):
            Number of voxels (not world units!) to grow.
        background (``int``, optional):
            The label to assign to the boundary voxels.
        only_xy (``bool``, optional):
            Do not grow a boundary in the z direction.
    """
    def __init__(self, labels, mask=None, steps=1, background=0, only_xy=False):
        self.labels = labels
        self.mask = mask
        self.steps = steps
        self.background = background
        self.only_xy = only_xy
    def process(self, batch, request):
        """Grow boundaries in the labels array of the batch, in place."""
        gt = batch.arrays[self.labels]
        gt_mask = None if not self.mask else batch.arrays[self.mask]
        if gt_mask is not None:
            # grow only in area where mask and gt are defined
            crop = gt_mask.spec.roi.intersect(gt.spec.roi)
            if crop is None:
                raise RuntimeError(
                    "GT_LABELS %s and GT_MASK %s ROIs don't intersect."
                    % (gt.spec.roi, gt_mask.spec.roi)
                )
            voxel_size = self.spec[self.labels].voxel_size
            # convert the world-unit intersection into voxel slices relative
            # to each array's own ROI
            crop_in_gt = (
                crop.shift(-gt.spec.roi.offset) / voxel_size
            ).get_bounding_box()
            crop_in_gt_mask = (
                crop.shift(-gt_mask.spec.roi.offset) / voxel_size
            ).get_bounding_box()
            # numpy basic slicing yields views, so __grow modifies gt.data
            self.__grow(
                gt.data[crop_in_gt], gt_mask.data[crop_in_gt_mask], self.only_xy
            )
        else:
            self.__grow(gt.data, only_xy=self.only_xy)
    def __grow(self, gt, gt_mask=None, only_xy=False):
        """Erode every label in ``gt`` (in place) and set the freed voxels to
        ``self.background``.

        Args:
            gt: label array to modify in place.
            gt_mask: optional mask; voxels where it is 0 are treated as
                unknown, so labels do not shrink at the mask boundary.
            only_xy: if True, ``gt`` must be 3D and each z-section is grown
                independently (no growth along z).
        """
        if gt_mask is not None:
            assert (
                gt.shape == gt_mask.shape
            ), "GT_LABELS and GT_MASK do not have the same size."
        if only_xy:
            assert len(gt.shape) == 3
            for z in range(gt.shape[0]):
                self.__grow(gt[z], None if gt_mask is None else gt_mask[z])
            return
        # get all foreground voxels by erosion of each component
        foreground = np.zeros(shape=gt.shape, dtype=bool)
        masked = None
        if gt_mask is not None:
            masked = np.equal(gt_mask, 0)
        for label in np.unique(gt):
            if label == self.background:
                continue
            label_mask = gt == label
            # Assume that masked out values are the same as the label we are
            # eroding in this iteration. This ensures that at the boundary to
            # a masked region the value blob is not shrinking.
            if masked is not None:
                label_mask = np.logical_or(label_mask, masked)
            # border_value=1 keeps labels touching the batch border from
            # shrinking there (no boundary is grown at the border)
            eroded_label_mask = ndimage.binary_erosion(
                label_mask, iterations=self.steps, border_value=1
            )
            foreground = np.logical_or(eroded_label_mask, foreground)
        # label new background
        background = np.logical_not(foreground)
        gt[background] = self.background
| 3,510 | 32.438095 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.