repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
RBNN | RBNN-master/imagenet/models_imagenet/resnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.init as init
from modules import *
BN = None
__all__ = ['resnet18_1w1a', 'resnet34_1w1a']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
}
def conv3x3Binary(in_planes, out_planes, stride=1):
    """Binarized 3x3 convolution with padding 1 and no bias."""
    conv = BinarizeConv2d(in_planes, out_planes, kernel_size=3,
                          stride=stride, padding=1, bias=False)
    return conv
class BasicBlock(nn.Module):
    """Binary ResNet basic block: two binarized 3x3 convs with Hardtanh.

    Relies on the module-level ``BN`` callable, which is assigned (to
    ``nn.BatchNorm2d``) inside ``ResNet.__init__`` — so a ``ResNet`` must be
    constructed before this block. Wiring is Bi-Real style: each conv has its
    own skip connection, added *before* the non-linearity.
    """
    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3Binary(inplanes, planes, stride)
        self.bn1 = BN(planes)  # BN is the global bound by ResNet.__init__
        self.nonlinear = nn.Hardtanh(inplace=True)
        self.conv2 = conv3x3Binary(planes, planes)
        self.bn2 = BN(planes)
        self.downsample = downsample  # projection for the first skip, if shapes differ
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        if self.downsample is not None:
            # Match channel/spatial shape for the first skip connection.
            residual = self.downsample(x)
        out += residual
        out = self.nonlinear(out)
        # Second conv: the activated output itself is the skip source.
        residual = out
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.nonlinear(out)
        return out
class ResNet(nn.Module):
    """Binary ResNet backbone for ImageNet-sized inputs.

    Side effect: binds the module-level globals ``BN`` (to ``nn.BatchNorm2d``)
    and ``bypass_bn_weight_list``, which ``BasicBlock`` reads. The
    ``bn_group_size`` / ``bn_group`` / ``bn_sync_stats`` / ``use_sync_bn``
    arguments are accepted but unused here (presumably kept for a sync-BN
    variant — TODO confirm).
    """

    def __init__(self, block, layers, num_classes=1000, deep_stem=False,
                 avg_down=False, bypass_last_bn=False,
                 bn_group_size=1,
                 bn_group=None,
                 bn_sync_stats=False,
                 use_sync_bn=True):

        global BN, bypass_bn_weight_list
        BN = nn.BatchNorm2d
        bypass_bn_weight_list = []

        self.inplanes = 64
        super(ResNet, self).__init__()

        self.deep_stem = deep_stem
        self.avg_down = avg_down

        if self.deep_stem:
            # ResNet-C style stem: three 3x3 convs instead of one 7x7.
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False),
                BN(32),
                nn.Hardtanh(inplace=True),
                nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False),
                BN(32),
                nn.Hardtanh(inplace=True),
                nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False),
            )
        else:
            self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)

        self.bn1 = BN(64)
        self.nonlinear1 = nn.Hardtanh(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)  # assumes 224x224 input -> 7x7 map here
        self.bn2 = nn.BatchNorm1d(512 * block.expansion)
        self.nonlinear2 = nn.Hardtanh(inplace=True)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.bn3 = nn.BatchNorm1d(num_classes)
        # NOTE(review): forward() never applies nonlinear2 or bn3 — confirm
        # whether these layers are intentionally unused.

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                # Near-zero BN scale at init.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()

        if bypass_last_bn:
            # Zero any BN weights registered in the (currently empty) list.
            for param in bypass_bn_weight_list:
                param.data.zero_()
            print('bypass {} bn.weight in BottleneckBlocks'.format(len(bypass_bn_weight_list)))

    def _make_layer(self, block, planes, blocks, stride=1, avg_down=False):
        # NOTE(review): the `avg_down` parameter is unused; self.avg_down
        # (set in __init__) is consulted instead.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            if self.avg_down:
                # ResNet-D style: avg-pool then stride-1 1x1 projection.
                downsample = nn.Sequential(
                    nn.AvgPool2d(stride, stride=stride, ceil_mode=True, count_include_pad=False),
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=1, bias=False),
                    BN(planes * block.expansion),
                )
            else:
                # Standard strided 1x1 projection shortcut.
                downsample = nn.Sequential(
                    nn.Conv2d(self.inplanes, planes * block.expansion,
                              kernel_size=1, stride=stride, bias=False),
                    BN(planes * block.expansion),
                )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.nonlinear1(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.bn2(x)
        x = self.fc(x)

        return x
def resnet18_1w1a(pretrained=False, **kwargs):
    """Build a 1-bit-weight / 1-bit-activation ResNet-18.

    Args:
        pretrained (bool): If True, load the ImageNet checkpoint from
            `model_urls['resnet18']` into the model.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet18'])
        model.load_state_dict(state)
    return model
def resnet34_1w1a(pretrained=False, **kwargs):
    """Build a 1-bit-weight / 1-bit-activation ResNet-34.

    Args:
        pretrained (bool): If True, load the ImageNet checkpoint from
            `model_urls['resnet34']` into the model.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet34'])
        model.load_state_dict(state)
    return model
| 5,965 | 31.248649 | 97 | py |
RBNN | RBNN-master/imagenet/models_imagenet/__init__.py | from .resnet import * | 21 | 21 | 21 | py |
RBNN | RBNN-master/imagenet/models_cifar/resnet2.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
__all__ =['resnet18A_1w1a','resnet18B_1w1a','resnet18C_1w1a','resnet18_1w1a']
class BasicBlock(nn.Module):
    """Binarized basic block: conv-bn-hardtanh, conv-bn, add skip, hardtanh."""
    expansion = 1  # output channels = planes * expansion

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()  # identity unless the shape changes
        if stride != 1 or in_planes != self.expansion*planes:
            # Binarized 1x1 projection to match spatial size / channels.
            self.shortcut = nn.Sequential(
                BinarizeConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.hardtanh(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)  # skip added before the final activation
        out = F.hardtanh(out)
        return out
class Bottleneck(nn.Module):
    """Binarized bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4).

    Uses ReLU activations, unlike the hardtanh-based BasicBlock above.
    """
    expansion = 4  # output channels = planes * expansion

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = BinarizeConv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion*planes)

        self.shortcut = nn.Sequential()  # identity unless the shape changes
        if stride != 1 or in_planes != self.expansion*planes:
            # Binarized 1x1 projection shortcut.
            self.shortcut = nn.Sequential(
                BinarizeConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class ResNet(nn.Module):
    """Binary CIFAR ResNet with configurable per-stage channel widths.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        num_blocks: number of blocks in each of the 4 stages.
        num_channel: channel width of each stage (length-4 list).
        num_classes: classifier output size.
    """

    def __init__(self, block, num_blocks, num_channel, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = num_channel[0]

        # Full-precision stem convolution (not binarized).
        self.conv1 = nn.Conv2d(3, num_channel[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(num_channel[0])
        self.layer1 = self._make_layer(block, num_channel[0], num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, num_channel[1], num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, num_channel[2], num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, num_channel[3], num_blocks[3], stride=2)
        self.linear = nn.Linear(num_channel[3]*block.expansion, num_classes)
        self.bn2 = nn.BatchNorm1d(num_channel[3]*block.expansion)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.bn1(self.conv1(x))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)  # assumes 32x32 input -> 4x4 map here
        out = out.view(out.size(0), -1)
        out = self.bn2(out)
        out = self.linear(out)
        return out
# Width variants of the binary ResNet-18; the second list argument gives the
# per-stage channel counts (layer1..layer4).
def resnet18A_1w1a(**kwargs):
    return ResNet(BasicBlock, [2,2,2,2],[32,32,64,128],**kwargs)

def resnet18B_1w1a(**kwargs):
    return ResNet(BasicBlock, [2,2,2,2],[32,64,128,256],**kwargs)

def resnet18C_1w1a(**kwargs):
    return ResNet(BasicBlock, [2,2,2,2],[64,64,128,256],**kwargs)

def resnet18_1w1a(**kwargs):
    # Standard ResNet-18 channel widths.
    return ResNet(BasicBlock, [2,2,2,2],[64,128,256,512],**kwargs)
# Fix: this file's ResNet requires a per-stage `num_channel` list, which the
# original calls omitted — every one of these constructors raised TypeError.
# Standard widths [64, 128, 256, 512] are supplied; **kwargs is forwarded
# (backward compatible, the functions previously took no arguments).
def ResNet34(**kwargs):
    """ResNet-34 (BasicBlock) with standard channel widths."""
    return ResNet(BasicBlock, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)

def ResNet50(**kwargs):
    """ResNet-50 (Bottleneck, expansion 4)."""
    return ResNet(Bottleneck, [3, 4, 6, 3], [64, 128, 256, 512], **kwargs)

def ResNet101(**kwargs):
    """ResNet-101 (Bottleneck)."""
    return ResNet(Bottleneck, [3, 4, 23, 3], [64, 128, 256, 512], **kwargs)

def ResNet152(**kwargs):
    """ResNet-152 (Bottleneck)."""
    return ResNet(Bottleneck, [3, 8, 36, 3], [64, 128, 256, 512], **kwargs)
def test():
    """Smoke test: push one random CIFAR-sized batch through resnet18_1w1a."""
    net = resnet18_1w1a()
    y = net(torch.randn(1,3,32,32))
    print(y.size())

# test()
| 4,704 | 33.343066 | 107 | py |
RBNN | RBNN-master/imagenet/models_cifar/resnet.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
from torch.autograd import Variable
__all__ = ['resnet20_1w1a']
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an nn.Module.

    Used to express the option-A (zero-padding) shortcut as a layer.
    """

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd  # the callable applied in forward

    def forward(self, x):
        # Simply delegate to the wrapped callable.
        return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class BasicBlock_1w1a(nn.Module):
    """Binarized CIFAR basic block: hardtanh activations, binary 3x3 convs,
    option-A (zero-pad) or option-B (binary 1x1 projection) shortcut."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock_1w1a, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()  # identity unless the shape changes
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Subsample spatially by 2 and zero-pad the channel dim.
                self.shortcut = LambdaLayer(lambda x:
                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                     BinarizeConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )

    def forward(self, x):
        out = self.bn1(self.conv1(x))
        out = F.hardtanh(out)
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)  # skip added before the final activation
        out = F.hardtanh(out)
        return out
class ResNet(nn.Module):
    """CIFAR ResNet trunk: 3 stages of 16/32/64 channels, global average
    pooling and a BatchNorm1d before the final linear classifier."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16

        # Full-precision stem conv.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.bn2 = nn.BatchNorm1d(64)
        self.linear = nn.Linear(64, num_classes)

        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                # Near-zero BN scale at init.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()
            if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion

        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.hardtanh(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.avg_pool2d(out, out.size()[3])  # global average pooling
        out = out.view(out.size(0), -1)
        out = self.bn2(out)
        out = self.linear(out)
        return out
def resnet20_1w1a(**kwargs):
    # Binary (1-bit weight / 1-bit activation) ResNet-20.
    return ResNet(BasicBlock_1w1a, [3, 3, 3],**kwargs)

# Full-precision CIFAR baselines.
def resnet20():
    return ResNet(BasicBlock, [3, 3, 3])

def resnet32():
    return ResNet(BasicBlock, [5, 5, 5])

def resnet44():
    return ResNet(BasicBlock, [7, 7, 7])

def resnet56():
    return ResNet(BasicBlock, [9, 9, 9])

def resnet110():
    return ResNet(BasicBlock, [18, 18, 18])

def resnet1202():
    return ResNet(BasicBlock, [200, 200, 200])
def test(net):
    """Print the trainable-parameter count and weight-layer count of `net`."""
    import numpy as np
    trainable = [p for p in net.parameters() if p.requires_grad]
    total_params = 0
    for tensor in trainable:
        total_params += np.prod(tensor.data.numpy().shape)
    print("Total number of params", total_params)
    # A "layer" here is any trainable tensor with more than one dimension
    # (conv / linear weights), excluding biases and BN parameters.
    weight_like = [p for p in trainable if len(p.data.size()) > 1]
    print("Total layers", len(weight_like))
if __name__ == "__main__":
    # Report parameter/layer statistics for every exported resnet builder.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
| 6,508 | 31.708543 | 120 | py |
RBNN | RBNN-master/imagenet/models_cifar/vgg.py | '''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
from modules import *
__all__ = ['vgg_small_1w1a']
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
    """VGG classifier head on top of a `features` extractor.

    Expects `features` to produce 512 values per sample after flattening;
    they are batch-normed, activated, classified and log-softmaxed.

    Args:
        features: nn.Module producing the convolutional feature map.
        num_classes: size of the classification output.
    """

    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        # Bug fix: the original wrote `nn.BatchNorm1d(512 * block.expansion)`,
        # but `block` is not defined anywhere in this class (copied from the
        # ResNet head), so constructing a VGG raised NameError. The flattened
        # feature width here is simply 512.
        self.bn2 = nn.BatchNorm1d(512)
        self.nonlinear2 = nn.Hardtanh(inplace=True)
        self.classifier = nn.Linear(512, num_classes)
        self.bn3 = nn.BatchNorm1d(num_classes)
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten to (N, 512)
        x = self.bn2(x)
        x = self.nonlinear2(x)
        x = self.classifier(x)
        x = self.bn3(x)
        x = self.logsoftmax(x)
        return x

    def _initialize_weights(self):
        """He-style init for convs, unit BN scale, small-normal linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Build the VGG feature extractor described by `cfg`.

    `cfg` entries are either an int (output channels of a 3x3 conv with
    padding 1) or the string 'M' (2x2 max-pooling). When `batch_norm` is
    True, a BatchNorm2d is inserted after each convolution. Activations
    are Hardtanh (binary-network friendly) rather than ReLU.
    """
    modules = []
    channels = 3  # RGB input
    for spec in cfg:
        if spec == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        conv = nn.Conv2d(channels, spec, kernel_size=3, padding=1)
        if batch_norm:
            modules.extend([conv, nn.BatchNorm2d(spec), nn.Hardtanh(inplace=True)])
        else:
            modules.extend([conv, nn.Hardtanh(inplace=True)])
        channels = spec
    return nn.Sequential(*modules)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
'F': [128, 128, 'M', 512, 512, 'M'],
}
# NOTE(review): the docstrings below advertise a `pretrained` argument, but
# none of these functions accept one (kwargs go straight to VGG, which has no
# such parameter) — confirm whether pretrained loading was ever wired up.
def vgg11(**kwargs):
    """VGG 11-layer model (configuration "A")
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = VGG(make_layers(cfg['A']), **kwargs)
    return model

def vgg11_bn(**kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization"""
    model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
    return model

def vgg13(**kwargs):
    """VGG 13-layer model (configuration "B")
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = VGG(make_layers(cfg['B']), **kwargs)
    return model

def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization"""
    model = VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)
    return model

def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D")
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = VGG(make_layers(cfg['D']), **kwargs)
    return model

def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization"""
    model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)
    return model

def vgg19(**kwargs):
    """VGG 19-layer model (configuration "E")
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = VGG(make_layers(cfg['E']), **kwargs)
    return model

def vgg19_bn(**kwargs):
    """VGG 19-layer model (configuration 'E') with batch normalization"""
    model = VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
    return model
class VGG_SMALL_1W1A(nn.Module):
    """VGG-Small with binarized convolutions for CIFAR (32x32 input).

    The stem conv (conv0) and the final classifier stay full-precision;
    conv1..conv5 are binarized. Pooling is applied after conv1/conv3/conv5,
    and the classifier expects the resulting 512x4x4 map.
    """

    def __init__(self, num_classes=10):
        super(VGG_SMALL_1W1A, self).__init__()
        self.conv0 = nn.Conv2d(3, 128, kernel_size=3, padding=1, bias=False)
        self.bn0 = nn.BatchNorm2d(128)
        self.conv1 = BinarizeConv2d(128, 128, kernel_size=3, padding=1, bias=False)
        self.pooling = nn.MaxPool2d(kernel_size=2, stride=2)  # shared, stateless
        self.bn1 = nn.BatchNorm2d(128)
        self.nonlinear = nn.Hardtanh(inplace=True)  # shared activation
        self.conv2 = BinarizeConv2d(128, 256, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(256)
        self.conv3 = BinarizeConv2d(256, 256, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(256)
        self.conv4 = BinarizeConv2d(256, 512, kernel_size=3, padding=1, bias=False)
        self.bn4 = nn.BatchNorm2d(512)
        self.conv5 = BinarizeConv2d(512, 512, kernel_size=3, padding=1, bias=False)
        self.bn5 = nn.BatchNorm2d(512)
        self.fc = nn.Linear(512*4*4, num_classes)
        self._initialize_weights()

    def _initialize_weights(self):
        """He-style init for (binary) convs, unit BN, small-normal linear."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, BinarizeConv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

    def forward(self, x):
        x = self.conv0(x)
        x = self.bn0(x)
        x = self.nonlinear(x)
        x = self.conv1(x)
        x = self.pooling(x)  # 32 -> 16
        x = self.bn1(x)
        x = self.nonlinear(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.nonlinear(x)
        x = self.conv3(x)
        x = self.pooling(x)  # 16 -> 8
        x = self.bn3(x)
        x = self.nonlinear(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.nonlinear(x)
        x = self.conv5(x)
        x = self.pooling(x)  # 8 -> 4
        x = self.bn5(x)
        x = self.nonlinear(x)
        # x = self.pooling(x)
        x = x.view(x.size(0), -1)  # flatten to (N, 512*4*4)
        x = self.fc(x)
        return x
def vgg_small_1w1a(**kwargs):
    """Factory for the binarized VGG-Small CIFAR model."""
    return VGG_SMALL_1W1A(**kwargs)
| 7,083 | 30.766816 | 113 | py |
RBNN | RBNN-master/imagenet/models_cifar/__init__.py | from .resnet import *
from .resnet_bireal import *
from .resnet2 import *
from .vgg import * | 93 | 22.5 | 28 | py |
RBNN | RBNN-master/imagenet/models_cifar/resnet_bireal.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
from torch.autograd import Variable
__all__ = ['resnet20_bireal_1w1a']
class LambdaLayer(nn.Module):
    """Adapter turning a plain callable into an nn.Module (option-A shortcut)."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        # Keep the callable; forward just invokes it.
        self.lambd = lambd

    def forward(self, x):
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Full-precision CIFAR basic block (ReLU, two 3x3 convs) with option-A
    zero-padding or option-B 1x1-conv shortcut."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()  # identity unless the shape changes
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Subsample spatially by 2 and zero-pad the channel dim.
                self.shortcut = LambdaLayer(lambda x:
                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                     nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out
class BasicBlock_1w1a(nn.Module):
    """Bi-Real-style binarized block: each binary conv gets its own skip
    connection (x into conv1's output; conv1's activation into conv2's
    output), each added before the hardtanh."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock_1w1a, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()  # identity unless the shape changes
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Subsample spatially by 2 and zero-pad the channel dim.
                self.shortcut = LambdaLayer(lambda x:
                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                self.shortcut = nn.Sequential(
                     BinarizeConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )

    def forward(self, x):
        out = self.bn1(self.conv1(x))
        out += self.shortcut(x)   # first skip (projected input)
        out = F.hardtanh(out)
        x1 = out                  # second skip source: activated output
        out = self.bn2(self.conv2(out))
        out += x1
        out = F.hardtanh(out)
        return out
class ResNet(nn.Module):
    """CIFAR ResNet trunk (16/32/64-channel stages) shared by the Bi-Real
    style blocks in this file; BatchNorm1d precedes the classifier."""

    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16

        # Full-precision stem conv.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.bn2 = nn.BatchNorm1d(64)
        self.linear = nn.Linear(64, num_classes)

        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                # Near-zero BN scale at init.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()
            if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion

        return nn.Sequential(*layers)

    def forward(self, x):
        out = F.hardtanh(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.avg_pool2d(out, out.size()[3])  # global average pooling
        out = out.view(out.size(0), -1)
        out = self.bn2(out)
        out = self.linear(out)
        return out
def resnet20_bireal_1w1a(**kwargs):
    # Binary ResNet-20 with Bi-Real per-conv shortcuts.
    return ResNet(BasicBlock_1w1a, [3, 3, 3],**kwargs)

# Full-precision CIFAR baselines.
def resnet20():
    return ResNet(BasicBlock, [3, 3, 3])

def resnet32():
    return ResNet(BasicBlock, [5, 5, 5])

def resnet44():
    return ResNet(BasicBlock, [7, 7, 7])

def resnet56():
    return ResNet(BasicBlock, [9, 9, 9])

def resnet110():
    return ResNet(BasicBlock, [18, 18, 18])

def resnet1202():
    return ResNet(BasicBlock, [200, 200, 200])
def test(net):
    """Print trainable-parameter and weight-layer counts for `net`."""
    import numpy as np
    total_params = 0

    for x in filter(lambda p: p.requires_grad, net.parameters()):
        total_params += np.prod(x.data.numpy().shape)
    print("Total number of params", total_params)
    # A "layer" is any trainable tensor with >1 dimensions (conv/linear).
    print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))


if __name__ == "__main__":
    # Report statistics for every exported resnet builder.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
| 6,557 | 31.626866 | 120 | py |
RBNN | RBNN-master/imagenet/utils/common.py | import os
import torch
import logging.config
import shutil
import torch.nn as nn
import numpy
import datetime
def setup_logging(log_file='log.txt',filemode='w'):
    """Setup logging configuration.

    Writes DEBUG-and-above records to `log_file` and mirrors INFO-and-above
    to the console. Pass filemode='a' to append (used when resuming a run).
    Mutates the global root logger.
    """
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s - %(levelname)s - %(message)s",
                        datefmt="%Y-%m-%d %H:%M:%S",
                        filename=log_file,
                        filemode=filemode)
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    # Attach the console handler to the root logger.
    logging.getLogger('').addHandler(console)
def save_checkpoint(state, is_best, path='.', filename='checkpoint.pth.tar', save_all=False):
    """Serialize `state` to `path/filename` with torch.save.

    When `is_best`, also copy it to model_best.pth.tar; when `save_all`,
    also copy to an epoch-stamped file (requires state['epoch']).
    """
    filename = os.path.join(path, filename)
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, os.path.join(path, 'model_best.pth.tar'))
    if save_all:
        shutil.copyfile(filename, os.path.join(
            path, 'checkpoint_epoch_%s.pth.tar' % state['epoch']))
class AverageMeter(object):
    """Track the most recent value and a running (weighted) average.

    Attributes:
        val: last value passed to update().
        sum: weighted sum of all values.
        count: total weight observed.
        avg: sum / count.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        # Return all statistics to their initial state.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        # Record `val` observed with weight `n` and refresh the mean.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (N, C) tensor of class scores.
        target: (N,) tensor of integer class labels.
        topk: iterable of k values to evaluate.

    Returns:
        List of scalar tensors, one per k, with precision@k in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # pred: (maxk, N) indices of the top-maxk classes after the transpose.
    _, pred = output.float().topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # Fix: use reshape(-1) instead of view(-1) — the slice of a
        # transposed tensor can be non-contiguous, which makes .view()
        # raise a RuntimeError in current PyTorch releases.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def get_time(delta, epoch, epochs):
    """Format the elapsed epoch time and estimate the finish timestamp.

    Args:
        delta: datetime.timedelta spent on the epoch just completed.
        epoch: zero-based index of that epoch.
        epochs: total number of epochs.

    Returns:
        (cost_time, finish_time): elapsed time as "H:MM:SS" (or "MM:SS"
        for sub-hour epochs, fractional seconds dropped) and the projected
        completion time formatted "YYYY-mm-dd HH:MM:SS".
    """
    current = datetime.datetime.now()
    # Drop the leading hours field when the epoch took under an hour.
    if delta >= datetime.timedelta(hours=1):
        fields = str(delta).split(':')
    else:
        fields = str(delta).split(':')[1:]
    cost_time = ':'.join(fields).split('.')[0]
    # Remaining time = per-epoch duration times the epochs still to run.
    remaining = delta * (epochs - epoch - 1)
    finish_time = (current + remaining).strftime('%Y-%m-%d %H:%M:%S')
    return cost_time, finish_time
| 2,252 | 27.884615 | 93 | py |
RBNN | RBNN-master/imagenet/utils/options.py | import argparse
import os
"""
args
"""
# Command-line options for RBNN training/evaluation.
# Fix: the help text for --batch_size and --batch_size_test claimed defaults
# (256 / 128) that contradicted the actual defaults (512 / 256).
parser = argparse.ArgumentParser(description='RotationNet')

# Logging
parser.add_argument(
    '--results_dir',
    metavar='RESULTS_DIR',
    default='./results',
    help='results dir')
parser.add_argument(
    '--save',
    metavar='SAVE',
    default='',
    help='saved folder (named by datetime)')
parser.add_argument(
    '--resume',
    dest='resume',
    action='store_true',
    help='resume to latest checkpoint')
parser.add_argument(
    '-e',
    '--evaluate',
    type=str,
    metavar='FILE',
    help='evaluate model FILE on validation set')
parser.add_argument(
    '--seed',
    default=1234,
    type=int,
    help='random seed')

# Model
parser.add_argument(
    '--model',
    '-a',
    metavar='MODEL',
    default='resnet18_1w1a',
    help='model architecture ')
parser.add_argument(
    '--dataset',
    default='imagenet',
    type=str,
    help='dataset, default:imagenet')
parser.add_argument(
    '--data_path',
    type=str,
    default='/home/data',
    help='The dictionary where the dataset is stored.')
parser.add_argument(
    '--type',
    default='torch.cuda.FloatTensor',
    help='type of tensor - e.g torch.cuda.FloatTensor')

# Training
parser.add_argument(
    '--gpus',
    default='0',
    help='gpus used for training - e.g 0,1,3')
parser.add_argument(
    '--lr',
    default=0.1,
    type=float,
    help='learning rate')
parser.add_argument(
    '--weight_decay',
    type=float,
    default=1e-4,
    help='Weight decay of loss. default:1e-4')
parser.add_argument(
    '--momentum',
    default=0.9,
    type=float,
    metavar='M',
    help='momentum')
parser.add_argument(
    '--workers',
    default=8,
    type=int,
    metavar='N',
    help='number of data loading workers (default: 8)')
parser.add_argument(
    '--epochs',
    default=150,
    type=int,
    metavar='N',
    help='number of total epochs to run')
parser.add_argument(
    '--start_epoch',
    default=-1,
    type=int,
    metavar='N',
    help='manual epoch number (useful on restarts)')
parser.add_argument(
    '-b',
    '--batch_size',
    default=512,
    type=int,
    metavar='N',
    help='mini-batch size for training (default: 512)')
parser.add_argument(
    '-bt',
    '--batch_size_test',
    default=256,
    type=int,
    help='mini-batch size for testing (default: 256)')
parser.add_argument(
    '--print_freq',
    '-p',
    default=500,
    type=int,
    metavar='N',
    help='print frequency (default: 500)')
parser.add_argument(
    '--time_estimate',
    default=1,
    type=int,
    metavar='N',
    help='print estimating finish time,set to 0 to disable')
parser.add_argument(
    '--rotation_update',
    default=1,
    type=int,
    metavar='N',
    help='interval of updating rotation matrix (default:1)')
parser.add_argument(
    '--Tmin',
    default=1e-2,
    type=float,
    metavar='M',
    help='minimum of T (default:1e-2)')
parser.add_argument(
    '--Tmax',
    default=1e1,
    type=float,
    metavar='M',
    help='maximum of T (default:1e1)')
parser.add_argument(
    '--lr_type',
    type=str,
    default='cos',
    help='choose lr_scheduler,(default:cos)')
parser.add_argument(
    '--lr_decay_step',
    nargs='+',
    type=int,
    help='lr decay step for MultiStepLR')
parser.add_argument(
    '--a32',
    dest='a32',
    action='store_true',
    help='w1a32')
parser.add_argument(
    '--warm_up',
    dest='warm_up',
    action='store_true',
    help='use warm up or not')
parser.add_argument(
    '--use_dali',
    dest='use_dali',
    action='store_true',
    help='use DALI to load dataset or not')
args = parser.parse_args() | 3,679 | 18.067358 | 60 | py |
RBNN | RBNN-master/cifar/main.py | import argparse
import os
import time
import logging
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import models_cifar
import models_imagenet
import numpy as np
from torch.autograd import Variable
from utils.options import args
from utils.common import *
from modules import *
from datetime import datetime
import dataset
def main():
global args, best_prec1, conv_modules
best_prec1 = 0
random.seed(args.seed)
if args.evaluate:
args.results_dir = '/tmp'
save_path = os.path.join(args.results_dir, args.save)
if not os.path.exists(save_path):
os.makedirs(save_path)
if not args.resume:
with open(os.path.join(save_path,'config.txt'), 'w') as args_file:
args_file.write(str(datetime.now())+'\n\n')
for args_n,args_v in args.__dict__.items():
args_v = '' if not args_v and not isinstance(args_v,int) else args_v
args_file.write(str(args_n)+': '+str(args_v)+'\n')
setup_logging(os.path.join(save_path, 'logger.log'))
logging.info("saving to %s", save_path)
logging.debug("run arguments: %s", args)
else:
setup_logging(os.path.join(save_path, 'logger.log'), filemode='a')
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
if 'cuda' in args.type:
args.gpus = [int(i) for i in args.gpus.split(',')]
cudnn.benchmark = True
else:
args.gpus = None
if args.dataset=='tinyimagenet':
num_classes=200
model_zoo = 'models_imagenet.'
elif args.dataset=='imagenet':
num_classes=1000
model_zoo = 'models_imagenet.'
elif args.dataset=='cifar10':
num_classes=10
model_zoo = 'models_cifar.'
elif args.dataset=='cifar100':
num_classes=100
model_zoo = 'models_cifar.'
if len(args.gpus)==1:
model = eval(model_zoo+args.model)(num_classes=num_classes).cuda()
else:
model = nn.DataParallel(eval(model_zoo+args.model)(num_classes=num_classes))
if not args.resume:
logging.info("creating model %s", args.model)
logging.info("model structure: %s", model)
num_parameters = sum([l.nelement() for l in model.parameters()])
logging.info("number of parameters: %d", num_parameters)
# evaluate
if args.evaluate:
if not os.path.isfile(args.evaluate):
logging.error('invalid checkpoint: {}'.format(args.evaluate))
else:
checkpoint = torch.load(args.evaluate)
if len(args.gpus)>1:
checkpoint['state_dict'] = dataset.add_module_fromdict(checkpoint['state_dict'])
model.load_state_dict(checkpoint['state_dict'])
logging.info("loaded checkpoint '%s' (epoch %s)",
args.evaluate, checkpoint['epoch'])
elif args.resume:
checkpoint_file = os.path.join(save_path,'checkpoint.pth.tar')
if os.path.isdir(checkpoint_file):
checkpoint_file = os.path.join(
checkpoint_file, 'model_best.pth.tar')
if os.path.isfile(checkpoint_file):
checkpoint = torch.load(checkpoint_file)
if len(args.gpus)>1:
checkpoint['state_dict'] = dataset.add_module_fromdict(checkpoint['state_dict'])
args.start_epoch = checkpoint['epoch'] - 1
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
logging.info("loaded checkpoint '%s' (epoch %s)",
checkpoint_file, checkpoint['epoch'])
else:
logging.error("no checkpoint found at '%s'", args.resume)
criterion = nn.CrossEntropyLoss().cuda()
criterion = criterion.type(args.type)
model = model.type(args.type)
if args.evaluate:
val_loader = dataset.load_data(
type='val',
dataset=args.dataset,
data_path=args.data_path,
batch_size=args.batch_size,
batch_size_test=args.batch_size_test,
num_workers=args.workers)
with torch.no_grad():
val_loss, val_prec1, val_prec5 = validate(val_loader, model, criterion, 0)
logging.info('\n Validation Loss {val_loss:.4f} \t'
'Validation Prec@1 {val_prec1:.3f} \t'
'Validation Prec@5 {val_prec5:.3f} \n'
.format(val_loss=val_loss, val_prec1=val_prec1, val_prec5=val_prec5))
return
if args.dataset=='imagenet':
train_loader = dataset.get_imagenet(
type='train',
image_dir=args.data_path,
batch_size=args.batch_size,
num_threads=args.workers,
crop=224,
device_id='cuda:0',
num_gpus=1)
val_loader = dataset.get_imagenet(
type='val',
image_dir=args.data_path,
batch_size=args.batch_size_test,
num_threads=args.workers,
crop=224,
device_id='cuda:0',
num_gpus=1)
else:
train_loader, val_loader = dataset.load_data(
dataset=args.dataset,
data_path=args.data_path,
batch_size=args.batch_size,
batch_size_test=args.batch_size_test,
num_workers=args.workers)
optimizer = torch.optim.SGD([{'params':model.parameters(),'initial_lr':args.lr}], args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
def cosin(i,T,emin=0,emax=0.01):
"customized cos-lr"
return emin+(emax-emin)/2 * (1+np.cos(i*np.pi/T))
if args.resume:
for param_group in optimizer.param_groups:
param_group['lr'] = cosin(args.start_epoch-args.warm_up*4, args.epochs-args.warm_up*4,0, args.lr)
if args.lr_type == 'cos':
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs-args.warm_up*4, eta_min = 0, last_epoch=args.start_epoch-args.warm_up*4)
elif args.lr_type == 'step':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, args.lr_decay_step, gamma=0.1, last_epoch=-1)
if not args.resume:
logging.info("criterion: %s", criterion)
logging.info('scheduler: %s', lr_scheduler)
def cpt_tk(epoch):
"compute t&k in back-propagation"
T_min, T_max = torch.tensor(args.Tmin).float(), torch.tensor(args.Tmax).float()
Tmin, Tmax = torch.log10(T_min), torch.log10(T_max)
t = torch.tensor([torch.pow(torch.tensor(10.), Tmin + (Tmax - Tmin) / args.epochs * epoch)]).float()
k = max(1/t,torch.tensor(1.)).float()
return t, k
#* setup conv_modules.epoch
conv_modules=[]
for name,module in model.named_modules():
if isinstance(module,nn.Conv2d):
conv_modules.append(module)
for epoch in range(args.start_epoch+1, args.epochs):
time_start = datetime.now()
#*warm up
if args.warm_up and epoch <5:
for param_group in optimizer.param_groups:
param_group['lr'] = args.lr * (epoch+1) / 5
for param_group in optimizer.param_groups:
logging.info('lr: %s', param_group['lr'])
#* compute t/k in back-propagation
t,k = cpt_tk(epoch)
for name,module in model.named_modules():
if isinstance(module,nn.Conv2d):
module.k = k.cuda()
module.t = t.cuda()
for module in conv_modules:
module.epoch = epoch
# train
train_loss, train_prec1, train_prec5 = train(
train_loader, model, criterion, epoch, optimizer)
#* adjust Lr
if epoch >= 4 * args.warm_up:
lr_scheduler.step()
# evaluate
with torch.no_grad():
for module in conv_modules:
module.epoch = -1
val_loss, val_prec1, val_prec5 = validate(
val_loader, model, criterion, epoch)
# remember best prec
is_best = val_prec1 > best_prec1
if is_best:
best_prec1 = max(val_prec1, best_prec1)
best_epoch = epoch
best_loss = val_loss
# save model
if epoch % 1 == 0:
model_state_dict = model.module.state_dict() if len(args.gpus) > 1 else model.state_dict()
model_parameters = model.module.parameters() if len(args.gpus) > 1 else model.parameters()
save_checkpoint({
'epoch': epoch + 1,
'model': args.model,
'state_dict': model_state_dict,
'best_prec1': best_prec1,
'parameters': list(model_parameters),
}, is_best, path=save_path)
if args.time_estimate > 0 and epoch % args.time_estimate==0:
time_end = datetime.now()
cost_time,finish_time = get_time(time_end-time_start,epoch,args.epochs)
logging.info('Time cost: '+cost_time+'\t'
'Time of Finish: '+finish_time)
logging.info('\n Epoch: {0}\t'
'Training Loss {train_loss:.4f} \t'
'Training Prec@1 {train_prec1:.3f} \t'
'Training Prec@5 {train_prec5:.3f} \t'
'Validation Loss {val_loss:.4f} \t'
'Validation Prec@1 {val_prec1:.3f} \t'
'Validation Prec@5 {val_prec5:.3f} \n'
.format(epoch + 1, train_loss=train_loss, val_loss=val_loss,
train_prec1=train_prec1, val_prec1=val_prec1,
train_prec5=train_prec5, val_prec5=val_prec5))
logging.info('*'*50+'DONE'+'*'*50)
logging.info('\n Best_Epoch: {0}\t'
'Best_Prec1 {prec1:.4f} \t'
'Best_Loss {loss:.3f} \t'
.format(best_epoch+1, prec1=best_prec1, loss=best_loss))
def forward(data_loader, model, criterion, epoch=0, training=True, optimizer=None):
    """Shared train/eval loop over one epoch of ``data_loader``.

    When ``training`` is True the optimizer is stepped after every batch;
    otherwise the model is only evaluated.  Returns a tuple
    ``(avg_loss, avg_top1, avg_top5)``.  Relies on the module-level
    globals ``args`` and ``conv_modules`` set up in ``main()``.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    for i, (inputs, target) in enumerate(data_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # After the first training batch, set epoch=-1 on every conv so the
        # binarized layers stop refreshing their rotation matrices
        # (see BinarizeConv2d.forward: epoch > -1 gates the update).
        if i==1 and training:
            for module in conv_modules:
                module.epoch=-1
        if args.gpus is not None:
            inputs = inputs.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)
        input_var = Variable(inputs.type(args.type))
        target_var = Variable(target)
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)
        if type(output) is list:
            output = output[0]
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))
        if training:
            # compute gradient
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            logging.info('{phase} - Epoch: [{0}][{1}/{2}]\t'
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                         'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                         'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                             epoch, i, len(data_loader),
                             phase='TRAINING' if training else 'EVALUATING',
                             batch_time=batch_time,
                             data_time=data_time, loss=losses,
                             top1=top1, top5=top5))
    return losses.avg, top1.avg, top5.avg
def train(data_loader, model, criterion, epoch, optimizer):
    """Run one optimization epoch; returns (loss, top-1, top-5) averages."""
    model.train()
    result = forward(data_loader, model, criterion, epoch,
                     training=True, optimizer=optimizer)
    return result
def validate(data_loader, model, criterion, epoch):
    """Evaluate the model for one epoch; returns (loss, top-1, top-5) averages."""
    model.eval()
    result = forward(data_loader, model, criterion, epoch,
                     training=False, optimizer=None)
    return result
if __name__ == '__main__':
main()
| 12,926 | 38.411585 | 161 | py |
RBNN | RBNN-master/cifar/modules/binarized_modules.py | import torch
import torch.nn as nn
import math
import numpy as np
import torch.nn.functional as F
from torch.autograd import Function, Variable
from scipy.stats import ortho_group
from utils.options import args
class BinarizeConv2d(nn.Conv2d):
    """Conv2d with weights (and optionally activations) binarized to {-1, +1}.

    RBNN-style rotated binarization: a pair of orthogonal matrices
    (R1, R2) rotates the standardized weight before taking its sign, and a
    learnable per-filter angle blends the original and rotated weights.
    ``k``/``t`` shape the surrogate gradient (overwritten each epoch by the
    trainer) and ``epoch`` gates how often the rotation is refreshed
    (epoch == -1 disables updates).
    """

    def __init__(self, *kargs, **kwargs):
        super(BinarizeConv2d, self).__init__(*kargs, **kwargs)
        # Surrogate-gradient schedule; main() reassigns these every epoch.
        self.k = torch.tensor([10.]).float()
        self.t = torch.tensor([0.1]).float()
        self.epoch = -1
        w = self.weight
        # Factor the per-filter weight length into an a x b matrix shape.
        self.a, self.b = get_ab(np.prod(w.shape[1:]))
        # Random orthogonal initializations for the two rotation matrices.
        R1 = torch.tensor(ortho_group.rvs(dim=self.a)).float().cuda()
        R2 = torch.tensor(ortho_group.rvs(dim=self.b)).float().cuda()
        self.register_buffer('R1', R1)
        self.register_buffer('R2', R2)
        self.Rweight = torch.ones_like(w)
        # Learnable per-output-channel scaling factor, initialized to mean |w|.
        sw = w.abs().view(w.size(0), -1).mean(-1).float().view(w.size(0), 1, 1).detach()
        self.alpha = nn.Parameter(sw.cuda(), requires_grad=True)
        # Learnable per-filter mixing angle between w and its rotated version.
        self.rotate = nn.Parameter(torch.ones(w.size(0), 1, 1, 1).cuda()*np.pi/2, requires_grad=True)
        self.Rotate = torch.zeros(1)  # NOTE(review): not read in this class; possibly vestigial

    def forward(self, input):
        a0 = input
        w = self.weight
        # Standardize weights and activations to zero mean / unit std
        # over dims [1,2,3] (i.e. per filter / per sample).
        w1 = w - w.mean([1,2,3], keepdim=True)
        w2 = w1 / w1.std([1,2,3], keepdim=True)
        a1 = a0 - a0.mean([1,2,3], keepdim=True)
        a2 = a1 / a1.std([1,2,3], keepdim=True)
        a, b = self.a, self.b
        X = w2.view(w.shape[0], a, b)
        if self.epoch > -1 and self.epoch % args.rotation_update == 0:
            # Alternate between updating the sign target B and the rotations
            # R1/R2; each rotation update is an orthogonal-Procrustes
            # solution obtained from an SVD.
            for _ in range(3):
                #* update B
                V = self.R1.t() @ X.detach() @ self.R2
                B = torch.sign(V)
                #* update R1
                D1 = sum([Bi@(self.R2.t())@(Xi.t()) for (Bi,Xi) in zip(B,X.detach())])
                U1, S1, V1 = torch.svd(D1)
                self.R1 = (V1@(U1.t()))
                #* update R2
                D2 = sum([(Xi.t())@self.R1@Bi for (Xi,Bi) in zip(X.detach(),B)])
                U2, S2, V2 = torch.svd(D2)
                self.R2 = (U2@(V2.t()))
        self.Rweight = ((self.R1.t())@X@(self.R2)).view_as(w)
        # Blend the standardized weight toward its (detached) rotated form
        # by the learned angle: w3 = w2 + |sin(rotate)| * (Rw - w2).
        delta = self.Rweight.detach() - w2
        w3 = w2 + torch.abs(torch.sin(self.rotate)) * delta
        #* binarize
        bw = BinaryQuantize().apply(w3, self.k.to(w.device), self.t.to(w.device))
        if args.a32:
            # 1-bit weights, 32-bit activations mode.
            ba = a2
        else:
            ba = BinaryQuantize_a().apply(a2, self.k.to(w.device), self.t.to(w.device))
        #* 1bit conv
        output = F.conv2d(ba, bw, self.bias, self.stride, self.padding,
                          self.dilation, self.groups)
        #* scaling factor
        output = output * self.alpha
        return output
class BinaryQuantize(Function):
    """Sign binarizer for weights with a clamped surrogate gradient.

    Forward emits sign(input); backward uses the triangular surrogate
    k * max(0, 2*sqrt(t^2/2) - |t^2 * x|) in place of the true (zero)
    derivative of sign.
    """

    @staticmethod
    def forward(ctx, input, k, t):
        # Stash inputs for the custom backward pass.
        ctx.save_for_backward(input, k, t)
        return input.sign()

    @staticmethod
    def backward(ctx, grad_output):
        input, k, t = ctx.saved_tensors
        peak = 2 * torch.sqrt(t ** 2 / 2)
        surrogate = k * (peak - (t ** 2 * input).abs())
        grad_input = surrogate.clamp(min=0) * grad_output.clone()
        # No gradients flow to the k/t schedule tensors.
        return grad_input, None, None
class BinaryQuantize_a(Function):
    """Sign binarizer for activations.

    Same surrogate gradient as BinaryQuantize, except the backward pass
    ignores the stored schedule: gain k is fixed to 1 and t is floored
    at 1 before the surrogate is evaluated.
    """

    @staticmethod
    def forward(ctx, input, k, t):
        ctx.save_for_backward(input, k, t)
        return input.sign()

    @staticmethod
    def backward(ctx, grad_output):
        input, k, t = ctx.saved_tensors
        # Override the schedule for activations: k := 1, t := max(t, 1).
        k = torch.tensor(1.).to(input.device)
        t = max(t, torch.tensor(1.).to(input.device))
        peak = 2 * torch.sqrt(t ** 2 / 2)
        surrogate = k * (peak - (t ** 2 * input).abs())
        grad_input = surrogate.clamp(min=0) * grad_output.clone()
        return grad_input, None, None
def get_ab(N):
    """Factor N into (a, b) with a * b == N, a <= b and a as large as possible.

    Used to reshape a flattened weight of N elements into the most
    square-like a x b matrix for the rotation step.

    Fix: use math.isqrt for an exact integer square root —
    int(np.sqrt(N)) relies on float rounding and can miss the correct
    starting divisor for large (or large perfect-square) N.
    """
    import math
    sqrt = math.isqrt(int(N))  # N may arrive as a numpy integer
    for i in range(sqrt, 0, -1):
        if N % i == 0:
            return i, N // i
| 3,835 | 34.518519 | 101 | py |
RBNN | RBNN-master/cifar/modules/__init__.py | from .binarized_modules import * | 32 | 32 | 32 | py |
RBNN | RBNN-master/cifar/dataset/dataset.py | from datetime import datetime
import os
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
def load_data(type='both',dataset='cifar10',data_path='/data',batch_size = 256,batch_size_test=256,num_workers=0):
    """Build DataLoader(s) for the requested dataset.

    ``type``: 'both' -> (train_loader, test_loader); 'train' or 'val' for a
    single loader.  The ImageFolder (tinyimagenet) branch ignores ``type``
    and always returns both loaders.  NOTE: ``type`` shadows the builtin;
    kept for caller compatibility.
    """
    # load data
    # Per-dataset: torchvision dataset class, input size, normalization stats.
    param = {'cifar10':{'name':datasets.CIFAR10,'size':32,'normalize':[[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]]},
            'cifar100':{'name':datasets.CIFAR100,'size':32,'normalize':[(0.507, 0.487, 0.441), (0.267, 0.256, 0.276)]},
            'mnist':{'name':datasets.MNIST,'size':32,'normalize':[(0.5,0.5,0.5),(0.5,0.5,0.5)]},
            'tinyimagenet':{'name':datasets.ImageFolder,'size':224,'normalize':[(0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262)]}}
    data = param[dataset]
    if data['name']==datasets.ImageFolder:
        # Tiny-ImageNet lives on disk as an ImageFolder tree.
        data_transforms = {
            'train': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.RandomRotation(20),
                transforms.RandomHorizontalFlip(0.5),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ]),
            'val': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ]),
            'test': transforms.Compose([
                transforms.Resize(data['size']),
                transforms.ToTensor(),
                transforms.Normalize(*data['normalize']),
            ])
        }
        data_dir = os.path.join(data_path,'tiny-imagenet-200')
        image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
                        for x in ['train', 'val']}
        dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size, shuffle=(x=='train'), num_workers=num_workers)
                    for x in ['train', 'val']}
        # dicts preserve insertion order, so this unpacks as (train, val).
        return dataloaders.values()
    else:
        # Standard augmentation for 32x32 datasets: pad-crop + horizontal flip.
        transform1 = transforms.Compose([
            transforms.RandomCrop(data['size'],padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(*data['normalize']),
        ])
        transform2 = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(*data['normalize']),
        ])
        # download=False: the dataset must already exist under data_path.
        trainset = data['name'](root=data_path,
                            train=True,
                            download=False,
                            transform=transform1);
        trainloader = DataLoader(
            trainset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            pin_memory=True)
        testset = data['name'](root=data_path,
                            train=False,
                            download=False,
                            transform=transform2);
        testloader = DataLoader(
            testset,
            batch_size=batch_size_test,
            shuffle=False,
            num_workers=num_workers,
            pin_memory=True)
        if type=='both':
            return trainloader, testloader
        elif type=='train':
            return trainloader
        elif type=='val':
            return testloader
def delete_module_fromdict(statedict):
    """Strip the 7-character 'module.' prefix DataParallel adds to state-dict keys."""
    from collections import OrderedDict
    stripped = OrderedDict(
        (key[7:], value) for key, value in statedict.items())
    return stripped
def add_module_fromdict(statedict):
    """Prefix every state-dict key with 'module.' so a DataParallel model can load it."""
    from collections import OrderedDict
    prefixed = OrderedDict(
        ('module.' + key, value) for key, value in statedict.items())
    return prefixed
| 3,858 | 37.59 | 134 | py |
RBNN | RBNN-master/cifar/dataset/__init__.py | from .dataset import load_data, add_module_fromdict
from .imagenet import get_imagenet_iter_dali as get_imagenet | 112 | 55.5 | 60 | py |
RBNN | RBNN-master/cifar/dataset/imagenet.py | import time
import torch.utils.data
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import torchvision.datasets as datasets
from nvidia.dali.pipeline import Pipeline
import torchvision.transforms as transforms
from nvidia.dali.plugin.pytorch import DALIClassificationIterator, DALIGenericIterator
class HybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, dali_cpu=False, local_rank=0, world_size=1):
super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
dali_device = "gpu"
self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size, random_shuffle=True)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.res = ops.RandomResizedCrop(device="gpu", size=crop, random_area=[0.08, 1.25])
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
image_type=types.RGB,
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
self.coin = ops.CoinFlip(probability=0.5)
print('DALI "{0}" variant'.format(dali_device))
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images, mirror=rng)
return [output, self.labels]
class HybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size, local_rank=0, world_size=1):
super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed=12 + device_id)
self.input = ops.FileReader(file_root=data_dir, shard_id=local_rank, num_shards=world_size,
random_shuffle=False)
self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
self.res = ops.Resize(device="gpu", resize_shorter=size, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device="gpu",
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
std=[0.229 * 255, 0.224 * 255, 0.225 * 255])
def define_graph(self):
self.jpegs, self.labels = self.input(name="Reader")
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.labels]
def get_imagenet_iter_dali(type, image_dir, batch_size, num_threads, device_id, num_gpus, crop, val_size=256,
world_size=1,
local_rank=0):
if type == 'train':
pip_train = HybridTrainPipe(batch_size=batch_size, num_threads=num_threads, device_id=local_rank,
data_dir=image_dir + '/ILSVRC2012_img_train',
crop=crop, world_size=world_size, local_rank=local_rank)
pip_train.build()
dali_iter_train = DALIClassificationIterator(pip_train, size=pip_train.epoch_size("Reader") // world_size, auto_reset=True)
return dali_iter_train
elif type == 'val':
pip_val = HybridValPipe(batch_size=batch_size, num_threads=num_threads, device_id=local_rank,
data_dir=image_dir + '/val',
crop=crop, size=val_size, world_size=world_size, local_rank=local_rank)
pip_val.build()
dali_iter_val = DALIClassificationIterator(pip_val, size=pip_val.epoch_size("Reader") // world_size, auto_reset=True)
return dali_iter_val
def get_imagenet_iter_torch(type, image_dir, batch_size, num_threads, device_id, num_gpus, crop, val_size=256,
world_size=1, local_rank=0):
if type == 'train':
transform = transforms.Compose([
transforms.RandomResizedCrop(crop, scale=(0.08, 1.25)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
dataset = datasets.ImageFolder(image_dir + '/train', transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=num_threads,
pin_memory=True)
else:
transform = transforms.Compose([
transforms.Resize(val_size),
transforms.CenterCrop(crop),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
dataset = datasets.ImageFolder(image_dir + '/val', transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=num_threads,
pin_memory=True)
return dataloader
if __name__ == '__main__':
train_loader = get_imagenet_iter_dali(type='train', image_dir='/userhome/memory_data/imagenet', batch_size=256,
num_threads=4, crop=224, device_id=0, num_gpus=1)
print('start iterate')
start = time.time()
for i, data in enumerate(train_loader):
images = data[0]["data"].cuda(non_blocking=True)
labels = data[0]["label"].squeeze().long().cuda(non_blocking=True)
end = time.time()
print('end iterate')
print('dali iterate time: %fs' % (end - start))
train_loader = get_imagenet_iter_torch(type='train', image_dir='/userhome/data/imagenet', batch_size=256,
num_threads=4, crop=224, device_id=0, num_gpus=1)
print('start iterate')
start = time.time()
for i, data in enumerate(train_loader):
images = data[0].cuda(non_blocking=True)
labels = data[1].cuda(non_blocking=True)
end = time.time()
print('end iterate')
print('torch iterate time: %fs' % (end - start))
| 6,546 | 51.376 | 131 | py |
RBNN | RBNN-master/cifar/models_imagenet/resnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.init as init
from modules import *
BN = None
__all__ = ['resnet18_1w1a', 'resnet34_1w1a']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
}
def conv3x3Binary(in_planes, out_planes, stride=1):
    """Build a binarized 3x3 convolution with unit padding and no bias."""
    layer = BinarizeConv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return layer
class BasicBlock(nn.Module):
    """Binarized residual block: two binary 3x3 convs with Hardtanh activations.

    Unlike a standard ResNet BasicBlock, a shortcut is added after *each*
    conv+BN (before the non-linearity) rather than once at the block's end,
    so full-precision information bypasses every binary convolution.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3Binary(inplanes, planes, stride)
        self.bn1 = BN(planes)
        self.nonlinear = nn.Hardtanh(inplace=True)
        self.conv2 = conv3x3Binary(planes, planes)
        self.bn2 = BN(planes)
        # Projection used when the shortcut must change shape/stride.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.nonlinear(out)
        # Second shortcut starts from the intermediate activation.
        residual = out
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.nonlinear(out)
        return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, deep_stem=False,
avg_down=False, bypass_last_bn=False,
bn_group_size=1,
bn_group=None,
bn_sync_stats=False,
use_sync_bn=True):
global BN, bypass_bn_weight_list
BN = nn.BatchNorm2d
bypass_bn_weight_list = []
self.inplanes = 64
super(ResNet, self).__init__()
self.deep_stem = deep_stem
self.avg_down = avg_down
if self.deep_stem:
self.conv1 = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1, bias=False),
BN(32),
nn.Hardtanh(inplace=True),
nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1, bias=False),
BN(32),
nn.Hardtanh(inplace=True),
nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1, bias=False),
)
else:
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = BN(64)
self.nonlinear1 = nn.Hardtanh(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
self.bn2 = nn.BatchNorm1d(512 * block.expansion)
self.nonlinear2 = nn.Hardtanh(inplace=True)
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.bn3 = nn.BatchNorm1d(num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1e-8)
m.bias.data.zero_()
if bypass_last_bn:
for param in bypass_bn_weight_list:
param.data.zero_()
print('bypass {} bn.weight in BottleneckBlocks'.format(len(bypass_bn_weight_list)))
def _make_layer(self, block, planes, blocks, stride=1, avg_down=False):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
if self.avg_down:
downsample = nn.Sequential(
nn.AvgPool2d(stride, stride=stride, ceil_mode=True, count_include_pad=False),
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=1, bias=False),
BN(planes * block.expansion),
)
else:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BN(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.nonlinear1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.bn2(x)
x = self.fc(x)
return x
def resnet18_1w1a(pretrained=False, **kwargs):
    """Build a binary (1-bit weight / 1-bit activation) ResNet-18.

    Args:
        pretrained (bool): if True, load ImageNet-pretrained weights.
    """
    net = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet18'])
        net.load_state_dict(state)
    return net
def resnet34_1w1a(pretrained=False, **kwargs):
    """Build a binary (1-bit weight / 1-bit activation) ResNet-34.

    Args:
        pretrained (bool): if True, load ImageNet-pretrained weights.
    """
    net = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = model_zoo.load_url(model_urls['resnet34'])
        net.load_state_dict(state)
    return net
| 5,965 | 31.248649 | 97 | py |
RBNN | RBNN-master/cifar/models_imagenet/__init__.py | from .resnet import * | 21 | 21 | 21 | py |
RBNN | RBNN-master/cifar/models_cifar/resnet2.py | '''ResNet in PyTorch.
For Pre-activation ResNet, see 'preact_resnet.py'.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
__all__ =['resnet18A_1w1a','resnet18B_1w1a','resnet18C_1w1a','resnet18_1w1a']
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
BinarizeConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.hardtanh(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.hardtanh(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = BinarizeConv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
BinarizeConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_channel, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = num_channel[0]
self.conv1 = nn.Conv2d(3, num_channel[0], kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(num_channel[0])
self.layer1 = self._make_layer(block, num_channel[0], num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, num_channel[1], num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, num_channel[2], num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, num_channel[3], num_blocks[3], stride=2)
self.linear = nn.Linear(num_channel[3]*block.expansion, num_classes)
self.bn2 = nn.BatchNorm1d(num_channel[3]*block.expansion)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.bn1(self.conv1(x))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.bn2(out)
out = self.linear(out)
return out
def resnet18A_1w1a(**kwargs):
return ResNet(BasicBlock, [2,2,2,2],[32,32,64,128],**kwargs)
def resnet18B_1w1a(**kwargs):
return ResNet(BasicBlock, [2,2,2,2],[32,64,128,256],**kwargs)
def resnet18C_1w1a(**kwargs):
return ResNet(BasicBlock, [2,2,2,2],[64,64,128,256],**kwargs)
def resnet18_1w1a(**kwargs):
return ResNet(BasicBlock, [2,2,2,2],[64,128,256,512],**kwargs)
def ResNet34():
return ResNet(BasicBlock, [3,4,6,3])
def ResNet50():
return ResNet(Bottleneck, [3,4,6,3])
def ResNet101():
return ResNet(Bottleneck, [3,4,23,3])
def ResNet152():
return ResNet(Bottleneck, [3,8,36,3])
def test():
net = resnet18_1w1a()
y = net(torch.randn(1,3,32,32))
print(y.size())
# test()
| 4,704 | 33.343066 | 107 | py |
RBNN | RBNN-master/cifar/models_cifar/resnet.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
from torch.autograd import Variable
__all__ = ['resnet20_1w1a']
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable so it can sit inside an nn.Sequential."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        # Keep the callable under its original attribute name.
        self.lambd = lambd

    def forward(self, x):
        fn = self.lambd
        return fn(x)
class BasicBlock(nn.Module):
    """Standard full-precision CIFAR ResNet basic block (two 3x3 convs
    with a residual shortcut and ReLU activations)."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut by default; replaced when the shape changes.
        self.shortcut = nn.Sequential()
        needs_projection = stride != 1 or in_planes != planes
        if needs_projection and option == 'A':
            # Option A (CIFAR ResNet paper): spatially subsample by 2 and
            # zero-pad the extra channels -- parameter free.
            pad = planes // 4
            self.shortcut = LambdaLayer(
                lambda x: F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, pad, pad), "constant", 0))
        elif needs_projection and option == 'B':
            # Option B: learned 1x1 projection shortcut.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )

    def forward(self, x):
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        y = y + self.shortcut(x)
        return F.relu(y)
class BasicBlock_1w1a(nn.Module):
    """Binary (1-bit weight / 1-bit activation) CIFAR basic block.

    Same layout as BasicBlock, but the 3x3 convolutions are project-local
    BinarizeConv2d layers and the activation is hardtanh.  A single residual
    connection spans both convolutions (added before the final hardtanh).
    """
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock_1w1a, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the output shape differs from the input.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Parameter-free shortcut: subsample spatially by 2 and
                # zero-pad the channel dimension on both sides.
                self.shortcut = LambdaLayer(lambda x:
                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                # Learned (binarized) 1x1 projection shortcut.
                self.shortcut = nn.Sequential(
                     BinarizeConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )
    def forward(self, x):
        # conv -> BN -> hardtanh, conv -> BN, add shortcut, hardtanh.
        out = self.bn1(self.conv1(x))
        out = F.hardtanh(out)
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.hardtanh(out)
        return out
class ResNet(nn.Module):
    """CIFAR-style ResNet: 3x3 stem, three stages at 16/32/64 channels,
    global average pooling, BatchNorm1d, then a linear classifier.

    Args:
        block: residual block class (BasicBlock or BasicBlock_1w1a).
        num_blocks: blocks per stage, e.g. [3, 3, 3] for ResNet-20.
        num_classes: classifier output size (default 10).
    """
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.bn2 = nn.BatchNorm1d(64)
        self.linear = nn.Linear(64, num_classes)
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                # NOTE(review): BN scale initialized to ~0 (1e-8) instead of
                # the usual 1 -- presumably deliberate for binary-net
                # training; confirm before reusing elsewhere.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()
            if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.hardtanh(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pool over the remaining spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.bn2(out)
        out = self.linear(out)
        return out
def _plain_resnet(blocks_per_stage):
    # Shared builder for the full-precision variants below.
    return ResNet(BasicBlock, [blocks_per_stage] * 3)


def resnet20_1w1a(**kwargs):
    """ResNet-20 with binarized (1-bit) weights and activations."""
    return ResNet(BasicBlock_1w1a, [3, 3, 3], **kwargs)


def resnet20():
    """Full-precision ResNet-20."""
    return _plain_resnet(3)


def resnet32():
    """Full-precision ResNet-32."""
    return _plain_resnet(5)


def resnet44():
    """Full-precision ResNet-44."""
    return _plain_resnet(7)


def resnet56():
    """Full-precision ResNet-56."""
    return _plain_resnet(9)


def resnet110():
    """Full-precision ResNet-110."""
    return _plain_resnet(18)


def resnet1202():
    """Full-precision ResNet-1202."""
    return _plain_resnet(200)
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
    # Smoke test: instantiate every exported resnet* architecture and
    # print its parameter statistics.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
| 6,508 | 31.708543 | 120 | py |
RBNN | RBNN-master/cifar/models_cifar/vgg.py | '''VGG for CIFAR10. FC layers are removed.
(c) YANG, Wei
'''
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import math
from modules import *
__all__ = ['vgg_small_1w1a']
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
}
class VGG(nn.Module):
    """VGG backbone with the usual fully-connected stack replaced by
    BatchNorm1d -> Hardtanh -> Linear -> BatchNorm1d -> LogSoftmax.

    Args:
        features: convolutional feature extractor (see ``make_layers``);
            it must emit 512 values per sample once flattened.
        num_classes: size of the classification output (default 1000).
    """
    def __init__(self, features, num_classes=1000):
        super(VGG, self).__init__()
        self.features = features
        # Bug fix: the original used ``512 * block.expansion`` but ``block``
        # is undefined in this file, so instantiation raised NameError.
        # The flattened feature width is 512 (see self.classifier below).
        self.bn2 = nn.BatchNorm1d(512)
        self.nonlinear2 = nn.Hardtanh(inplace=True)
        self.classifier = nn.Linear(512, num_classes)
        self.bn3 = nn.BatchNorm1d(num_classes)
        self.logsoftmax = nn.LogSoftmax(dim=1)
        self._initialize_weights()

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, num_classes)."""
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.bn2(x)
        x = self.nonlinear2(x)
        x = self.classifier(x)
        x = self.bn3(x)
        x = self.logsoftmax(x)
        return x

    def _initialize_weights(self):
        # He-style init for convs, unit/zero for BN, small normal for Linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
def make_layers(cfg, batch_norm=False):
    """Translate a VGG config list into an nn.Sequential.

    'M' entries become 2x2 max-pooling; integer entries become a 3x3 conv
    (optionally followed by BatchNorm2d) plus a Hardtanh activation.
    """
    layers = []
    in_channels = 3
    for spec in cfg:
        if spec == 'M':
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(in_channels, spec, kernel_size=3, padding=1))
        if batch_norm:
            layers.append(nn.BatchNorm2d(spec))
        layers.append(nn.Hardtanh(inplace=True))
        in_channels = spec
    return nn.Sequential(*layers)
# Layer configurations: integers are 3x3-conv output widths, 'M' is a 2x2
# max-pool.  'A'/'B'/'D'/'E' follow the VGG-11/13/16/19 layouts; 'F' is a
# compact small-VGG layout.
cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
    'F': [128, 128, 'M', 512, 512, 'M'],
}
def vgg11(**kwargs):
    """VGG 11-layer model (configuration "A")."""
    return VGG(make_layers(cfg['A']), **kwargs)


def vgg11_bn(**kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization."""
    return VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)


def vgg13(**kwargs):
    """VGG 13-layer model (configuration "B")."""
    return VGG(make_layers(cfg['B']), **kwargs)


def vgg13_bn(**kwargs):
    """VGG 13-layer model (configuration "B") with batch normalization."""
    return VGG(make_layers(cfg['B'], batch_norm=True), **kwargs)


def vgg16(**kwargs):
    """VGG 16-layer model (configuration "D")."""
    return VGG(make_layers(cfg['D']), **kwargs)


def vgg16_bn(**kwargs):
    """VGG 16-layer model (configuration "D") with batch normalization."""
    return VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)


def vgg19(**kwargs):
    """VGG 19-layer model (configuration "E")."""
    return VGG(make_layers(cfg['E']), **kwargs)


def vgg19_bn(**kwargs):
    """VGG 19-layer model (configuration "E") with batch normalization."""
    return VGG(make_layers(cfg['E'], batch_norm=True), **kwargs)
class VGG_SMALL_1W1A(nn.Module):
    """Compact binary VGG: one full-precision stem conv followed by five
    BinarizeConv2d layers with BatchNorm + Hardtanh, three 2x2 max-pools,
    and a final linear classifier.

    The classifier input is 512*4*4, i.e. the network is sized for 32x32
    inputs (three 2x2 pools: 32 -> 16 -> 8 -> 4).

    NOTE: attribute names (conv0, bn0, ...) are part of the checkpoint
    state_dict layout -- do not rename them.
    """
    def __init__(self, num_classes=10):
        super(VGG_SMALL_1W1A, self).__init__()
        # Full-precision stem; the remaining convs are binarized.
        self.conv0 = nn.Conv2d(3, 128, kernel_size=3, padding=1, bias=False)
        self.bn0 = nn.BatchNorm2d(128)
        self.conv1 = BinarizeConv2d(128, 128, kernel_size=3, padding=1, bias=False)
        # Single pooling module reused three times in forward().
        self.pooling = nn.MaxPool2d(kernel_size=2, stride=2)
        self.bn1 = nn.BatchNorm2d(128)
        self.nonlinear = nn.Hardtanh(inplace=True)
        self.conv2 = BinarizeConv2d(128, 256, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(256)
        self.conv3 = BinarizeConv2d(256, 256, kernel_size=3, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(256)
        self.conv4 = BinarizeConv2d(256, 512, kernel_size=3, padding=1, bias=False)
        self.bn4 = nn.BatchNorm2d(512)
        self.conv5 = BinarizeConv2d(512, 512, kernel_size=3, padding=1, bias=False)
        self.bn5 = nn.BatchNorm2d(512)
        self.fc = nn.Linear(512*4*4, num_classes)
        self._initialize_weights()
    def _initialize_weights(self):
        # He-style init for convolutions (full precision and binarized),
        # unit/zero for BatchNorm, small normal for the classifier.
        # NOTE(review): if BinarizeConv2d subclasses nn.Conv2d, the first
        # branch already catches it and the elif is never taken -- confirm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, BinarizeConv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                n = m.weight.size(1)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
    def forward(self, x):
        # Stem: conv -> BN -> hardtanh.
        x = self.conv0(x)
        x = self.bn0(x)
        x = self.nonlinear(x)
        # Binarized stages; pooling after conv1, conv3 and conv5.
        x = self.conv1(x)
        x = self.pooling(x)
        x = self.bn1(x)
        x = self.nonlinear(x)
        x = self.conv2(x)
        x = self.bn2(x)
        x = self.nonlinear(x)
        x = self.conv3(x)
        x = self.pooling(x)
        x = self.bn3(x)
        x = self.nonlinear(x)
        x = self.conv4(x)
        x = self.bn4(x)
        x = self.nonlinear(x)
        x = self.conv5(x)
        x = self.pooling(x)
        x = self.bn5(x)
        x = self.nonlinear(x)
        # x = self.pooling(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def vgg_small_1w1a(**kwargs):
    """Factory for the binary (1-bit weight / 1-bit activation) small VGG."""
    return VGG_SMALL_1W1A(**kwargs)
| 7,083 | 30.766816 | 113 | py |
RBNN | RBNN-master/cifar/models_cifar/__init__.py | from .resnet import *
from .resnet_bireal import *
from .resnet2 import *
from .vgg import * | 93 | 22.5 | 28 | py |
RBNN | RBNN-master/cifar/models_cifar/resnet_bireal.py | '''
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for a fair comparison, etc.) have the following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from modules import *
from torch.autograd import Variable
__all__ = ['resnet20_bireal_1w1a']
class LambdaLayer(nn.Module):
    """Tiny adapter module that applies a stored callable in forward()."""

    def __init__(self, lambd):
        super(LambdaLayer, self).__init__()
        self.lambd = lambd

    def forward(self, x):
        return self.lambd(x)
class BasicBlock(nn.Module):
    """Full-precision two-conv residual block for CIFAR ResNets."""
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.shortcut = self._build_shortcut(in_planes, planes, stride, option)

    def _build_shortcut(self, in_planes, planes, stride, option):
        # Identity unless the spatial size or channel count changes.
        if stride == 1 and in_planes == planes:
            return nn.Sequential()
        if option == 'A':
            # Parameter-free shortcut from the CIFAR ResNet paper:
            # subsample by 2 and zero-pad the extra channels.
            return LambdaLayer(lambda x: F.pad(
                x[:, :, ::2, ::2], (0, 0, 0, 0, planes // 4, planes // 4), "constant", 0))
        if option == 'B':
            # Learned 1x1 projection shortcut.
            return nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes),
            )
        return nn.Sequential()

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + self.shortcut(x)
        return F.relu(out)
class BasicBlock_1w1a(nn.Module):
    """Bi-Real-style binary basic block (1-bit weights / 1-bit activations).

    Unlike the plain binary block, a residual connection wraps EACH
    convolution: the shortcut is added right after conv1/bn1, and that
    intermediate result is added again after conv2/bn2.
    """
    expansion = 1
    def __init__(self, in_planes, planes, stride=1, option='A'):
        super(BasicBlock_1w1a, self).__init__()
        self.conv1 = BinarizeConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = BinarizeConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # Identity shortcut unless the output shape differs from the input.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != planes:
            if option == 'A':
                """
                For CIFAR10 ResNet paper uses option A.
                """
                # Parameter-free shortcut: subsample spatially by 2 and
                # zero-pad the channel dimension on both sides.
                self.shortcut = LambdaLayer(lambda x:
                            F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
            elif option == 'B':
                # Learned (binarized) 1x1 projection shortcut.
                self.shortcut = nn.Sequential(
                     BinarizeConv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                     nn.BatchNorm2d(self.expansion * planes)
                )
    def forward(self, x):
        # First residual: shortcut added immediately after conv1/bn1.
        out = self.bn1(self.conv1(x))
        out += self.shortcut(x)
        out = F.hardtanh(out)
        # Second residual: re-use the intermediate activation as identity.
        x1 = out
        out = self.bn2(self.conv2(out))
        out += x1
        out = F.hardtanh(out)
        return out
class ResNet(nn.Module):
    """CIFAR ResNet backbone used by the Bi-Real binary variants: 3x3 stem,
    three stages at 16/32/64 channels, global average pooling, BatchNorm1d,
    then a linear classifier.

    Args:
        block: residual block class (BasicBlock or BasicBlock_1w1a).
        num_blocks: blocks per stage, e.g. [3, 3, 3] for ResNet-20.
        num_classes: classifier output size (default 10).
    """
    def __init__(self, block, num_blocks, num_classes=10):
        super(ResNet, self).__init__()
        self.in_planes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
        self.bn2 = nn.BatchNorm1d(64)
        self.linear = nn.Linear(64, num_classes)
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                # NOTE(review): BN scale initialized to ~0 (1e-8), not the
                # usual 1 -- presumably deliberate for binary training;
                # confirm before reusing elsewhere.
                m.weight.data.fill_(1e-8)
                m.bias.data.zero_()
            if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight)
    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest use stride 1.
        strides = [stride] + [1]*(num_blocks-1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)
    def forward(self, x):
        out = F.hardtanh(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        # Global average pool over the remaining spatial extent.
        out = F.avg_pool2d(out, out.size()[3])
        out = out.view(out.size(0), -1)
        out = self.bn2(out)
        out = self.linear(out)
        return out
def resnet20_bireal_1w1a(**kwargs):
    """Bi-Real-style binary ResNet-20 (1-bit weights / 1-bit activations)."""
    return ResNet(BasicBlock_1w1a, [3, 3, 3], **kwargs)


def resnet20():
    """Full-precision ResNet-20."""
    return ResNet(BasicBlock, [3] * 3)


def resnet32():
    """Full-precision ResNet-32."""
    return ResNet(BasicBlock, [5] * 3)


def resnet44():
    """Full-precision ResNet-44."""
    return ResNet(BasicBlock, [7] * 3)


def resnet56():
    """Full-precision ResNet-56."""
    return ResNet(BasicBlock, [9] * 3)


def resnet110():
    """Full-precision ResNet-110."""
    return ResNet(BasicBlock, [18] * 3)


def resnet1202():
    """Full-precision ResNet-1202."""
    return ResNet(BasicBlock, [200] * 3)
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
    # Smoke test: instantiate each exported resnet* model and print its
    # parameter statistics.
    for net_name in __all__:
        if net_name.startswith('resnet'):
            print(net_name)
            test(globals()[net_name]())
            print()
| 6,557 | 31.626866 | 120 | py |
RBNN | RBNN-master/cifar/utils/common.py | import os
import torch
import logging.config
import shutil
import torch.nn as nn
import numpy
import datetime
def setup_logging(log_file='log.txt', filemode='w'):
    """Configure root logging: DEBUG records go to *log_file*, INFO and
    above are echoed to the console with a bare message format."""
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        filename=log_file,
        filemode=filemode,
    )
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(logging.Formatter('%(message)s'))
    logging.getLogger('').addHandler(console_handler)
def save_checkpoint(state, is_best, path='.', filename='checkpoint.pth.tar', save_all=False):
    """Serialize *state* under *path*.

    When *is_best* is set, also copy it to ``model_best.pth.tar``; when
    *save_all* is set, additionally keep a per-epoch copy named from
    ``state['epoch']``.
    """
    ckpt_path = os.path.join(path, filename)
    torch.save(state, ckpt_path)
    if is_best:
        shutil.copyfile(ckpt_path, os.path.join(path, 'model_best.pth.tar'))
    if save_all:
        epoch_name = 'checkpoint_epoch_%s.pth.tar' % state['epoch']
        shutil.copyfile(ckpt_path, os.path.join(path, epoch_name))
class AverageMeter(object):
    """Tracks the latest value plus a running sum, count and average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.float().topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def get_time(delta, epoch, epochs):
    """Format the elapsed epoch time and estimate the finish timestamp.

    Args:
        delta: timedelta taken by the last epoch.
        epoch: zero-based index of the epoch just finished.
        epochs: total number of epochs.

    Returns:
        (cost_time, finish_time): elapsed time as 'MM:SS' (or 'H:MM:SS'
        when >= 1 hour) and the projected finish as 'YYYY-mm-dd HH:MM:SS'.
    """
    now = datetime.datetime.now()
    # Drop the leading hours field when the epoch took under an hour, and
    # drop fractional seconds either way.
    skip_fields = 0 if delta >= datetime.timedelta(hours=1) else 1
    fields = str(delta).split(':')[skip_fields:]
    cost_time = ':'.join(fields).split('.')[0]
    remaining = delta * (epochs - epoch - 1)
    finish_time = (now + remaining).strftime('%Y-%m-%d %H:%M:%S')
    return cost_time, finish_time
| 2,252 | 27.884615 | 93 | py |
RBNN | RBNN-master/cifar/utils/options.py | import argparse
import os
"""
args
"""
# Single shared parser; scripts import this module and read `args`.
parser = argparse.ArgumentParser(description='RotationNet')
# Logging
parser.add_argument(
    '--results_dir',
    metavar='RESULTS_DIR',
    default='./results',
    help='results dir')
parser.add_argument(
    '--save',
    metavar='SAVE',
    default='',
    help='saved folder (named by datetime)')
parser.add_argument(
    '--resume',
    dest='resume',
    action='store_true',
    help='resume to latest checkpoint')
parser.add_argument(
    '-e',
    '--evaluate',
    type=str,
    metavar='FILE',
    help='evaluate model FILE on validation set')
parser.add_argument(
    '--seed',
    default=1234,
    type=int,
    help='random seed')
# Model
parser.add_argument(
    '--model',
    '-a',
    metavar='MODEL',
    default='resnet20_bireal_1w1a',
    help='model architecture ')
parser.add_argument(
    '--dataset',
    default='cifar10',
    type=str,
    help='dataset, default:cifar10')
parser.add_argument(
    '--data_path',
    type=str,
    default='/home/data',
    help='The dictionary where the dataset is stored.')
parser.add_argument(
    '--type',
    default='torch.cuda.FloatTensor',
    help='type of tensor - e.g torch.cuda.FloatTensor')
# Training
parser.add_argument(
    '--gpus',
    default='0',
    help='gpus used for training - e.g 0,1,3')
parser.add_argument(
    '--lr',
    default=0.1,
    type=float,
    help='learning rate')
parser.add_argument(
    '--weight_decay',
    type=float,
    default=1e-4,
    help='Weight decay of loss. default:1e-4')
parser.add_argument(
    '--momentum',
    default=0.9,
    type=float,
    metavar='M',
    help='momentum')
parser.add_argument(
    '--workers',
    default=8,
    type=int,
    metavar='N',
    help='number of data loading workers (default: 8)')
parser.add_argument(
    '--epochs',
    default=1000,
    type=int,
    metavar='N',
    help='number of total epochs to run')
parser.add_argument(
    '--start_epoch',
    default=-1,
    type=int,
    metavar='N',
    help='manual epoch number (useful on restarts)')
parser.add_argument(
    '-b',
    '--batch_size',
    default=256,
    type=int,
    metavar='N',
    help='mini-batch size for training (default: 256)')
parser.add_argument(
    '-bt',
    '--batch_size_test',
    default=128,
    type=int,
    help='mini-batch size for testing (default: 128)')
parser.add_argument(
    '--print_freq',
    '-p',
    default=100,
    type=int,
    metavar='N',
    help='print frequency (default: 100)')
parser.add_argument(
    '--time_estimate',
    default=1,
    type=int,
    metavar='N',
    help='print estimating finish time,set to 0 to disable')
# RBNN-specific rotation / temperature schedule knobs.
parser.add_argument(
    '--rotation_update',
    default=1,
    type=int,
    metavar='N',
    help='interval of updating rotation matrix (default:1)')
parser.add_argument(
    '--Tmin',
    default=1e-2,
    type=float,
    metavar='M',
    help='minimum of T (default:1e-2)')
parser.add_argument(
    '--Tmax',
    default=1e1,
    type=float,
    metavar='M',
    help='maximum of T (default:1e1)')
parser.add_argument(
    '--lr_type',
    type=str,
    default='cos',
    help='choose lr_scheduler,(default:cos)')
parser.add_argument(
    '--lr_decay_step',
    nargs='+',
    type=int,
    help='lr decay step for MultiStepLR')
parser.add_argument(
    '--a32',
    dest='a32',
    action='store_true',
    help='w1a32')
parser.add_argument(
    '--warm_up',
    dest='warm_up',
    action='store_true',
    help='use warm up or not')
args = parser.parse_args() | 3,554 | 18.010695 | 60 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/dataloader.py | # from __future__ import print_function
# from __future__ import division
import torch
import numpy as np
from torchvision import transforms
import os
import glob
from PIL import Image
class DataLoaderSegmentation(torch.utils.data.dataset.Dataset):
    """Dataset pairing images in ``<folder>/Images`` with masks in
    ``<folder>/Labels`` (the label for ``Images/foo.jpg`` is
    ``Labels/foo.png``).

    Image and mask are stacked into one 4-channel PIL image so that the
    random crops/flips are applied identically to both; the mask channel
    gets mean 0 / std 1 so Normalize leaves it untouched.

    Args:
        folder_path: split directory containing Images/ and Labels/.
        mode: 'val' selects deterministic center-crop preprocessing;
            anything else selects the augmented training pipeline.
    """
    def __init__(self, folder_path, mode):
        super(DataLoaderSegmentation, self).__init__()
        self.img_files = glob.glob(os.path.join(folder_path, 'Images', '*.*'))
        self.label_files = []
        for img_path in self.img_files:
            image_filename, _ = os.path.splitext(os.path.basename(img_path))
            label_filename_with_ext = f"{image_filename}.png"
            self.label_files.append(os.path.join(folder_path, 'Labels', label_filename_with_ext))
        # Data augmentation and normalization for training,
        # just normalization for validation.
        if "val" == mode:
            self.transforms = transforms.Compose([
                transforms.CenterCrop((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406, 0], [0.229, 0.224, 0.225, 1])
            ])
        else:
            self.transforms = transforms.Compose([
                transforms.RandomHorizontalFlip(),
                transforms.RandomVerticalFlip(),
                transforms.RandomCrop((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406, 0], [0.229, 0.224, 0.225, 1])
            ])

    def __getitem__(self, index):
        """Return (image, label): float image (3, 224, 224) and an int64
        class-index mask (224, 224)."""
        img_path = self.img_files[index]
        label_path = self.label_files[index]
        image = Image.open(img_path)
        label = Image.open(label_path)
        # Concatenate image and label, to apply the same transform on both.
        # NOTE(review): assumes a 3-channel RGB image and a single-channel
        # index mask -- confirm for new datasets.
        image_np = np.asarray(image)
        label_np = np.asarray(label)
        new_shape = (image_np.shape[0], image_np.shape[1], image_np.shape[2] + 1)
        image_and_label_np = np.zeros(new_shape, image_np.dtype)
        image_and_label_np[:, :, 0:3] = image_np
        image_and_label_np[:, :, 3] = label_np
        # Convert back to PIL and run the (shared) transform pipeline.
        image_and_label = Image.fromarray(image_and_label_np)
        image_and_label = self.transforms(image_and_label)
        # Split the channels back into image and label.
        image = image_and_label[0:3, :, :]
        label = image_and_label[3, :, :].unsqueeze(0)
        # Undo ToTensor's division by 255.  Bug fix: round before casting --
        # the float32 round trip (v / 255 * 255) can land just below the
        # integer and .long() truncates, silently corrupting class ids.
        label = (label * 255).round()
        # Convert to int64 and remove the channel dimension.
        label = label.long().squeeze()
        return image, label

    def __len__(self):
        """Number of image/label pairs."""
        return len(self.img_files)
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/main_training.py | import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import os
import argparse
import pathlib
# Local import
from dataloader import DataLoaderSegmentation
from custom_model import initialize_model
from train import train_model
print("PyTorch Version: ",torch.__version__)
print("Torchvision Version: ",torchvision.__version__)
"""
Version requirements:
PyTorch Version: 1.4.0
Torchvision Version: 0.5.0
"""
def main(data_dir, dest_dir, num_classes, batch_size, num_epochs, keep_feature_extract, weight):
    """End-to-end DeepLabV3 fine-tuning entry point.

    Args:
        data_dir: dataset root containing train/ and val/ (Images/ + Labels/).
        dest_dir: output directory for checkpoints (created if missing).
        num_classes: number of segmentation classes (background included).
        batch_size: mini-batch size for both dataloaders.
        num_epochs: number of training epochs.
        keep_feature_extract: if True, only the newly-initialized layers
            (the ones left with requires_grad=True) are optimized.
        weight: optional per-class CrossEntropy weights; empty disables.
    """
    print("Initializing Datasets and Dataloaders...")
    # Create training and validation datasets
    image_datasets = {x: DataLoaderSegmentation(os.path.join(data_dir, x), x) for x in ['train', 'val']}
    # Create training and validation dataloaders
    dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}
    print("Initializing Model...")
    # Initialize model
    model_deeplabv3, input_size = initialize_model(num_classes, keep_feature_extract, use_pretrained=True)
    # Detect if we have a GPU available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Send the model to GPU
    model_deeplabv3 = model_deeplabv3.to(device)
    # Gather the parameters to be optimized/updated in this run. If we are
    #  finetuning we will be updating all parameters. However, if we are
    #  doing feature extract method, we will only update the parameters
    #  that we have just initialized, i.e. the parameters with requires_grad
    #  is True.
    params_to_update = model_deeplabv3.parameters()
    print("Params to learn:")
    if keep_feature_extract:
        params_to_update = []
        for name, param in model_deeplabv3.named_parameters():
            if param.requires_grad:
                params_to_update.append(param)
                print("\t", name)
    else:
        for name, param in model_deeplabv3.named_parameters():
            if param.requires_grad:
                print("\t", name)
    # Observe that all parameters are being optimized
    optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)
    # Setup the loss function
    criterion = nn.CrossEntropyLoss(weight=(torch.FloatTensor(weight).to(device) if weight else None))
    # Prepare output directory
    pathlib.Path(dest_dir).mkdir(parents=True, exist_ok=True)
    print("Train...")
    # Train and evaluate
    model_deeplabv3_state_dict, hist = train_model(model_deeplabv3, num_classes, dataloaders_dict, criterion, optimizer_ft, device, dest_dir, num_epochs=num_epochs)
    print("Save ...")
    torch.save(model_deeplabv3_state_dict, os.path.join(dest_dir, "best_DeepLabV3_Skydiver.pth"))
def args_preprocess():
    """Parse the command line and forward the arguments to main()."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "data_dir", help='Specify the dataset directory path, should contain train/Images, train/Labels, val/Images and val/Labels')
    parser.add_argument(
        "dest_dir", help='Specify the directory where model weights shall be stored.')
    parser.add_argument("--num_classes", default=5, type=int, help="Number of classes in the dataset, index 0 for no-label should be included in the count")
    parser.add_argument("--epochs", default=100, type=int, help="Number of epochs to train for")
    parser.add_argument("--batch_size", default=16, type=int, help="Batch size for training (change depending on how much memory you have)")
    parser.add_argument("--keep_feature_extract", action="store_true", help="Flag for feature extracting. When False, we finetune the whole model, when True we only update the reshaped layer params")
    parser.add_argument('-w', action='append', type=float, help="Add more weight to some classes. If this argument is used, then it should be called as many times as there are classes (see --num_classes)")
    args = parser.parse_args()
    # Per-class loss weights; empty list when -w was never given.
    weight = list(args.w) if args.w else []
    main(args.data_dir, args.dest_dir, args.num_classes, args.batch_size, args.epochs, args.keep_feature_extract, weight)
if __name__ == '__main__':
args_preprocess() | 4,292 | 39.5 | 205 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/custom_model.py | import torchvision
from torchvision import models
import torch
class DeepLabV3Wrapper(torch.nn.Module):
    """Adapts a segmentation model that returns a dict to a plain-tensor
    interface (e.g. so it can be traced with torch.jit.trace)."""

    def __init__(self, model):
        super(DeepLabV3Wrapper, self).__init__()
        self.model = model

    def forward(self, input):
        # Segmentation models return {'out': ...}; expose only 'out'.
        return self.model(input)['out']
def initialize_model(num_classes, keep_feature_extract=False, use_pretrained=True):
    """Build a DeepLabV3-ResNet101 with a fresh DeepLabHead for *num_classes*.

    The torchvision model is pretrained on a subset of COCO train2017, on the
    20 categories that are present in the Pascal VOC dataset.

    Args:
        num_classes: output channels of the new segmentation head.
        keep_feature_extract: if True, freeze all pretrained parameters so
            only the new head (added afterwards) is trainable.
        use_pretrained: download/use the pretrained backbone weights.

    Returns:
        (model, input_size): the model and the expected crop size (224).
    """
    model_deeplabv3 = models.segmentation.deeplabv3_resnet101(pretrained=use_pretrained, progress=True)
    # The auxiliary classifier is not used for fine-tuning here.
    model_deeplabv3.aux_classifier = None
    if keep_feature_extract:
        # Freeze everything BEFORE the new head is attached, so the head
        # (created below) keeps requires_grad=True.
        for param in model_deeplabv3.parameters():
            param.requires_grad = False
    input_size = 224
    # Replace the classification head; 2048 is the backbone output width.
    model_deeplabv3.classifier = torchvision.models.segmentation.deeplabv3.DeepLabHead(2048, num_classes)
    return model_deeplabv3, input_size
| 960 | 33.321429 | 124 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/main_inference.py | import torch
import numpy as np
from torchvision import transforms
import cv2
from PIL import Image
import custom_model
# Number of classes in the dataset
num_classes = 5
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Build the architecture without pretrained weights, then load our checkpoint.
model, input_size = custom_model.initialize_model(num_classes, keep_feature_extract=True, use_pretrained=False)
state_dict = torch.load("training_output_Skydiver_dataset_final/best_DeepLabV3_Skydiver.pth", map_location=device)
model = model.to(device)
model.load_state_dict(state_dict)
model.eval()
# Inference preprocessing: ImageNet normalization, RGB channels only.
transforms_image = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Run every 25th frame of the hard-coded sequence through the model and
# save both the color-mapped prediction and the (resized) input frame.
for idx in range(1, 3000, 25):
    image = Image.open(f"/tmp/pycharm_project_782/03.03.20_saut_4/{idx:06}.png")
    image_np = np.asarray(image)
    # image_np = cv2.resize(image_np, 0.5, 0.5, cv2.INTER_CUBIC)
    # Downscale to 30% before inference.
    width = int(image_np.shape[1] * 0.3)
    height = int(image_np.shape[0] * 0.3)
    dim = (width, height)
    image_np = cv2.resize(image_np, dim, interpolation=cv2.INTER_AREA)
    image = Image.fromarray(image_np)
    image = transforms_image(image)
    image = image.unsqueeze(0)
    image = image.to(device)
    outputs = model(image)["out"]
    # Per-pixel argmax over the class dimension -> predicted class map.
    _, preds = torch.max(outputs, 1)
    preds = preds.to("cpu")
    preds_np = preds.squeeze(0).cpu().numpy().astype(np.uint8)
    print(preds_np.shape)
    print(image_np.shape)
    # preds_np = cv2.cvtColor(preds_np, cv2.COLOR_GRAY2BGR)
    image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
    # Spread the small class ids (*50) across the HSV colormap range.
    preds_np_color = cv2.applyColorMap(preds_np * 50, cv2.COLORMAP_HSV)
    cv2.imwrite(f"./results/{idx:04}_segmentation.png", preds_np_color)
    cv2.imwrite(f"./results/{idx:04}_image.png", image_np)
| 1,775 | 27.190476 | 114 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/main_export.py | import torch
import custom_model
# Number of classes in the dataset
num_classes = 2
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Rebuild the architecture (no pretrained download) and load our checkpoint.
model_deeplabv3, input_size = custom_model.initialize_model(num_classes, keep_feature_extract=True, use_pretrained=False)
state_dict = torch.load("training_output_Skydiver_dataset_person/best_DeepLabV3_Skydiver.pth", map_location=device)
model_deeplabv3 = model_deeplabv3.to(device)
model_deeplabv3.load_state_dict(state_dict)
model_deeplabv3.eval()
# Wrap so forward() returns a plain tensor -- torch.jit.trace cannot
# handle the dict output of torchvision segmentation models.
model_deeplabv3wrapper = custom_model.DeepLabV3Wrapper(model_deeplabv3)
dummy_input = torch.rand(1, 3, input_size, input_size).to(device)
traced_script_module = torch.jit.trace(model_deeplabv3wrapper, dummy_input)
traced_script_module.save("training_output_Skydiver_dataset_person/best_deeplabv3_skydiver.pt")
| 824 | 34.869565 | 121 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sources/train.py | import os
import torch
import numpy as np
import time
import copy
import cv2
def debug_export_before_forward(inputs, labels, idx):
    """Dump the first image/label of a batch to PNG files for visual debugging.

    Un-normalizes the image (ImageNet mean/std) before saving.
    NOTE: operates on a numpy view of the tensor, so when the batch lives
    on the CPU the un-normalization mutates ``inputs`` in place.
    """
    img = inputs[0]
    img = img.to('cpu').numpy()
    # Invert Normalize(mean, std): x * std + mean, channel by channel.
    for ch, (std, mean) in enumerate([(0.229, 0.485), (0.224, 0.456), (0.225, 0.406)]):
        img[ch, :, :] = img[ch, :, :] * std + mean
    img = img * 255
    img = img.astype(np.uint8)
    lab = labels[0].to(torch.uint8).to('cpu').numpy()
    img = img.transpose([1, 2, 0])
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.imwrite(f"{idx:06}_im.png", img)
    cv2.imwrite(f"{idx:06}_la.png", lab)
def iou(pred, target, n_classes=3):
    """Per-class intersection-over-union, skipping background class 0.

    Classes absent from both *pred* and *target* are omitted, so the
    returned array may have fewer than ``n_classes - 1`` entries (and may
    be empty).
    """
    pred = pred.view(-1)
    target = target.view(-1)
    scores = []
    for cls in range(1, n_classes):  # class "0" (background) is ignored
        in_pred = pred == cls
        in_target = target == cls
        # Cast to long to prevent overflows on large masks.
        inter = (in_pred[in_target]).long().sum().data.cpu().item()
        union = in_pred.long().sum().data.cpu().item() + in_target.long().sum().data.cpu().item() - inter
        if union > 0:
            scores.append(float(inter) / float(max(union, 1)))
    return np.array(scores)
def train_model(model, num_classes, dataloaders, criterion, optimizer, device, dest_dir, num_epochs=25):
    """Train and validate `model`, tracking the best validation IoU.

    Args:
        model: segmentation network whose forward returns a dict with key 'out'.
        num_classes: number of classes, forwarded to `iou`.
        dataloaders: dict with 'train' and 'val' DataLoader instances.
        criterion: loss applied to (outputs, labels).
        optimizer: optimizer stepping on the model's parameters.
        device: torch.device the batches are moved to.
        dest_dir: directory receiving periodic checkpoints (every 25 epochs).
        num_epochs: total number of epochs to run.

    Returns:
        (best_model_state_dict, val_acc_history): a deep copy of the weights
        with the best validation accuracy, plus per-epoch validation accuracy.
    """
    since = time.time()
    val_acc_history = []
    best_model_state_dict = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    counter = 0
    for epoch in range(1, num_epochs+1):
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_iou_means = []
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                # Security, skip this iteration if the batch_size is 1
                if 1 == inputs.shape[0]:
                    print("Skipping iteration because batch_size = 1")
                    continue
                # Debug
                # debug_export_before_forward(inputs, labels, counter)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track history only while training
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)['out']
                    loss = criterion(outputs, labels)
                    _, preds = torch.max(outputs, 1)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics
                iou_mean = iou(preds, labels, num_classes).mean()
                running_loss += loss.item() * inputs.size(0)
                running_iou_means.append(iou_mean)
                # Increment counter
                counter = counter + 1
            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            # BUGFIX: `running_iou_means` is a list and can never be None, so the
            # old `is not None` test always took the first branch; an empty epoch
            # then produced np.array([]).mean() == nan. Check emptiness instead.
            if running_iou_means:
                epoch_acc = np.array(running_iou_means).mean()
            else:
                epoch_acc = 0.
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # deep copy the model when validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_state_dict = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(epoch_acc)
        # Save current model every 25 epochs
        if 0 == epoch%25:
            current_model_path = os.path.join(dest_dir, f"checkpoint_{epoch:04}_DeepLabV3_Skydiver.pth")
            print(f"Save current model : {current_model_path}")
            torch.save(model.state_dict(), current_model_path)
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    return best_model_state_dict, val_acc_history
| 4,494 | 33.05303 | 112 | py |
DeepLabV3FineTuning | DeepLabV3FineTuning-master/sample_dataset/convert_cvat_xml_to_label_image.py | import argparse
import numpy as np
import os
import cv2
import xml.etree.ElementTree as ET
from collections import defaultdict
from pathlib import Path
import shutil
def create_path_list(xml_directory):
    """Recursively collect the paths of all XML files under `xml_directory`.

    A file qualifies when '.xml' appears anywhere in its name; the resulting
    list is returned in sorted order.
    """
    found = []
    for root_dir, _dirs, filenames in os.walk(xml_directory):
        found.extend(os.path.join(root_dir, name)
                     for name in filenames if '.xml' in name)
    return sorted(found)
def convert_xml_to_label_image(xml_path, label_classes_dict, label_priority_order):
    """Rasterize a LabelMe-style XML annotation into a uint8 label image.

    Polygons are filled in `label_priority_order`; a later label overwrites
    earlier ones wherever polygons overlap. Pixel values come from
    `label_classes_dict`; uncovered pixels stay 0.
    """
    root = ET.parse(xml_path).getroot()
    # Image dimensions come from the <imagesize> element.
    size_node = root.find('imagesize')
    height = int(size_node.find("nrows").text)
    width = int(size_node.find("ncols").text)
    # Group polygon vertex lists by their label name.
    polygons_by_label = defaultdict(list)
    for obj_node in root.iter('object'):
        name = obj_node.find("name").text
        vertices = []
        for pt_node in obj_node.find("polygon").iter("pt"):
            x_val = int(float(pt_node.find("x").text))
            y_val = int(float(pt_node.find("y").text))
            vertices.append([x_val, y_val])
        polygons_by_label[name].append(vertices)
    # Paint polygons onto a blank image, lowest-priority labels first.
    label_image = np.zeros((height, width), dtype=np.uint8)
    for name in label_priority_order:
        for vertices in polygons_by_label[name]:
            poly = np.asarray(vertices, dtype=np.int32).reshape((-1, 1, 2))
            cv2.fillPoly(img=label_image, pts=[poly], color=label_classes_dict[name])
    return label_image
def main(xml_directory, label_directory):
    """Convert every XML annotation under `xml_directory` into a label PNG
    named 000001.png, 000002.png, ... inside `label_directory`."""
    # Get list of xml annotations
    xml_annotation_path_list = create_path_list(xml_directory)
    # Class-name -> pixel value mapping; unknown names default to 0 (background).
    label_classes_dict = defaultdict(int)
    label_classes_dict["person"] = 1
    label_classes_dict["airplane"] = 2
    label_classes_dict["ground"] = 3
    label_classes_dict["sky"] = 4
    # Writing order of polygons :
    # When a pixel belongs to several polygons, the label image will
    # only contain the class id of the last class in the list below
    label_priority_order = ["sky", "ground", "airplane", "person"]
    # label_priority_order = ["person"]
    # Create dataset directories
    dataset_labels_path = Path(label_directory)
    dataset_labels_path.mkdir(parents=True, exist_ok=True)
    # Loop over elements
    image_counter = 1
    for xml_path in xml_annotation_path_list:
        # Convert xml into an image of labels
        label_image = convert_xml_to_label_image(xml_path, label_classes_dict, label_priority_order)
        # Save label_image
        label_image_path = os.path.join(dataset_labels_path, f"{image_counter:06}.png")
        cv2.imwrite(label_image_path, label_image)
        image_counter = image_counter + 1
if __name__ == '__main__':
    # Command line arguments: source directory of XML annotations and
    # destination directory for the generated label images.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "xml_directory", help='Specify the path to the directory containing xml annotations. Expected format is export from CVAT "LabelMe ZIP 3.0 for images"')
    parser.add_argument(
        "label_directory", help='Specify the path to the destination directory which will contain the labels')
    args = parser.parse_args()
main(args.xml_directory, args.label_directory) | 3,359 | 32.939394 | 159 | py |
platpal | platpal-master/delete.py | #!/usr/bin/env python
import re
import sys
import os
from os.path import abspath, dirname, exists, join
from subprocess import call
# path
OPT_WKS = abspath(dirname(__file__))
OPT_CMD = join(OPT_WKS, "batch.py")
OPT_DIR = join(OPT_WKS, "vmware", "vtdoc", "analyze", "svendor")
OPT_TCE = join(OPT_WKS, "vmware", "trace")
# Index rows look like: <numeric id>,"<label>",<count>
RE_PTN = re.compile("^(\d+),\"(.*)\",\d+$")
# main
if __name__ == "__main__":
    # Usage: delete.py <vendor> <label> <prod> — removes the .log/.out trace
    # files (both win and mac variants) for every sample hash under the label.
    if len(sys.argv) < 4 :
        sys.exit("Usage: %s <vendor> <label> <prod> <opts>" % sys.argv[0])
    vendor = sys.argv[1]
    label = sys.argv[2]
    prod = sys.argv[3]
    pvd = join(OPT_DIR, vendor)
    if not exists(pvd):
        sys.exit("vendor %s does not exist" % vendor)
    # Resolve the label to its numeric file name via the vendor index.
    found = None
    with open(join(pvd, "index"), "r") as f:
        for l in f:
            m = RE_PTN.match(l.strip())
            if m is None:
                raise Exception("Invalid line: %s" % l)
            if m.group(2) == label:
                found = m.group(1)
                break
    if found is None:
        sys.exit("label %s does not exist" % label)
    pn = join(pvd, found)
    # First CSV column of each row is the sample hash.
    with open(pn, "r") as f:
        hashes = [l.strip().split(",")[0] for l in f]
    # Delete each trace artifact if present (missing files are skipped).
    for h in hashes:
        p = join(OPT_TCE, "mpcc-x64-win-10-%s-%s.log" % (prod, h))
        if exists(p):
            os.remove(p)
        p = join(OPT_TCE, "mpcc-x64-win-10-%s-%s.out" % (prod, h))
        if exists(p):
            os.remove(p)
        p = join(OPT_TCE, "mpcc-x64-mac-11-%s-%s.log" % (prod, h))
        if exists(p):
            os.remove(p)
        p = join(OPT_TCE, "mpcc-x64-mac-11-%s-%s.out" % (prod, h))
        if exists(p):
            os.remove(p)
| 1,718 | 25.859375 | 80 | py |
platpal | platpal-master/batch.py | #!/usr/bin/env python
import os
import sys
import json
from os.path import abspath, basename, dirname, exists, join
from subprocess import call
from collections import OrderedDict
# path
OPT_WKS = abspath(dirname(__file__))
OPT_CMD = join(OPT_WKS, "workflow.py")
# main
if __name__ == "__main__":
    # Usage: batch.py <list> <product> [-v|-a] — run workflow.py for every
    # sample hash in <list>. Python 2 script (print statement / raw_input).
    if len(sys.argv) < 3 :
        sys.exit("Usage: %s <list> <product> <opts>" % sys.argv[0])
    pn = sys.argv[1]
    tag = sys.argv[2]
    # First CSV column of each row is the sample hash.
    with open(pn, "r") as f:
        hashes = [l.strip().split(",")[0] for l in f]
    for h in hashes:
        # -v forwards workflow's view-only mode; otherwise run the sample.
        if len(sys.argv) == 4 and sys.argv[3] == "-v":
            cmd = ["python", OPT_CMD, "both", tag, h, "-v"]
        else:
            cmd = ["python", OPT_CMD, "both", tag, h]
        print "=== %s ===" % h
        call(cmd)
        # -a (automatic) skips the interactive pause between samples.
        if len(sys.argv) == 4 and sys.argv[3] == "-a":
            continue
        raw_input("\nPress enter for next sample\n")
| 949 | 24 | 67 | py |
platpal | platpal-master/workflow.py | #!/usr/bin/env python
import sys
from os.path import abspath, exists, join
from subprocess import check_call
from argparse import ArgumentParser
# paths
OPT_PATH_CUR = abspath(join(__file__, ".."))
OPT_PATH_RAW = join(OPT_PATH_CUR, "vmware", "trace")
# cmds
OPT_CMD_VMBOX = join(OPT_PATH_CUR, "vmware", "box.py")
OPT_CMD_STRACE = join(OPT_PATH_CUR, "analyze", "strace.py")
OPT_CMD_PLUGIN = join(OPT_PATH_CUR, "analyze", "plugin.py")
# utils
def prefix(plat, tag, sample):
    """Build the trace-file name prefix for one platform/product/sample."""
    templates = {
        "win": "mpcc-x64-win-10-%s-%s",
        "mac": "mpcc-x64-mac-11-%s-%s",
    }
    if plat not in templates:
        raise Exception("Unknown platform %s" % plat)
    return templates[plat] % (tag, sample)
def path_raw_strace(plat, tag, sample):
    """Path of the raw syscall-trace file (.out) for this run."""
    return join(OPT_PATH_RAW, prefix(plat, tag, sample) + ".out")
def path_raw_plugin(plat, tag, sample):
    """Path of the raw plugin-log file (.log) for this run."""
    return join(OPT_PATH_RAW, prefix(plat, tag, sample) + ".log")
# core
def run_single(plat, tag, sample, force = False):
try:
if force:
check_call([OPT_CMD_VMBOX, plat, tag, sample, "-f"])
else:
check_call([OPT_CMD_VMBOX, plat, tag, sample])
except Exception as err:
raise err
def run_pair(tag, sample, force = False):
try:
for plat in ["mac", "win"]:
run_single(plat, tag, sample, force = force)
for plat in ["mac", "win"]:
check_call([OPT_CMD_STRACE, plat, tag, sample])
check_call([OPT_CMD_PLUGIN, "compare",
path_raw_plugin("mac", tag, sample),
path_raw_plugin("win", tag, sample)])
except Exception as err:
print err.message
def run_view(tag, sample):
try:
for plat in ["mac", "win"]:
check_call([OPT_CMD_STRACE, plat, tag, sample])
check_call([OPT_CMD_PLUGIN, "compare",
path_raw_plugin("mac", tag, sample),
path_raw_plugin("win", tag, sample)])
except Exception as err:
print err.message
# main
if __name__ == "__main__":
    # Command line: <plat> <tag> <sample> [-f force rerun] [-v view only]
    parser = ArgumentParser()
    known_tags = (
        "dc.07.20033",
        "dc.08.20082",
        "dc.09.20069",
        "dc.10.20056",
        "dc.10.20060",
        "dc.16.20039",
        "dc.16.20045",
        "dc.17.20050",
        "15.06.30033",
        "11.0.00",
        "11.0.10",
        "10.0.0",
        "10.1.0",
        "10.1.4",
    )
    parser.add_argument("plat", help="Platform", choices=("mac", "win", "both"))
    parser.add_argument("tag", help="Tag", choices=known_tags)
    parser.add_argument("sample", help="Sample")
    parser.add_argument("-f", "--force", action="store_true")
    parser.add_argument("-v", "--view", action="store_true")
    args = parser.parse_args()
    # Dispatch: view-only, both platforms, or a single-platform run.
    if args.view:
        run_view(args.tag, args.sample)
    elif args.plat == "both":
        run_pair(args.tag, args.sample, args.force)
    else:
        run_single(args.plat, args.tag, args.sample, args.force)
| 3,127 | 27.962963 | 68 | py |
platpal | platpal-master/vendor.py | #!/usr/bin/env python
import re
import sys
from os.path import abspath, dirname, exists, join
from subprocess import call
# path
OPT_WKS = abspath(dirname(__file__))
OPT_CMD = join(OPT_WKS, "batch.py")
OPT_DIR = join(OPT_WKS, "vmware", "vtdoc", "analyze", "svendor")
RE_PTN = re.compile("^(\d+),\"(.*)\",\d+$")
# main
if __name__ == "__main__":
    # Usage: vendor.py <vendor> <label> <prod> — look up the label's sample
    # list in the vendor index and hand it to batch.py. Python 2 script.
    if len(sys.argv) < 4 :
        sys.exit("Usage: %s <vendor> <label> <prod> <opts>" % sys.argv[0])
    vendor = sys.argv[1]
    label = sys.argv[2]
    prod = sys.argv[3]
    pvd = join(OPT_DIR, vendor)
    if not exists(pvd):
        sys.exit("vendor %s does not exist" % vendor)
    # Resolve the label to its numeric file name via the vendor index.
    found = None
    with open(join(pvd, "index"), "r") as f:
        for l in f:
            m = RE_PTN.match(l.strip())
            if m is None:
                raise Exception("Invalid line: %s" % l)
            if m.group(2) == label:
                found = m.group(1)
                break
    if found is None:
        sys.exit("label %s does not exist" % label)
    pn = join(pvd, found)
    cmd = ["python", OPT_CMD, pn, prod]
    print "%s - %s - %s" % (vendor, label, prod)
    call(cmd)
| 1,189 | 24.319149 | 80 | py |
platpal | platpal-master/result/exploits/cve-to-hash.py | #!/usr/bin/env python
import sys
from os.path import exists, join
from datetime import datetime
# Directory holding one CSV file per CVE id (lower-case file names).
OPT_CVE_DIR = "/Users/meng/workbench/mpcc/vmware/vtdoc/analyze/exploit"
if __name__ == "__main__":
    # Usage: cve-to-hash.py <list of CVE ids> — print the first CSV column
    # (the sample hash) of every row in each CVE file. Python 2 script.
    pn = sys.argv[1]
    # NOTE(review): `hashes` is never used; hashes are printed directly below.
    hashes = []
    with open(pn, "r") as f:
        for l in f:
            l = l.strip()
            with open(join(OPT_CVE_DIR, l.lower()), "r") as f2:
                for r in f2:
                    toks = r.strip().split(",")
                    print toks[0]
| 488 | 21.227273 | 71 | py |
platpal | platpal-master/result/svendors/vendor-to-hash.py | #!/usr/bin/env python
import re
import sys
from os.path import abspath, dirname, exists, join
from subprocess import call
# path
OPT_DIR = "/Users/meng/workbench/mpcc/vmware/vtdoc/analyze/svendor"
RE_PTN = re.compile("^(\d+),\"(.*)\",\d+$")
# main
if __name__ == "__main__":
pn = sys.argv[1]
hashes = set()
with open(pn, "r") as f:
for l in f:
toks = l.strip().split(",")
vendor = toks[0]
label = toks[1][1:-1]
pvd = join(OPT_DIR, vendor)
if not exists(pvd):
sys.exit("vendor %s does not exist" % vendor)
found = None
with open(join(pvd, "index"), "r") as f:
for l in f:
m = RE_PTN.match(l.strip())
if m is None:
raise Exception("Invalid line: %s" % l)
if m.group(2) == label:
found = m.group(1)
break
if found is None:
sys.exit("label %s does not exist" % label)
pn = join(pvd, found)
with open(pn, "r") as f2:
for l in f2:
hashes.add(l.split(",")[0])
for h in hashes:
print h
| 1,267 | 23.862745 | 67 | py |
platpal | platpal-master/analyze/plugin.py | #!/usr/bin/env python
import re
import sys
from os.path import exists, join
from collections import OrderedDict
import conf
# consts
OPT_SUFFIX = "log"
# regex patterns
RE_LINE_PTN = re.compile("^\[(.*? .*?).*?\] (\d+/\d+) (\w+) (.*)$")
RE_SEC_PTN = re.compile("^([\w-]+):(\w+)$")
RE_CBK_PTN = re.compile("^(\w+)$")
RE_AS_PTN = re.compile("^(\w+)\((.*?)\)$")
RE_COS_DATA_PTN = re.compile("^(\d+) (\w+) (.*) \((.*)\)$")
RE_COS_META_PTN = re.compile("^(\w+):(.*)$")
RE_CONS_SCAN_PTN = re.compile("^\[(\d+)-(\d+)\]\[(\d+)\](.*)$")
RE_PD_DATA_PTN = re.compile("^\((\w+)\)(.*)$")
RE_PD_META_PTN = re.compile("^(\w+):(.*)$")
RE_FORM_ELEM_PTN = re.compile("^\[(.*)\]\"(.*?)\"=\"(.*)\"$")
RE_JS_DUMP_PTN = re.compile("^DUMP:(\d+):(\d+)$")
RE_JS_STCK_PTN = re.compile("^STCK:(.*)$")
RE_JS_EXEC_PTN = re.compile("^EXEC:(\w+)$")
# utils
def path_raw(plat, tag, sample):
    """Path of the raw plugin-log file (.log) for this platform/tag/sample."""
    px = conf.prefix(plat, tag, sample)
    return join(conf.OPT_PATH_RAW, "%s.%s" % (px, OPT_SUFFIX))
# structs
class Record:
    """Base class for one parsed plugin-log entry.

    `cat`/`sub` identify the record type; `ptid` is the "pid/tid" string of
    the emitting process/thread. Matching deliberately ignores `ptid` so that
    runs on different platforms can be compared entry by entry.
    """
    def __init__(self, cat, sub, ptid):
        self.cat = cat
        self.sub = sub
        self.ptid = ptid

    def match(self, other):
        """Two records match when category and sub-category agree."""
        return (self.cat, self.sub) == (other.cat, other.sub)
class Record_CBK(Record):
    """Callback record (CBK): matches on the callback name."""
    def __init__(self, ptid, name):
        Record.__init__(self, "CBK", "", ptid)
        self.name = name
    def match(self, other):
        if not Record.match(self, other):
            return False
        return self.name == other.name

class Record_AS_RW(Record):
    """AS read/write record: path, action, position and byte count."""
    def __init__(self, ptid, path, act, pos, cnt):
        Record.__init__(self, "AS", "RW", ptid)
        self.path = path
        self.act = act
        self.pos = pos
        self.cnt = cnt
    def match(self, other):
        if not Record.match(self, other):
            return False
        return (self.path, self.act, self.pos, self.cnt) == \
                (other.path, other.act, other.pos, other.cnt)

class Record_AS_OC(Record):
    """AS open/close record: path and action."""
    def __init__(self, ptid, path, act):
        Record.__init__(self, "AS", "OC", ptid)
        self.path = path
        self.act = act
    def match(self, other):
        if not Record.match(self, other):
            return False
        return (self.path, self.act) == (other.path, other.act)

class Record_COS_Data(Record):
    """COS data record: object id, type, content and collected fields."""
    def __init__(self, ptid, oid, otype, content, collects):
        Record.__init__(self, "COS", "Data", ptid)
        self.oid = oid
        self.otype = otype
        self.content = content
        self.collects = collects
    def match(self, other):
        if not Record.match(self, other):
            return False
        return (self.oid, self.otype, self.content, self.collects) == \
                (other.oid, other.otype, other.content, other.collects)

class Record_COS_Meta(Record):
    """COS metadata record: key/value pair."""
    def __init__(self, ptid, key, val):
        Record.__init__(self, "COS", "Meta", ptid)
        self.key = key
        self.val = val
    def match(self, other):
        if not Record.match(self, other):
            return False
        return (self.key, self.val) == (other.key, other.val)
class Record_CONS_Scan(Record):
    """Console scan record: object type/category, stack marker and content."""
    def __init__(self, ptid, otype, ocat, stack, content):
        Record.__init__(self, "CONS", "Scan", ptid)
        self.otype = otype
        self.ocat = ocat
        self.stack = stack
        self.content = content
    def match(self, other):
        if not Record.match(self, other):
            return False
        return (self.otype, self.ocat, self.stack, self.content) == \
                (other.otype, other.ocat, other.stack, other.content)

class Record_PD_Data(Record):
    """PD data record: object type and content."""
    def __init__(self, ptid, otype, content):
        Record.__init__(self, "PD", "Data", ptid)
        self.otype = otype
        self.content = content
    def match(self, other):
        if not Record.match(self, other):
            return False
        return (self.otype, self.content) == (other.otype, other.content)

class Record_PD_Meta(Record):
    """PD metadata record: key/value pair."""
    def __init__(self, ptid, key, val):
        Record.__init__(self, "PD", "Meta", ptid)
        self.key = key
        self.val = val
    def match(self, other):
        if not Record.match(self, other):
            return False
        return (self.key, self.val) == (other.key, other.val)

class Record_Form_Elem(Record):
    """Form element record: field key/value pair."""
    def __init__(self, ptid, key, val):
        Record.__init__(self, "Form", "Elem", ptid)
        self.key = key
        self.val = val
    def match(self, other):
        if not Record.match(self, other):
            return False
        return (self.key, self.val) == (other.key, other.val)

class Record_JS_DUMP(Record):
    """JS dump record; the dumped content is attached later via setContent."""
    def __init__(self, ptid, otype, count):
        Record.__init__(self, "JS", "DUMP", ptid)
        self.otype = otype
        self.count = count
        self.content = None
    def setContent(self, content):
        self.content = content
    def match(self, other):
        if not Record.match(self, other):
            return False
        return (self.otype, self.count, self.content) == \
                (other.otype, other.count, other.content)

class Record_JS_STCK(Record):
    """JS stack-trace record: matches on the raw stack string."""
    def __init__(self, ptid, content):
        Record.__init__(self, "JS", "STCK", ptid)
        self.content = content
    def match(self, other):
        if not Record.match(self, other):
            return False
        return self.content == other.content

class Record_JS_EXEC(Record):
    """JS exec record; executed content is attached later via setContent."""
    def __init__(self, ptid, otype):
        Record.__init__(self, "JS", "EXEC", ptid)
        self.otype = otype
        self.content = None
    def setContent(self, content):
        self.content = content
    def match(self, other):
        if not Record.match(self, other):
            return False
        return (self.otype, self.content) == (other.otype, other.content)
# transform
def transform(pn):
    """Read a plugin log and flatten multi-line '<<< ... >>>' payload blocks.

    Lines between a '<<<' and '>>>' marker pair are joined with the literal
    token '<<<>>>' and glued onto the preceding log line (together with the
    line that follows the block), so each log entry ends up on one line.
    """
    lines = []
    glue_at = set()
    buf = None
    with open(pn, "r") as fh:
        for raw in fh:
            raw = raw.rstrip()
            if buf is not None:
                if raw == ">>>":
                    # Mark the flattened payload and the next line for gluing.
                    pos = len(lines)
                    glue_at.add(pos)
                    glue_at.add(pos + 1)
                    lines.append("<<<>>>".join(buf))
                    buf = None
                else:
                    buf.append(raw)
                continue
            if raw == "<<<":
                buf = []
                continue
            lines.append(raw)
    merged = []
    for pos, text in enumerate(lines):
        if pos in glue_at:
            merged[-1] += text
        else:
            merged.append(text)
    return merged
# parser
def parse_asdoc(ptid, act, args):
    """Build an AS record from an action and its argument string.

    Raises Exception for any action other than open/close/read/write.
    """
    if act in ("open", "close"):
        basename = args.split(",")[0].strip().split("/")[-1]
        return Record_AS_OC(ptid, basename, act)
    if act in ("read", "write"):
        fields = args.split(",")
        basename = fields[0].strip().split("/")[-1]
        # fields[1] is the file position, fields[3] the byte count.
        return Record_AS_RW(ptid, basename, act,
                int(fields[1].strip()), int(fields[3].strip()))
    raise Exception("Unknown action: %s" % act)
def parse_cosdoc(ptid, content):
    """Parse a COS line into a Data or Meta record; raises if neither fits."""
    data = RE_COS_DATA_PTN.match(content)
    if data is not None:
        return Record_COS_Data(ptid, data.group(1), data.group(2),
                data.group(3), data.group(4))
    meta = RE_COS_META_PTN.match(content)
    if meta is not None:
        return Record_COS_Meta(ptid, meta.group(1), meta.group(2))
    raise Exception("Invalid COS line: %s" % content)
def parse_cons(ptid, content):
    """Parse a CONS line into a Scan record; raises when the pattern fails."""
    m = RE_CONS_SCAN_PTN.match(content)
    if m is None:
        raise Exception("Invalid Cons line: %s" % content)
    return Record_CONS_Scan(ptid, m.group(1), m.group(2),
            m.group(3), m.group(4))
def parse_pddoc(ptid, content):
    """Parse a PD line into a Data or Meta record; raises if neither fits."""
    data = RE_PD_DATA_PTN.match(content)
    if data is not None:
        return Record_PD_Data(ptid, data.group(1), data.group(2))
    meta = RE_PD_META_PTN.match(content)
    if meta is not None:
        return Record_PD_Meta(ptid, meta.group(1), meta.group(2))
    raise Exception("Invalid PD line: %s" % content)
def parse_form(ptid, content):
    """Parse a FORM line into a form-element record; raises on mismatch."""
    m = RE_FORM_ELEM_PTN.match(content)
    if m is None:
        raise Exception("Invalid Form line: %s" % content)
    return Record_Form_Elem(ptid, m.group(1), m.group(2))
def parse_file(pn):
    """Parse a plugin log into an ordered {section name: [Record, ...]} table.

    Sections are delimited by SEC <name>:BEGIN / <name>:END lines; records
    seen outside any section are dropped (CBK/AS) and any still-open section
    at EOF is stored under the key "Unclosed". Raises on malformed lines,
    ERR entries, nested or repeated sections.
    """
    SECDICT = OrderedDict()
    CURRENT = None
    # NOTE(review): `cont` and `pending` are assigned but their values are
    # never read back — `pending` only aliases the record just appended.
    cont = None
    pending = None
    for l in transform(pn):
        m = RE_LINE_PTN.match(l)
        if m is None:
            raise Exception("Invalid line: %s" % l)
        ts = m.group(1)
        ptid = m.group(2)
        cat = m.group(3)
        content = m.group(4)
        if cat == "ERR":
            raise Exception("Error detected: %s" % l)
        elif cat == "SEC":
            # Section marker: open or close a named record list.
            m = RE_SEC_PTN.match(content)
            if m is None:
                raise Exception("Invalid SEC line: %s" % l)
            if m.group(2) == "BEGIN":
                if CURRENT is not None:
                    raise Exception("Nested sections not allowed: %s" % l)
                if m.group(1) in SECDICT:
                    raise Exception("Repeated section not allowed: %s" % l)
                SECDICT[m.group(1)] = []
                CURRENT = []
            elif m.group(2) == "END":
                if CURRENT is None:
                    raise Exception("Section ended without begin: %s" % l)
                if m.group(1) not in SECDICT:
                    raise Exception("Section not captured in table: %s" % l)
                SECDICT[m.group(1)].extend(CURRENT)
                CURRENT = None
            else:
                raise Exception("Invalid section mark: %s" % l)
        elif cat == "CBK":
            # Callback entries outside a section are silently skipped.
            if CURRENT is None:
                continue
            m = RE_CBK_PTN.match(content)
            if m is None:
                raise Exception("Invalid CBK line: %s" % l)
            CURRENT.append(Record_CBK(ptid, m.group(1)))
            continue
        elif cat == "AS":
            if CURRENT is None:
                continue
            m = RE_AS_PTN.match(content)
            if m is None:
                raise Exception("Invalid AS line: %s" % l)
            act = m.group(1)
            args = m.group(2)
            CURRENT.append(parse_asdoc(ptid, act, args))
        elif cat == "COS":
            CURRENT.append(parse_cosdoc(ptid, content))
        elif cat == "CONS":
            CURRENT.append(parse_cons(ptid, content))
        elif cat == "PD":
            CURRENT.append(parse_pddoc(ptid, content))
        elif cat == "FORM":
            CURRENT.append(parse_form(ptid, content))
        elif cat == "JS":
            # JS lines come in three flavors; try DUMP, EXEC, then STCK.
            m = RE_JS_DUMP_PTN.match(content)
            if m is not None:
                pending = Record_JS_DUMP(ptid, m.group(1), m.group(2))
                CURRENT.append(pending)
                continue
            m = RE_JS_EXEC_PTN.match(content)
            if m is not None:
                pending = Record_JS_EXEC(ptid, m.group(1))
                CURRENT.append(pending)
                continue
            m = RE_JS_STCK_PTN.match(content)
            if m is not None:
                pending = Record_JS_STCK(ptid, m.group(1))
                CURRENT.append(pending)
                continue
            raise Exception("Invalid JS line: %s" % l)
        else:
            raise Exception("Unknown category: %s" % l)
    # Preserve any section that never saw its END marker.
    if CURRENT is not None:
        SECDICT["Unclosed"] = []
        SECDICT["Unclosed"].extend(CURRENT)
    return SECDICT
# compare
def compare_records(r1, r2):
    """Compare two parsed section tables and describe the first difference.

    Every section of `r1` must exist in `r2`; the "AVLoad" and "Drive"
    sections are exempt from entry comparison. Returns a human-readable
    mismatch description, or an "All matched" summary.
    """
    for section in r1:
        if section not in r2:
            return "Section mismatch: %s" % section
        # These sections are inherently platform-specific; presence suffices.
        if section in ("AVLoad", "Drive"):
            continue
        left = r1[section]
        right = r2[section]
        if len(left) != len(right):
            return "Entry number mismatch: %s (%d vs %d)" % \
                    (section, len(left), len(right))
        for left_rec, right_rec in zip(left, right):
            if not left_rec.match(right_rec):
                return "Entry mismatch: %s (%s vs %s)" % \
                        (section, left_rec, right_rec)
    return "All matched (%d sections)" % len(r1)
def compare_plats(tag, sample):
    """Parse and compare the mac and win plugin logs for one sample.

    Returns the compare_records() summary string, or None when either
    platform's log file is missing.
    """
    p1 = path_raw("mac", tag, sample)
    p2 = path_raw("win", tag, sample)
    if exists(p1) and exists(p2):
        return compare_records(parse_file(p1), parse_file(p2))
    else:
        return None
# main
if __name__ == "__main__":
    # Usage: plugin.py parse <log> | plugin.py compare <log1> <log2>
    # Python 2 script (print statements).
    act = sys.argv[1]
    if act == "parse":
        # Print each section name with its record count.
        result = parse_file(sys.argv[2])
        for k in result:
            print "%s, %d" % (k, len(result[k]))
    elif act == "compare":
        r1 = parse_file(sys.argv[2])
        r2 = parse_file(sys.argv[3])
        result = compare_records(r1, r2)
        if result is not None:
            print result
| 12,657 | 28.165899 | 77 | py |
platpal | platpal-master/analyze/script.py | #!/usr/bin/env python
import os
import re
import sys
from os.path import exists, join, getsize
import conf
# consts
OPT_SUFFIX = "log"
# regex patterns
RE_LINE_PTN = re.compile("^\[(.*? .*?).*?\] (\d+/\d+) (\w+) (.*)$")
RE_CONS_PTN = re.compile("^\[(\d+)-(\d+)\]\[(\d+)\](.*)$")
# utils
def path_raw(plat, tag, sample):
    """Path of the raw plugin log (.log) in the trace directory."""
    px = conf.prefix(plat, tag, sample)
    return join(conf.OPT_PATH_RAW, "%s.%s" % (px, OPT_SUFFIX))
def path_dump(plat, tag, sample):
    """Path of the extracted-script dump (.log) in the intermediate directory."""
    px = conf.prefix(plat, tag, sample)
    return join(conf.OPT_PATH_MID, "%s.%s" % (px, OPT_SUFFIX))
def parse(pn):
    """Extract multi-line payloads that follow a matching CONS marker.

    Reads the log at `pn` and returns the list of '<<< ... >>>' payload blocks
    (joined with newlines) that were announced by a CONS line whose first
    field equals "225"; all other payload blocks are discarded.
    """
    CONTENT = []
    SECTION = False
    CURRENT = None
    with open(pn, "r") as f:
        for l in f:
            l = l.rstrip()
            if CURRENT is not None:
                # Inside a payload block: collect lines until the '>>>' mark.
                if l == ">>>":
                    if SECTION:
                        CONTENT.append("\n".join(CURRENT))
                    SECTION = False
                    CURRENT = None
                else:
                    CURRENT.append(l)
                continue
            if l == "<<<":
                CURRENT = []
                continue
            m = RE_LINE_PTN.match(l)
            if m is None:
                continue
            cat = m.group(3)
            content = m.group(4)
            if cat == "CONS":
                m = RE_CONS_PTN.match(content)
                if m is None:
                    raise Exception("Invalid CONS line: %s" % l)
                # "225" appears to be the console-message id whose payload
                # carries extracted script text — TODO confirm against plugin.
                if m.group(1) == "225":
                    SECTION = True
    return CONTENT
def dump(content, pn):
    """Write extracted script snippets to `pn`, one per '<<<<< ... >>>>>' block.

    Only snippets containing "var" or "function" are kept (JavaScript
    heuristic); surrounding double quotes are stripped. The file is removed
    again if nothing was written. Python 2 (print >> f).
    """
    with open(pn, "w") as f:
        for l in content:
            if "var" not in l and "function" not in l:
                continue
            # Unwrap snippets that were logged as a quoted string.
            if l[0] == '"' and l[-1] == '"':
                l = l[1:-1]
            print >> f, "<<<<<\n%s\n>>>>>\n" % l
    if getsize(pn) == 0:
        os.remove(pn)
# main
if __name__ == "__main__":
    # Usage: script.py <list> <product> <platform> — extract script payloads
    # from each sample's raw plugin log into the intermediate dump directory.
    # BUGFIX: the guard used `< 3` but sys.argv[3] (platform) is read below,
    # so running with only two arguments crashed with IndexError instead of
    # printing the usage line. The guard now requires all three arguments.
    if len(sys.argv) < 4:
        sys.exit("Usage: %s <list> <product> <platform>" % sys.argv[0])
    pn = sys.argv[1]
    tag = sys.argv[2]
    plat = sys.argv[3]
    # First CSV column of each row is the sample hash.
    with open(pn, "r") as f:
        hashes = [l.strip().split(",")[0] for l in f]
    for h in hashes:
        praw = path_raw(plat, tag, h)
        if not exists(praw):
            continue
        pdump = path_dump(plat, tag, h)
        result = parse(praw)
        if len(result) == 0:
            continue
        dump(result, pdump)
| 2,475 | 22.140187 | 75 | py |
platpal | platpal-master/analyze/collect.py | #!/usr/bin/env python
import sys
import strace
def process(plat, prod, hashes, detail = True):
    """Aggregate file/network/exec activity across samples and print it.

    With detail=True, prints per-sample sections for samples that showed any
    activity; otherwise prints the deduplicated, sorted union of all
    observations. Python 2 (print statements).
    """
    fops = set()
    nets = set()
    exes = set()
    abnormal = dict()
    # find suitable parser
    # NOTE(review): an unsupported `plat` leaves `fn` unbound and the loop
    # below raises NameError — callers only pass "mac"/"win" today.
    if plat == "mac":
        fn = strace.parse_mac
    elif plat == "win":
        fn = strace.parse_win
    # parse and collect
    for h in hashes:
        r = fn(prod, h)
        if r is None:
            continue
        fops.update(r["fops"])
        nets.update(r["nets"])
        exes.update(r["exes"])
        # Samples with no observed activity are not listed individually.
        if len(r["fops"]) == 0 and len(r["nets"]) == 0 and len(r["exes"]) == 0:
            continue
        if h not in abnormal:
            abnormal[h] = {"fops": [], "nets": [], "exes": []}
        for i in r["fops"]:
            abnormal[h]["fops"].append(i)
        for i in r["nets"]:
            abnormal[h]["nets"].append(i)
        for i in r["exes"]:
            abnormal[h]["exes"].append(i)
    if detail:
        for h in abnormal:
            print "=== %s ===" % h
            for i in abnormal[h]["fops"]:
                print i
            for i in abnormal[h]["nets"]:
                print i
            for i in abnormal[h]["exes"]:
                print i
    else:
        for i in sorted(fops):
            print i
        for i in sorted(nets):
            print i
        for i in sorted(exes):
            print i
if __name__ == "__main__":
    # Usage: collect.py <list> <platform> <product> [-s]
    # -s prints the aggregated summary instead of per-sample detail.
    if len(sys.argv) < 4:
        sys.exit("Usage: %s <list> <platform> <product> <scan>" % sys.argv[0])
    pn = sys.argv[1]
    plat = sys.argv[2]
    prod = sys.argv[3]
    with open(pn, "r") as f:
        hashes = [l.strip() for l in f]
    if len(sys.argv) == 5 and sys.argv[4] == "-s":
        process(plat, prod, hashes, False)
    else:
        process(plat, prod, hashes, True)
| 1,798 | 20.674699 | 79 | py |
platpal | platpal-master/analyze/compare.py | #!/usr/bin/env python
import sys
import plugin
def process(prod, hashes):
for h in hashes:
try:
result = plugin.compare_plats(prod, h)
except:
result = None
if result is not None:
print "=== %s ===" % h
print result
if __name__ == "__main__":
    # Usage: compare.py <list> <product> — one sample hash per input line.
    if len(sys.argv) != 3:
        sys.exit("Usage: %s <list> <product>" % sys.argv[0])
    pn = sys.argv[1]
    prod = sys.argv[2]
    with open(pn, "r") as f:
        hashes = [l.strip() for l in f]
    process(prod, hashes)
| 555 | 18.172414 | 60 | py |
platpal | platpal-master/analyze/conf.py | #!/usr/bin/env python
from os.path import abspath, exists, join
# paths
OPT_PATH_CUR = abspath(join(__file__, ".."))
OPT_PATH_RAW = abspath(join(OPT_PATH_CUR, "..", "vmware", "trace"))
OPT_PATH_MID = abspath(join(OPT_PATH_CUR, "data", "mid"))
OPT_PATH_END = abspath(join(OPT_PATH_CUR, "data", "end"))
OPT_DIR_FILTER = abspath(join(OPT_PATH_CUR, "filter"))
# consts
OPT_FN_SAMPLE = "Maldoc.pdf"
OPT_FN_TRACE = "Maldoc.out"
OPT_FN_LOG = "Maldoc.log"
# utils
def prefix(plat, tag, sample):
    """Build the trace-file name prefix for one platform/product/sample."""
    templates = {
        "win": "mpcc-x64-win-7-%s-%s",
        "mac": "mpcc-x64-mac-11-%s-%s",
    }
    if plat not in templates:
        raise Exception("Unknown platform %s" % plat)
    return templates[plat] % (tag, sample)
| 766 | 28.5 | 74 | py |
platpal | platpal-master/analyze/strace.py | #!/usr/bin/env python
import re
import json
from os.path import exists, join
from argparse import ArgumentParser
from collections import OrderedDict
import conf
# consts
OPT_SUFFIX = "out"
OPT_HOME_MAC = "/Users/bft/"
OPT_HOME_WIN = "C:\\Users\\bft\\"
# utils
def path_raw(plat, tag, sample):
    """Path of the raw syscall trace (.out) for this platform/tag/sample."""
    px = conf.prefix(plat, tag, sample)
    return join(conf.OPT_PATH_RAW, "%s.%s" % (px, OPT_SUFFIX))
def load_filter(pn):
    """Load a filter file into a list of compiled, fully-anchored regexes.

    Blank lines and '#' comments are skipped. A rule ending in a wildcard
    path component ('/.*' or '\\\\.*') additionally matches the bare parent
    path, so both the directory and its contents are filtered.
    """
    patterns = []
    with open(pn, "r") as fh:
        for raw in fh:
            rule = raw.strip()
            if not rule or rule.startswith('#'):
                continue
            patterns.append(re.compile("^" + rule + "$"))
            if rule.endswith("/.*"):
                patterns.append(re.compile("^" + rule[:-3] + "$"))
            if rule.endswith("\\\\.*"):
                patterns.append(re.compile("^" + rule[:-4] + "$"))
    return patterns
def load_filter_chain(cat, plat, prod):
    """Load filters from the generic, product-family and product-specific files.

    Files are "<cat>-<plat>.txt", "<cat>-<plat>-<family>.txt" and
    "<cat>-<plat>-<prod>.txt" in the filter directory, where the family is
    the first two characters of `prod`; missing files are simply skipped.
    """
    patterns = []
    tag = prod[0:2]
    pn = join(conf.OPT_DIR_FILTER, "%s-%s.txt" % (cat, plat))
    if exists(pn):
        patterns.extend(load_filter(pn))
    pn = join(conf.OPT_DIR_FILTER, "%s-%s-%s.txt" % (cat, plat, tag))
    if exists(pn):
        patterns.extend(load_filter(pn))
    pn = join(conf.OPT_DIR_FILTER, "%s-%s-%s.txt" % (cat, plat, prod))
    if exists(pn):
        patterns.extend(load_filter(pn))
    return patterns
# parse
FILTER_FOP_MAC = []
FILTER_NET_MAC = []
FILTER_EXE_MAC = []
def decode_mac_fop(pnstr):
    """Normalize a mac file path: root relative paths at the sandbox home
    directory and resolve '.' / '..' / empty components."""
    if pnstr[0] != '/':
        pnstr = OPT_HOME_MAC + pnstr
    kept = []
    drop = 0
    # Walk components right-to-left so '..' can cancel the next real name.
    for part in reversed(pnstr.split("/")):
        if part in ("", "."):
            continue
        if part == "..":
            drop += 1
        elif drop:
            drop -= 1
        else:
            kept.append(part)
    kept.reverse()
    return "/" + "/".join(kept)
def filter_mac_fop(pnstr):
    """Normalize a mac path and drop it (return None) if any filter matches."""
    parsed = decode_mac_fop(pnstr)
    for f in FILTER_FOP_MAC:
        if f.match(parsed) is not None:
            return None
    return parsed
def filter_mac_net(ustr):
    """Return the endpoint string, or None if any network filter matches."""
    for f in FILTER_NET_MAC:
        if f.match(ustr) is not None:
            return None
    return ustr
def filter_mac_exe(pnstr):
    """Return the executable string, or None if any exec filter matches."""
    base = pnstr
    for f in FILTER_EXE_MAC:
        if f.match(base) is not None:
            return None
    return base
def parse_mac_pin(act, args, ret, err):
    """True once the sample document itself is opened — the point from which
    subsequent syscalls are attributed to the sample."""
    return (act == "open") and (conf.OPT_FN_SAMPLE in args)
def parse_mac_exe(act, args, ret, err):
    """Extract a process-spawn event, or None if filtered/not applicable."""
    # NOTE(review): "exeve" looks like a typo for "execve" — verify against
    # the actual action names emitted by the mac tracer before changing it.
    if act == "exeve" or act == "posix_spawn":
        exe = filter_mac_exe(args[1:-1])
        if exe is not None:
            return {"arg": exe, "ret": ret, "err": err}
    return None
def parse_mac_fop(act, args, ret, err):
    """Extract a file-open event, or None if filtered/not applicable."""
    if act == "open" or act == "open_extended" or act == "open_nocancel":
        # args is the quoted path; strip the surrounding quotes.
        parg = args[1:-1]
        pn = filter_mac_fop(parg)
        if pn is not None:
            return {"arg": pn, "ret": ret, "err": err}
    elif act == "openat":
        # Path is the second comma-separated argument for openat.
        parg = args.split(",")[1].strip()[1:-1]
        pn = filter_mac_fop(parg)
        if pn is not None:
            return {"arg": pn, "ret": ret, "err": err}
    return None
def parse_mac_net(act, args, ret, err):
    """Extract a network-connect event, or None if filtered/not applicable."""
    if act == "connect" or act == "connect_nocancel":
        net = filter_mac_net(args)
        if net is not None:
            return {"arg": net, "ret": ret, "err": err}
    return None
def parse_mac(tag, sample):
    """Parse a mac syscall trace into sorted exec/net/file observations.

    Loads the platform/product filter chains (stored in module globals),
    skips everything before the sample document is opened, then classifies
    each line. Returns an OrderedDict with "plat"/"prod"/"exes"/"nets"/
    "fops" keys, or None when the trace file does not exist.
    """
    global FILTER_FOP_MAC
    FILTER_FOP_MAC = load_filter_chain("fop", "mac", tag)
    global FILTER_NET_MAC
    FILTER_NET_MAC = load_filter_chain("net", "mac", tag)
    global FILTER_EXE_MAC
    FILTER_EXE_MAC = load_filter_chain("exe", "mac", tag)
    # One trace line: "pid/tid:  act(args) = ret err"
    RE_LINE = re.compile("^(\w+)/(\w+):\s+(\w+)\((.*)\)\s=\s(-?\w+)\s(\w+)$")
    pinned = False
    exes = []
    fops = []
    nets = []
    pn = path_raw("mac", tag, sample)
    if not exists(pn):
        return None
    with open(pn, "r") as f:
        content = f.readlines()
    for l in content:
        l = l.strip()
        if len(l) == 0:
            continue
        m = RE_LINE.match(l.strip())
        if m is None:
            raise Exception("Unmatched line: %s" % l)
        act = m.group(3)
        args = m.group(4)
        ret = m.group(5)
        err = m.group(6)
        # Ignore everything before the sample document is opened.
        if not pinned:
            pinned = parse_mac_pin(act, args, ret, err)
        if not pinned:
            continue
        op_exe = parse_mac_exe(act, args, ret, err)
        if op_exe is not None:
            exes.append(op_exe)
            continue
        op_fop = parse_mac_fop(act, args, ret, err)
        if op_fop is not None:
            fops.append(op_fop)
            continue
        op_net = parse_mac_net(act, args, ret, err)
        if op_net is not None:
            nets.append(op_net)
            continue
    # Deduplicate and sort the observed arguments (ret/err are dropped here).
    data = OrderedDict()
    data["plat"] = "mac"
    data["prod"] = tag
    data["exes"] = sorted(set([e["arg"] for e in exes]))
    data["nets"] = sorted(set([e["arg"] for e in nets]))
    data["fops"] = sorted(set([e["arg"] for e in fops]))
    return data
FILTER_FOP_WIN = []
FILTER_NET_WIN = []
FILTER_EXE_WIN = []
def decode_win_fop(pnstr):
    """Normalize a windows path: lowercase it and resolve '.' / '..' / empty
    components (backslash-separated)."""
    lowered = pnstr.lower()
    kept = []
    drop = 0
    # Walk components right-to-left so '..' can cancel the next real name.
    for part in reversed(lowered.split("\\")):
        if part in ("", "."):
            continue
        if part == "..":
            drop += 1
        elif drop:
            drop -= 1
        else:
            kept.append(part)
    kept.reverse()
    return "\\".join(kept)
def filter_win_fop(pnstr):
    """Normalize a win path and drop it (return None) if any filter matches."""
    parsed = decode_win_fop(pnstr)
    for f in FILTER_FOP_WIN:
        if f.match(parsed) is not None:
            return None
    return parsed
def filter_win_net(ustr):
    """Return the endpoint string, or None if any network filter matches."""
    for f in FILTER_NET_WIN:
        if f.match(ustr) is not None:
            return None
    return ustr
def filter_win_exe(pnstr):
    """Filter on the executable's basename (last backslash component)."""
    base = pnstr.split("\\")[-1]
    for f in FILTER_EXE_WIN:
        if f.match(base) is not None:
            return None
    return base
def parse_win_pin(act, args, ret, err):
return (act == "CreateFile") and (conf.OPT_FN_SAMPLE in args)
def parse_win_exe(act, args, ret, err):
if act == "Process Create":
if ret == "SUCCESS":
err = None
else:
err = ret
ret = "FAILURE"
exe = filter_win_exe(args)
if exe is not None:
return {"arg": exe, "ret": ret, "err": err}
return None
def parse_win_fop(act, args, ret, err):
    """Turn a file-operation event into a fop record, or None."""
    if act not in ("CreateFile", "CreateFileMapping", "Load Image"):
        return None
    # Normalize the outcome: on failure, keep the raw status as the error.
    if ret == "SUCCESS":
        err = None
    else:
        err, ret = ret, "FAILURE"
    pn = filter_win_fop(args)
    if pn is None:
        return None
    return {"arg": pn, "ret": ret, "err": err}
def parse_win_net(act, args, ret, err):
    """Turn a 'TCP Connect' event into a network record, or None."""
    if act != "TCP Connect":
        return None
    # Procmon logs "local -> remote"; keep only the remote endpoint.
    remote = args.split("->")[1].strip()
    # Normalize the outcome: on failure, keep the raw status as the error.
    if ret == "SUCCESS":
        err = None
    else:
        err, ret = ret, "FAILURE"
    url = filter_win_net(remote)
    if url is None:
        return None
    return {"arg": url, "ret": ret, "err": err}
def parse_win(tag, sample):
    """Parse the Procmon CSV trace for (tag, sample) into exe/net/fop summaries.

    Installs the per-product filter chains as module globals, then walks the
    trace line by line. Returns an OrderedDict summary, or None when no raw
    trace exists for this (tag, sample) pair.
    """
    global FILTER_FOP_WIN, FILTER_NET_WIN, FILTER_EXE_WIN
    FILTER_FOP_WIN = load_filter_chain("fop", "win", tag)
    FILTER_NET_WIN = load_filter_chain("net", "win", tag)
    FILTER_EXE_WIN = load_filter_chain("exe", "win", tag)
    # seven quoted CSV columns per Procmon row
    line_pattern = re.compile('"(.*?)","(.*?)","(.*?)","(.*?)","(.*?)","(.*?)","(.*?)"')
    trace_path = path_raw("win", tag, sample)
    if not exists(trace_path):
        return None
    pinned = False
    exes, fops, nets = [], [], []
    with open(trace_path, "r") as f:
        # skip the two CSV header lines
        f.readline()
        f.readline()
        rows = f.readlines()
    for raw in rows:
        raw = raw.strip()
        if not raw:
            continue
        m = line_pattern.search(raw)
        if m is None:
            raise Exception("Unmatched line: %s" % raw)
        act, args, ret, err = m.group(4), m.group(5), m.group(6), m.group(7)
        # Ignore everything before the sample itself is first opened;
        # the pinning line itself still goes through the parsers below.
        if not pinned:
            pinned = parse_win_pin(act, args, ret, err)
        if not pinned:
            continue
        for parser, bucket in ((parse_win_exe, exes),
                               (parse_win_fop, fops),
                               (parse_win_net, nets)):
            op = parser(act, args, ret, err)
            if op is not None:
                bucket.append(op)
                break
    data = OrderedDict()
    data["plat"] = "win"
    data["prod"] = tag
    data["exes"] = sorted(set(e["arg"] for e in exes))
    data["nets"] = sorted(set(e["arg"] for e in nets))
    data["fops"] = sorted(set(e["arg"] for e in fops))
    return data
# main
if __name__ == "__main__":
    # command-line interface
    parser = ArgumentParser()
    parser.add_argument("plat", help="Platform",
            choices=("mac", "win"))
    parser.add_argument("tag", help="Tag",
            choices=(
                "dc.07.20033",
                "dc.08.20082",
                "dc.09.20069",
                "dc.10.20056",
                "dc.16.20039",
                "dc.16.20045",
                "dc.17.20050",
                "15.06.30033",
                "11.0.00",
                "11.0.10",
                "10.0.0",
                "10.1.0"
            ))
    parser.add_argument("sample", help="Sample")
    args = parser.parse_args()
    # dispatch on the requested platform
    handlers = {"mac": parse_mac, "win": parse_win}
    if args.plat not in handlers:
        raise Exception("Unknown platform %s" % args.plat)
    data = handlers[args.plat](args.tag, args.sample)
    print(json.dumps(data, indent = 2))
| 9,801 | 22.505995 | 83 | py |
platpal | platpal-master/acrobat/driver/gen/template.py | #!/usr/bin/env python
import re
from os.path import abspath, dirname, exists, join
from enum import Enum
from collections import OrderedDict
# paths (all resolved relative to this generator script)
OPT_DIR_ROOT = abspath(dirname(dirname(__file__)))
OPT_DIR_HOOK = join(OPT_DIR_ROOT, "gen", "hook")            # per-hook body templates
OPT_FILE_LOGIC = join(OPT_DIR_ROOT, "callback-logic.cpp")   # generated callback bodies
OPT_FILE_REGISTER = join(OPT_DIR_ROOT, "callback-register.cpp")  # generated (un)register calls
OPT_LIST_HOOK = join(OPT_DIR_ROOT, "gen", "hooks.list")     # one hook prototype per line
# code gen: naming conventions for the emitted C symbols
OPT_PREFIX_HOOK = "on"
OPT_SUFFIX_NSEL = "NSEL"
# misc
OPT_TAB_SIZE = 2   # tab width used when expanding generated code
# utils
def fcode(code, prefix):
    """Prepend *prefix* to every line of *code* and expand tabs to OPT_TAB_SIZE."""
    prefixed = prefix + code.replace("\n", "\n" + prefix)
    return prefixed.expandtabs(OPT_TAB_SIZE)
def memberize(members, name, sep):
    """Join each member's gen_<name>() output with *sep*, skipping empty results."""
    rendered = []
    for member in members:
        piece = getattr(member, "gen_%s" % name)()
        if piece is not None and len(piece) != 0:
            rendered.append(piece)
    return sep.join(rendered)
def templatize(obj, path):
    """Render the template file at *path* by expanding {{element}} placeholders.

    Each placeholder is replaced with the output of obj.gen_<element>(),
    re-indented to match the placeholder line's own leading whitespace.

    NOTE(review): OPT_PTN_ELEM is not defined anywhere in this module --
    presumably a compiled regex like {{(\w+)}} with one capture group per
    placeholder name; confirm where it is expected to come from before
    relying on this function.
    """
    code = []
    with open(path, "r") as f:
        contents = f.readlines()
    for line in contents:
        line = line.strip("\n")
        matches = OPT_PTN_ELEM.findall(line)  # placeholder names on this line
        for element in matches:
            target = "{{%s}}" % element
            # leading whitespace of the line, reused to indent the expansion
            prefix = line[0:len(line) - len(line.lstrip())]
            generated = getattr(obj, "gen_%s" % element)()
            formatted = fcode(generated, prefix)
            line = line.replace(target, formatted)
        code.append(line)
    return "\n".join(code)
def commentize(objs, func, path):
    """Render gen_<func>() for every object and write the results to *path*."""
    chunks = [fcode(getattr(obj, "gen_%s" % func)(), "") for obj in objs]
    with open(path, "w") as f:
        f.writelines("\n".join(chunks))
# core
class Param:
    """A single formal parameter: its type text (*defs*) plus its name."""

    def __init__(self, defs, name):
        self.defs = defs
        self.name = name

    def gen_def(self):
        """Render for a declaration, e.g. 'void * aux'."""
        return "%s %s" % (self.defs, self.name)

    def gen_use(self):
        """Render for a call site: just the bare name."""
        return self.name
class Hook:
    """One Acrobat notification hook: its name plus its parameter list.

    Generates the C fragments for the hook callback (prototype, body) and
    the matching AVApp (un)register calls. The embedded '\\t' escapes in the
    template strings are deliberate: fcode()/expandtabs turns them into the
    configured indent when the fragments are written out.
    """
    def __init__(self, name, *args):
        self.name = name
        self.args = list(args)
        # every callback receives a trailing opaque client-data pointer
        self.args.append(Param("void *", "aux"))
    def gen_name_hook(self):
        """C name of the callback function, e.g. 'onDocDidOpen'."""
        return "%s%s" % (OPT_PREFIX_HOOK, self.name)
    def gen_name_nsel(self):
        """Notification selector constant, e.g. 'DocDidOpenNSEL'."""
        return "%s%s" % (self.name, OPT_SUFFIX_NSEL)
    def gen_args_def(self):
        """Comma-separated parameter declarations, one per line."""
        return memberize(self.args, "def", ",\n\t\t")
    def gen_args_use(self):
        """Comma-separated argument names for a call site."""
        return memberize(self.args, "use", ", ")
    def gen_proto(self):
        """Callback prototype (ACCB1 calling convention, void return)."""
        return \
            '''ACCB1 void %s(
\t\t%s)''' % (self.gen_name_hook(), self.gen_args_def())
    def gen_logic(self):
        """Callback definition: templated body if gen/hook/<name>.cpp exists,
        otherwise a one-line LOG_CBK stub."""
        pn = join(OPT_DIR_HOOK, "%s.cpp" % self.name)
        if exists(pn):
            ctnt = templatize(self, pn)
        else:
            ctnt = 'LOG_CBK("%s");' % self.name
        return \
            '''%s
{
\t%s
}''' % (self.gen_proto(), ctnt)
    def gen_register(self):
        """AVAppRegisterNotification call wiring the selector to the callback."""
        return \
            '''AVAppRegisterNotification(%s,
\t\tgExtensionID,
\t\t(void *)%s,
\t\tNULL);''' % (self.gen_name_nsel(), self.gen_name_hook())
    def gen_unregister(self):
        """Matching AVAppUnregisterNotification call."""
        return \
            '''AVAppUnregisterNotification(%s,
\t\tgExtensionID,
\t\t(void *)%s,
\t\tNULL);''' % (self.gen_name_nsel(), self.gen_name_hook())
# factory
# A hook prototype line: "<ret> <name>(<args>, void *clientData)".
HOOK_PROTO_PATTERN = re.compile(
    "(.+) (\w+)\((.*), void(?:\* | \*)clientData\)"
)
def parse_hook(proto):
    """Build a Hook from a C prototype line; raises on a malformed line."""
    m = HOOK_PROTO_PATTERN.match(proto)
    if m is None:
        raise Exception("Input does not match: %s" % proto)
    retn = m.group(1)   # return type (currently unused)
    name = m.group(2)
    arglist = m.group(3)
    params = []
    for chunk in arglist.split(", "):
        tokens = chunk.split(" ")
        params.append(Param(tokens[0], tokens[1]))
    return Hook(name, *params)
| 3,742 | 23.625 | 80 | py |
platpal | platpal-master/acrobat/driver/gen/gen.py | #!/usr/bin/env python
from template import *
if __name__ == "__main__":
    # one hook prototype per line in hooks.list
    hooks = []
    with open(OPT_LIST_HOOK, "r") as f:
        for line in f:
            hooks.append(parse_hook(line.strip()))
    # emit the callback bodies and the (un)register boilerplate
    commentize(hooks, "logic", OPT_FILE_LOGIC)
    commentize(hooks, "register", OPT_FILE_REGISTER)
| 353 | 21.125 | 52 | py |
platpal | platpal-master/acrobat/driver/gen/parse.py | #!/usr/bin/env python
import re
import os
import sys
# POKE(<name>, (<ret>), (<args>), (...)) macro invocation in PIPokes.h
OPT_PTN_POKE = re.compile(
    "^POKE\s*\(\s*(\w+?)\s*,\s*\(.+?\),\s*\((.+?)\),\s*\(.+?\)\s*\)$"
)
# hooks excluded from generation (AVApp* are filtered separately below)
OPT_BLACKLIST = [
    "PDDocPermsReady",
    "PDEContainerXAPMetadataDidChange",
    "PDPageGetPrintMatrix",
    "AVDocWillRefreshUI"
]
if __name__ == "__main__":
    if len(sys.argv) != 2:
        sys.exit("Usage: %s <path-to PIPokes.h>" % __file__)
    pn = sys.argv[1]
    if not os.path.exists(pn):
        sys.exit("%s does not exist" % pn)
    pokes = []
    # Scan PIPokes.h for POKE(...) macro invocations. A macro may span
    # several backslash-continued lines; `cur` accumulates the pieces of
    # such a multi-line invocation until the full pattern matches.
    with open(pn, "r") as f:
        cur = None
        for l in f:
            l = l.rstrip()
            if cur is None:
                if not l.startswith("POKE"):
                    continue
                m = OPT_PTN_POKE.match(l)
                if m is not None:
                    # single-line POKE: record it directly
                    pokes.append({
                        "name": m.group(1),
                        "args": m.group(2),
                        "full": m.group(0)
                    })
                else:
                    # start of a continued POKE: strip the trailing backslash
                    if l.endswith("\\"):
                        l = l[:-1]
                    cur = l
            else:
                # continuation line: append and retry the full match
                if l.endswith("\\"):
                    l = l[:-1]
                cur = cur + l
                m = OPT_PTN_POKE.match(cur)
                if m is not None:
                    pokes.append({
                        "name": m.group(1).strip(),
                        "args": m.group(2).strip(),
                        "full": m.group(0)
                    })
                    cur = None
    # Emit one prototype per poke, skipping AVApp* and blacklisted hooks.
    for p in pokes:
        if p["name"].startswith("AVApp"):
            continue
        if p["name"] in OPT_BLACKLIST:
            continue
        print "void %s(%s)" % (p["name"].strip(), p["args"].strip())
| 1,837 | 24.178082 | 73 | py |
platpal | platpal-master/acrobat/driver/build/win.py | #!/usr/bin/env python
import os
from os.path import basename, join
from shutil import copy, copytree
from conf import *
# const
PROJ_EXTN = "api"                         # Acrobat plugin extension on Windows
PROJ_FULL = "%s.%s" % (PROJ_NAME, PROJ_EXTN)
PROJ_ARCH = "x86"
# paths
FILE_SYMS = join(PATH_INF, "Symbols.exp")          # exported symbol list
PATH_DLIB = join(PATH_OBJ, "%s.lib" % PROJ_NAME)   # import library
PATH_EXEC = join(PATH_OBJ, PROJ_NAME)              # linked DLL
PATH_PROD = join(PATH_BIN, PROJ_FULL)              # staged product
PATH_LOAD = "/cygdrive/c/Program Files (x86)/Adobe/Acrobat Reader DC/Reader/plug_ins/"
# lists
# NOTE(review): these alias (not copy) the COMMON_* lists from conf.py, so
# the extend() calls mutate the shared lists -- harmless while only one
# platform module is imported per process, but worth confirming.
LIST_DEFS = COMMON_DEFS
LIST_DEFS.extend([
    "ACRO_SDK_LEVEL=0x00090000",
    "WIN_PLATFORM",
    "WIN_ENV",
    "WIN32",
    "_WINDLL",
    "_WINDOWS"
])
LIST_INCS = COMMON_INCS
LIST_INCS.extend([
    join(PATH_OST, "include", "ucrt"),
    join(PATH_OST, "include", "um"),
    join(PATH_OST, "include", "winrt"),
    join(PATH_OST, "include", "shared"),
    join(PATH_CRT, "include")
])
LIST_SRCS = COMMON_SRCS
LIST_LIBS = [
    join(PATH_OST, "lib", "ucrt", PROJ_ARCH),
    join(PATH_OST, "lib", "um", PROJ_ARCH),
    join(PATH_CRT, "lib")
]
LIST_LSYS = [
    "odbc32.lib",
    "odbccp32.lib",
    "kernel32.lib",
    "user32.lib",
    "gdi32.lib",
    "winspool.lib",
    "comdlg32.lib",
    "advapi32.lib",
    "shell32.lib",
    "ole32.lib",
    "oleaut32.lib",
    "uuid.lib"
]
LIST_PCHS = COMMON_PCHS
# confs: fixed compiler/linker flag prefixes
CONF_CC = [
    "cl",
    "/c",
    "/Od /Oy-",
    "/Gd",
    "/MTd"
]
CONF_LD = [
    "link",
    "/DLL",
    "/MACHINE:X86",
    "/SUBSYSTEM:WINDOWS",
    "/TLBID:1",
    "/NXCOMPAT",
    "/DYNAMICBASE",
    "/MANIFEST",
    "\"/MANIFESTUAC:level='asInvoker' uiAccess='false'\"",
    "/manifest:embed"
]
# comps: object files produced so far (filled in by comp_one)
COMP_OBJS = []
# utils
def winpath(pn):
    """Translate a cygwin path into its Windows form via cygpath.

    NOTE(review): pn is interpolated into a shell command; paths containing
    spaces or shell metacharacters would need quoting -- confirm callers.
    """
    out = check_output("cygpath -w %s" % pn, shell=True)
    return out.strip()
# core
def comp_one(src):
    """Compile one source file with cl.exe and record the object path."""
    obj_name = basename(src).split(".")[0] + ".o"
    obj_path = join(PATH_OBJ, obj_name)
    pieces = [" ".join(CONF_CC)]
    pieces.append(" ".join(["/D %s" % e for e in LIST_DEFS]))
    pieces.append(" ".join(["/I\"%s\"" % winpath(e) for e in LIST_INCS]))
    pieces.append("/Fo\"%s\"" % winpath(obj_path))
    pieces.append("\"%s\"" % winpath(src))
    shell(" ".join(pieces))
    COMP_OBJS.append(obj_path)
def comp():
    """Compile every configured source file."""
    for src in LIST_SRCS:
        comp_one(src)
def link():
    """Link the objects into the plugin DLL, exporting the listed symbols."""
    # make the VC toolchain binaries visible to the shell
    binpath = abspath(join(PATH_OST, "bin", "x86"))
    os.environ["PATH"] = winpath(binpath) + ":" + os.environ["PATH"]
    with open(FILE_SYMS, "r") as f:
        syms = [line.strip() for line in f.readlines()]
    pieces = [" ".join(CONF_LD)]
    pieces.append(" ".join(["/LIBPATH:\"%s\"" % winpath(e) for e in LIST_LIBS]))
    pieces.append("/INCREMENTAL %s" % " ".join(LIST_LSYS))
    pieces.append(" ".join(["/EXPORT:%s" % s for s in syms]))
    pieces.append("/IMPLIB:\"%s\"" % winpath(PATH_DLIB))
    pieces.append("/OUT:\"%s\"" % winpath(PATH_EXEC))
    pieces.append(" ".join(["\"%s\"" % winpath(e) for e in COMP_OBJS]))
    shell(" ".join(pieces))
def bdle():
    """Stage the linked binary into the bin/ output directory."""
    create(PATH_BIN)
    copy(PATH_EXEC, PATH_PROD)
def sign():
    """Sign the staged plugin with the Adobe SDK key/cert pair."""
    shell("%s -kp \"%s\" -cf \"%s\" \"%s\"" % (
        join(PATH_KEY, "sign"),
        winpath(join(PATH_KEY, "keypair")),
        winpath(join(PATH_KEY, "cert")),
        winpath(PATH_PROD)))
| 3,375 | 21.965986 | 86 | py |
platpal | platpal-master/acrobat/driver/build/run.py | #!/usr/bin/env python
import sys
from os import remove
from os.path import exists, isfile, join
from shutil import copy, copytree, rmtree
from conf import *
# set platform: pull in the platform-specific build implementation
# (comp/link/bdle/sign plus PROJ_FULL, PROJ_ARCH, PATH_PROD, PATH_LOAD, ...)
if HOST == "mac":
    from mac import *
elif HOST == "win":
    from win import *
else:
    exit("Unknown host: %s" % HOST)
# paths
FROM = PATH_PROD                    # freshly built artifact
DEST = join(PATH_LOAD, PROJ_FULL)   # Acrobat's plug-in directory
# func
def share():
    """Copy the built plugin into the per-arch network share for the VMs."""
    # local target; intentionally shadows the module-level DEST
    DEST = SHARED + "/" + PROJ_ARCH + "/" + PROJ_FULL
    if exists(DEST):
        if isfile(DEST):
            remove(DEST)
        else:
            rmtree(DEST)
    # mac products are bundles (directories); win products are single files
    if HOST == "mac":
        copytree(FROM, DEST)
    elif HOST == "win":
        copy(FROM, DEST)
    else:
        exit("Unknown host: %s" % HOST)
def build():
    """Run the full pipeline: compile, link, bundle, sign, then share."""
    create(join(PATH_OBJ))
    comp()
    link()
    bdle()
    sign()
    share()
    print("Build finished")
def uninstall():
    """Remove the plugin from Acrobat's plug-in directory, if present."""
    if not exists(DEST):
        return
    if isfile(DEST):
        remove(DEST)
    else:
        rmtree(DEST)
def install():
    """Copy the built plugin (file or bundle) into Acrobat's plug-in directory."""
    uninstall()
    # win products are single files, mac products are bundle directories
    if isfile(FROM):
        copy(FROM, DEST)
    else:
        copytree(FROM, DEST)
# main
if __name__ == "__main__":
    if len(sys.argv) < 2:
        sys.exit("Usage: %s <build|install|uninstall|share> <opt>" % \
            sys.argv[0])
    cmd = sys.argv[1]
    # flags accepted by "build": extra compile-time defines
    build_flags = {
        "-v": ["OPT_UI_CHECK"],
        "-f": ["OPT_UI_CHECK", "OPT_FULL_CHECK"],
    }
    actions = {"install": install, "uninstall": uninstall, "share": share}
    try:
        if cmd == "build":
            if len(sys.argv) == 3:
                opt = sys.argv[2]
                if opt not in build_flags:
                    raise Exception("Invalid option: %s" % opt)
                LIST_DEFS.extend(build_flags[opt])
            build()
        elif cmd in actions:
            actions[cmd]()
        else:
            sys.exit("Unknown command: %s" % sys.argv[1])
    except OSError as err:
        sys.exit(err.strerror)
    except IOError as err:
        sys.exit(err.strerror)
| 2,041 | 19.42 | 70 | py |
platpal | platpal-master/acrobat/driver/build/conf.py | #!/usr/bin/env python
from os import makedirs
from os.path import abspath, exists, join
from platform import system
from shutil import rmtree
from subprocess import check_output, CalledProcessError
from sys import exit
# host: detect the build host and pick the matching shared-folder root
HOST = system()
if HOST == "Darwin":
    HOST = "mac"
    SHARED = "/Users/meng/Shared/build/mac/"
elif HOST == "CYGWIN_NT-10.0":
    # Windows builds run under cygwin
    HOST = "win"
    SHARED = "/cygdrive/z/shared/build/win/"
else:
    exit("Unknown host: %s" % HOST)
# consts
PROJ_NAME = "Driver"
# paths (relative to this build/ directory)
PATH_CUR = abspath(join(__file__, ".."))
PATH_OBJ = abspath(join(PATH_CUR, "obj"))               # intermediate objects
PATH_BIN = abspath(join(PATH_CUR, "bin"))               # staged products
PATH_INF = abspath(join(PATH_CUR, "inf", HOST))         # per-host metadata
PATH_SRC = abspath(join(PATH_CUR, ".."))                # plugin sources
PATH_DEP = abspath(join(PATH_CUR, "..", "..", HOST))    # per-host dependencies
PATH_OST = abspath(join(PATH_DEP, "ost", "link"))       # OS toolchain/SDK
PATH_CRT = abspath(join(PATH_DEP, "crt", "link"))       # C runtime
PATH_SDK = abspath(join(PATH_DEP, "sdk", "link"))       # Acrobat SDK
PATH_KEY = abspath(join(PATH_DEP, "key"))               # signing material
# lists shared by the mac/win build modules (which extend them in place)
COMMON_DEFS = [
    "PDMETADATA_HFT=1",
    "PLUGIN=1",
    "READER_PLUGIN=1",
    "AS_DEBUG=1",
    "DEBUG=1",
    "_DEBUG=1",
]
COMMON_INCS = [
    join(PATH_SDK, "SDK"),
    join(PATH_SDK, "API"),
    PATH_SRC
]
COMMON_SRCS = [
    join(PATH_SRC, "log.cpp"),
    join(PATH_SRC, "util.cpp"),
    join(PATH_SRC, "fs.cpp"),
    join(PATH_SRC, "cos.cpp"),
    join(PATH_SRC, "cons.cpp"),
    join(PATH_SRC, "pd.cpp"),
    join(PATH_SRC, "form.cpp"),
    join(PATH_SRC, "act.cpp"),
    join(PATH_SRC, "driver.cpp"),
    join(PATH_SDK, "API", "PIMain.c")
]
COMMON_PCHS = [
    join(PATH_SDK, "SDK", "PIHeaders++.pch")
]
# utils
def create(pn):
    """(Re)create directory *pn* from scratch, deleting any existing tree.

    Aborts the whole script (via exit) when the directory cannot be made.
    """
    if exists(pn):
        rmtree(pn)
    try:
        makedirs(pn)
    except Exception as err:
        # print() call form runs identically under py2 and keeps the
        # function usable under py3 (the old bare print statement did not)
        print(err)
        exit("Script stopped with error")
def shell(cmd):
    """Echo and run *cmd* through the shell; abort the whole script on failure.

    The command's captured output is printed; on a non-zero exit status the
    script terminates via exit().
    """
    print(cmd)
    try:
        output = check_output(cmd, shell=True)
        result = 0
    except CalledProcessError as err:
        output = err.output
        result = err.returncode
    # check_output/err.output yield a single string on py2; anything else
    # is printed element by element (was `basestring`, a py2-only builtin)
    if isinstance(output, str):
        print(output)
    else:
        for line in output:
            print(line)
    if result != 0:
        # fix: message previously read "Script stoped with error"
        exit("Script stopped with error")
| 2,268 | 22.153061 | 55 | py |
platpal | platpal-master/acrobat/driver/build/mac.py | #!/usr/bin/env python
from os.path import basename, join
from shutil import copy, copytree
from conf import *
# const
PROJ_EXTN = "acroplugin"                  # Acrobat plugin bundle extension on mac
PROJ_FULL = "%s.%s" % (PROJ_NAME, PROJ_EXTN)
# NOTE(review): interactive prompt at import time (py2-only raw_input);
# every import of this module blocks until an arch is typed in.
PROJ_ARCH = raw_input("arch: ")
# paths
FILE_PINF = join(PATH_INF, "Info.plist")          # bundle property list
FILE_SYMS = join(PATH_INF, "Symbols.exp")         # exported symbol list
FILE_LOCL = join(PATH_INF, "English.lproj")       # localized resources
PATH_EXEC = join(PATH_OBJ, PROJ_NAME)             # linked bundle executable
PATH_PROD = join(PATH_BIN, PROJ_FULL)             # staged bundle
PATH_LOAD = "/Users/meng/Library/Application Support/Adobe/Acrobat/DC/Plug-ins"
# lists
# NOTE(review): these alias (not copy) the COMMON_* lists from conf.py, so
# the extend() calls mutate the shared lists -- harmless while only one
# platform module is imported per process, but worth confirming.
LIST_DEFS = COMMON_DEFS
LIST_DEFS.extend([
    "MAC_PLATFORM",
    "DISABLECPLUSPLUSDURING=0",
    "EXCEPTIONS_ALWAYS_CPLUSPLUS=1"
])
LIST_INCS = COMMON_INCS
LIST_SRCS = COMMON_SRCS
LIST_PCHS = COMMON_PCHS
# confs: fixed compiler/linker flag prefixes
CONF_CC = [
    "clang",
    "-x objective-c++",
    "-arch %s" % PROJ_ARCH,
    "-O0",
    "-isysroot %s" % PATH_OST,
    "-Wno-comment"
]
CONF_LD = [
    "clang++",
    "-bundle",
    "-arch %s" % PROJ_ARCH,
    "-dead_strip",
    "-isysroot %s" % PATH_OST,
    "-fobjc-link-runtime"
]
# comps: object files produced so far (filled in by comp_one)
COMP_OBJS = []
# core
def comp_one(src):
    """Compile one source file with clang and record the object path."""
    obj_path = join(PATH_OBJ, basename(src).split(".")[0] + ".o")
    pieces = [" ".join(CONF_CC),
              " ".join(["-D%s" % e for e in LIST_DEFS]),
              " ".join(["-I%s" % e for e in LIST_INCS]),
              " ".join(["-include %s" % e for e in LIST_PCHS]),
              "-c %s" % src,
              "-o %s" % obj_path]
    shell(" ".join(pieces))
    COMP_OBJS.append(obj_path)
def comp():
    """Compile every configured source file."""
    for src in LIST_SRCS:
        comp_one(src)
def link():
    """Link the objects into the plugin bundle executable."""
    pieces = [" ".join(CONF_LD),
              "-exported_symbols_list %s" % FILE_SYMS,
              " ".join(COMP_OBJS),
              "-o %s" % PATH_EXEC]
    shell(" ".join(pieces))
def bdle():
    """Assemble the .acroplugin bundle layout and copy the pieces in."""
    bundle_root = join(PATH_BIN, "%s.%s" % (PROJ_NAME, PROJ_EXTN))
    bundle_base = join(bundle_root, "Contents")
    bundle_code = join(bundle_base, "MacOS")
    bundle_ress = join(bundle_base, "Resources")
    # create() makes parents too, so Contents/ first, then the leaves
    for d in (bundle_base, bundle_code, bundle_ress):
        create(d)
    copy(PATH_EXEC, bundle_code)
    copy(FILE_PINF, bundle_base)
    copytree(FILE_LOCL, join(bundle_ress, basename(FILE_LOCL)))
def sign():
    """Sign the plugin bundle with the Adobe SDK key/cert pair."""
    shell("%s -kp %s -cf %s %s" % (
        join(PATH_KEY, "sign"),
        join(PATH_KEY, "keypair"), join(PATH_KEY, "cert"),
        PATH_PROD))
| 2,458 | 21.354545 | 79 | py |
platpal | platpal-master/vmware/scan-product.py | #!/usr/bin/env python
import os
import sys
import json
from os.path import abspath, basename, dirname, exists, join
from subprocess import call
from collections import OrderedDict
# path
OPT_WKS = abspath(dirname(__file__))
OPT_DIR_VTDOC = join(OPT_WKS, "vtdoc", "analyze")
# enums
OPT_ENUM_PLAT = ["mac", "win"]
# commands
OPT_CMD_BOX = join(OPT_WKS, "box.py")
# main
if __name__ == "__main__":
    prod = sys.argv[1]
    # CVE -> sample-hash-list mapping for this product
    fn = join(OPT_DIR_VTDOC, "product", "%s.json" % prod)
    with open(fn, "r") as f:
        cves = json.load(f, object_pairs_hook=OrderedDict)
    # run every sample on every platform through box.py
    for c in cves:
        for s in cves[c]:
            for p in OPT_ENUM_PLAT:
                call(["python", OPT_CMD_BOX, p, prod, s])
| 812 | 22.228571 | 60 | py |
platpal | platpal-master/vmware/box.py | #!/usr/bin/env python
import os
import sys
import json
import signal
import requests
from os.path import abspath, basename, dirname, exists, expanduser, getsize, join
from shutil import copy
from time import sleep
from enum import Enum
from hashlib import sha256
from subprocess import call, check_output, Popen, STDOUT
from threading import Thread
from argparse import ArgumentParser
# paths
OPT_WKS = abspath(dirname(__file__))
OPT_DIR_VTDOC = join(OPT_WKS, "vtdoc", "sample")   # downloaded samples (by hash)
OPT_DIR_UTILS = join(OPT_WKS, "utils")             # guest-side tracer tools
OPT_DIR_TRACE = join(OPT_WKS, "trace")             # collected trace output
OPT_DIR_VIDEO = join(OPT_WKS, "video")             # optional screen recordings
OPT_DIR_SHARE = join(expanduser("~"), "Shared", "build")
OPT_DIR_VMCMD = join("/", "Applications",
        "VMware Fusion.app", "Contents", "Library")
OPT_DIR_VMIMG = join(expanduser("~"), "Documents",
        "Virtual Machines.localized")
OPT_URL_VTDOC = "http://meng.gtisc.gatech.edu:9000/sample/"   # sample server
# commands
OPT_CMD_VMRUN = join(OPT_DIR_VMCMD, "vmrun")
# consts
OPT_VM_USER = "bft"
OPT_VM_PASS = "bft"
OPT_VM_LIFE = 30                   # seconds a sample run may take
OPT_VM_REST = 5                    # settle time after killing Reader
OPT_VM_SIZE = 1024 * 1024 * 4 # 4MB
OPT_VM_TASK = "work"               # snapshot name prefix
# per-platform, per-version paths inside the guests
OPT_VM_PROG = {
    "mac": {
        "10": "/Applications/Adobe\\ Reader.app/Contents/MacOS/AdobeReader",
        "11": "/Applications/Adobe\\ Reader.app/Contents/MacOS/AdobeReader",
        "15": "/Applications/Adobe\\ Acrobat\\ Reader\\ 2015.app/Contents/MacOS/AdobeReader",
        "dc": "/Applications/Adobe\\ Acrobat\\ Reader\\ DC.app/Contents/MacOS/AdobeReader"
    },
    "win": {
        "10": "C:\\Program Files (x86)\\Adobe\Reader 10.0\\Reader\\AcroRd32.exe",
        "11": "C:\\Program Files (x86)\\Adobe\Reader 11.0\\Reader\\AcroRd32.exe",
        "15": "C:\\Program Files (x86)\\Adobe\Acrobat Reader 2015\\Reader\\AcroRd32.exe",
        "dc": "C:\\Program Files (x86)\\Adobe\Acrobat Reader DC\\Reader\\AcroRd32.exe"
    }
}
OPT_VM_PLUG = {
    "mac": {
        "10": "/Users/bft/Library/Application Support/Adobe/Acrobat/10.0/Plug-ins",
        "11": "/Users/bft/Library/Application Support/Adobe/Acrobat/11.0/Plug-ins",
        "15": "/Users/bft/Library/Application Support/Adobe/Acrobat/2015/Plug-ins",
        "dc": "/Users/bft/Library/Application Support/Adobe/Acrobat/DC/Plug-ins"
    },
    "win": {
        "10": "C:\\Program Files (x86)\\Adobe\\Reader 10.0\\Reader\\plug_ins",
        "11": "C:\\Program Files (x86)\\Adobe\\Reader 11.0\\Reader\\plug_ins",
        "15": "C:\\Program Files (x86)\\Adobe\\Acrobat Reader 2015\\Reader\\plug_ins",
        "dc": "C:\\Program Files (x86)\\Adobe\\Acrobat Reader DC\\Reader\\plug_ins"
    }
}
OPT_VM_ARCH = {
    "mac": {
        "10": "i386",
        "11": "i386",
        "15": "x86_64",
        "dc": "x86_64"
    },
    "win": {
        "10": "x86",
        "11": "x86",
        "15": "x86",
        "dc": "x86"
    }
}
# names: fixed guest-side filenames for the sample and its artifacts
OPT_FN_SAMPLE = "Maldoc.pdf"
OPT_FN_TRACE = "Maldoc.out"
OPT_FN_LOG = "Maldoc.log"
# utils
class Command(object):
    """Run a subprocess with a hard timeout, terminating it if it overruns."""

    def __init__(self, cmd):
        self.cmd = cmd
        self.process = None

    def run(self, timeout):
        """Execute the command; kill it after *timeout* seconds. Returns rc."""
        def target():
            self.process = Popen(self.cmd)
            self.process.communicate()
        worker = Thread(target=target)
        worker.start()
        worker.join(timeout)
        if worker.is_alive():
            # deadline passed: kill the child and wait for the worker to reap it
            self.process.terminate()
            worker.join()
        return self.process.returncode
def exec_and_wait(cmd):
    """Run *cmd* to completion; True iff it exited with status 0."""
    return call(cmd) == 0
def exec_and_check(cmd):
    """Run *cmd* and return its captured stdout, or None on any failure.

    Failure covers both a non-zero exit status (CalledProcessError) and an
    unlaunchable command (OSError). The previous bare `except:` also
    swallowed KeyboardInterrupt/SystemExit; Exception is narrow enough.
    """
    try:
        return check_output(cmd)
    except Exception:
        return None
def exec_and_interact(cmd):
    """Launch *cmd* in the background with all output discarded; returns the Popen.

    NOTE(review): the devnull handle is never closed explicitly; it stays
    open for the child's lifetime.
    """
    sink = open(os.devnull, "w")
    return Popen(cmd, stdout=sink, stderr=STDOUT, env=os.environ)
def exec_and_timeout(cmd, timeout):
    """Run *cmd* under a deadline; True iff it finished in time with status 0."""
    return Command(cmd).run(timeout) == 0
# defs
class Machine:
    """One VMware guest (platform + version) that can run a sample end-to-end.

    A run reverts the VM to a clean snapshot, uploads the sample, the
    platform tracer and the Reader driver plugin, opens the sample in the
    matching Reader build, then downloads the trace/log artifacts.

    Bug fix: run() previously invoked self.__record() without the required
    (tag, sample) arguments, so any -r/--record run raised a TypeError.
    """
    def __init__(self, arch, plat, vern, home, bash):
        self.arch = arch
        self.plat = plat
        self.vern = vern
        self.home = home   # guest-side path template, e.g. "/Users/bft/%s"
        self.bash = bash   # guest-side interpreter for runScriptInGuest
        self.name = "mpcc-%s-%s-%s" % (arch, plat, vern)
        self.path = join(OPT_DIR_VMIMG,
                "%s.vmwarevm" % self.name, "%s.vmx" % self.name)
    def __prepare_sample(self, sample):
        """Fetch the sample by hash if missing; verify digest and size cap."""
        pn = join(OPT_DIR_VTDOC, sample)
        if not exists(pn):
            r = requests.get(OPT_URL_VTDOC + sample)
            if r.status_code != 200:
                raise Exception("Error downloading sample")
            with open(pn, "w") as f:
                f.write(r.content)
        # the filename IS the expected sha256 digest
        with open(pn, "r") as f:
            dg = sha256(f.read()).hexdigest()
            if dg != sample:
                raise Exception("Error matching sample")
        if getsize(pn) > OPT_VM_SIZE:
            raise Exception("File too big")
    def __launch_vm(self, tag):
        """Revert to the per-tag work snapshot and boot the VM with a GUI."""
        # restore snapshot
        cmd = [OPT_CMD_VMRUN, "revertToSnapshot", self.path,
                "%s-%s" % (OPT_VM_TASK, tag)]
        if not exec_and_wait(cmd):
            raise Exception("Error reverting VM")
        # start the vm
        cmd = [OPT_CMD_VMRUN, "start", self.path, "gui"]
        if not exec_and_wait(cmd):
            raise Exception("Error starting VM")
    def __shutdown_vm(self):
        """Power the VM off immediately."""
        cmd = [OPT_CMD_VMRUN, "stop", self.path, "hard"]
        if not exec_and_wait(cmd):
            raise Exception("Error stopping VM")
    def __mkdir(self, pn):
        """Create directory *pn* inside the guest."""
        cmd = [OPT_CMD_VMRUN,
                "-gu", OPT_VM_USER, "-gp", OPT_VM_PASS,
                "createDirectoryInGuest", self.path,
                pn]
        if not exec_and_wait(cmd):
            raise Exception("Error creating %s in VM" % pn)
    def __transfer(self, pl, pr):
        """Copy host file *pl* to guest path *pr*."""
        cmd = [OPT_CMD_VMRUN,
                "-gu", OPT_VM_USER, "-gp", OPT_VM_PASS,
                "copyFileFromHostToGuest", self.path,
                pl, pr]
        if not exec_and_wait(cmd):
            raise Exception("Error creating %s in VM" % pr)
    def __upload(self, fn, pn):
        """Copy host file *pn* into the guest home as *fn*; returns guest path."""
        pvm = self.home % fn
        cmd = [OPT_CMD_VMRUN,
                "-gu", OPT_VM_USER, "-gp", OPT_VM_PASS,
                "copyFileFromHostToGuest", self.path,
                pn, pvm]
        if not exec_and_wait(cmd):
            raise Exception("Error uploading %s to VM" % pn)
        return pvm
    def __download(self, fn, pn):
        """Copy guest home file *fn* out to host path *pn*; returns guest path."""
        pvm = self.home % fn
        cmd = [OPT_CMD_VMRUN,
                "-gu", OPT_VM_USER, "-gp", OPT_VM_PASS,
                "copyFileFromGuestToHost", self.path,
                pvm, pn]
        if not exec_and_wait(cmd):
            raise Exception("Error downloading %s to host" % pn)
        return pvm
    def __prepare_context(self, tag, sample):
        """Upload sample, tracer and driver plugin; return the guest run script."""
        # upload sample
        pvm = self.__upload(OPT_FN_SAMPLE, join(OPT_DIR_VTDOC, sample))
        # upload tracer
        if self.plat == "mac":
            ptracer = self.__upload("DTrace.sh",
                    join(OPT_DIR_UTILS, "DTrace.sh"))
        elif self.plat == "win":
            ptracer = self.__upload("Procmon.exe",
                    join(OPT_DIR_UTILS, "Procmon.exe"))
            pconfig = self.__upload("Procmon.pmc",
                    join(OPT_DIR_UTILS, "Procmon.pmc"))
        else:
            raise Exception("Unknown platform: %s" % self.plat)
        # upload plugin (bundle layout on mac, single .api on win)
        plug = OPT_VM_PLUG[self.plat][tag[0:2]]
        lcur = join(OPT_DIR_SHARE, self.plat, OPT_VM_ARCH[self.plat][tag[0:2]])
        if self.plat == "mac":
            dn = "/Driver.acroplugin"
            self.__mkdir(plug + dn)
            dn = "/Driver.acroplugin/Contents"
            self.__mkdir(plug + dn)
            fn = "/Info.plist"
            self.__transfer(lcur + dn + fn, plug + dn + fn)
            dn = "/Driver.acroplugin/Contents/MacOS"
            self.__mkdir(plug + dn)
            fn = "/Driver"
            self.__transfer(lcur + dn + fn, plug + dn + fn)
            fn = "/Reader.cer"
            self.__transfer(lcur + dn + fn, plug + dn + fn)
            fn = "/Reader.dig"
            self.__transfer(lcur + dn + fn, plug + dn + fn)
            dn = "/Driver.acroplugin/Contents/Resources"
            self.__mkdir(plug + dn)
            dn = "/Driver.acroplugin/Contents/Resources/English.lproj"
            self.__mkdir(plug + dn)
            fn = "/InfoPlist.strings"
            self.__transfer(lcur + dn + fn, plug + dn + fn)
        elif self.plat == "win":
            fn = "Driver.api"
            self.__transfer(lcur + "/" + fn, plug + "\\" + fn)
        else:
            raise Exception("Unknown platform: %s" % self.plat)
        # get program path
        prog = OPT_VM_PROG[self.plat][tag[0:2]]
        # build the in-guest run script (dtrace wrapper on mac;
        # Procmon capture + CSV export on win)
        if self.plat == "mac":
            script = \
                '''
                "%s" "%s" "%s" -u %s "%s"
                ''' % (ptracer, self.home % OPT_FN_TRACE,
                        self.home % "sudo", OPT_VM_USER,
                        prog)
        elif self.plat == "win":
            script = \
                '''
                start "" "%s" /accepteula /quiet /minimized /loadconfig "%s" /backingfile "%s"
                "%s" /accepteula /waitforidle
                start "" /wait "%s"
                "%s" /terminate
                "%s" /accepteula /loadconfig "%s" /openlog "%s" /saveapplyfilter /saveas "%s"
                move "%s" "%s"
                ''' % (ptracer, pconfig, self.home % "Maldoc.pml",
                        ptracer,
                        prog,
                        ptracer,
                        ptracer, pconfig, self.home % "Maldoc.pml", self.home % "Maldoc.csv",
                        self.home % "Maldoc.csv", self.home % OPT_FN_TRACE)
        else:
            raise Exception("Unknown platform: %s" % self.plat)
        return script
    def __run_script(self, script):
        """Run *script* interactively in the guest with the sample-run timeout."""
        cmd = [OPT_CMD_VMRUN,
                "-gu", OPT_VM_USER, "-gp", OPT_VM_PASS,
                "runScriptInGuest", self.path,
                "-activeWindow", "-interactive",
                self.bash, script]
        return exec_and_timeout(cmd, OPT_VM_LIFE)
    def __stop_script(self):
        """Force-kill Reader in the guest and let the system settle."""
        if self.plat == "mac":
            script = \
                '''
                killall AdobeReader
                sleep %d
                ''' % (OPT_VM_REST)
        elif self.plat == "win":
            script = \
                '''
                taskkill /f /t /im "AcroRd32.exe"
                timeout %d > NUL
                ''' % (OPT_VM_REST)
        else:
            raise Exception("Unknown platform: %s" % self.plat)
        cmd = [OPT_CMD_VMRUN,
                "-gu", OPT_VM_USER, "-gp", OPT_VM_PASS,
                "runScriptInGuest", self.path,
                "-activeWindow", "-interactive",
                self.bash, script]
        exec_and_wait(cmd)
    def __download_results(self, tag, sample):
        """Pull the trace and driver log out of the guest."""
        pn = join(OPT_DIR_TRACE,
                "%s-%s-%s.out" % (self.name, tag, sample))
        self.__download(OPT_FN_TRACE, pn)
        pn = join(OPT_DIR_TRACE,
                "%s-%s-%s.log" % (self.name, tag, sample))
        self.__download(OPT_FN_LOG, pn)
    def __record(self, tag, sample):
        """Start an ffmpeg screen capture for this run; returns the Popen."""
        prec = join(OPT_DIR_VIDEO,
                "%s-%s-%s.avi" % (self.name, tag, sample))
        cmd = ["ffmpeg", "-f", "avfoundation",
                "-framerate", "10", "-video_size", "320x240",
                "-pixel_format", "bgr0",
                "-i", "1:none", "-c:v", "libx264",
                prec]
        return exec_and_interact(cmd)
    def run(self, tag, sample, force=False, record=False):
        """Run *sample* on product *tag*; skip if traced already unless *force*."""
        # test if processed
        pn = join(OPT_DIR_TRACE,
                "%s-%s-%s.out" % (self.name, tag, sample))
        if exists(pn) and not force:
            return
        # download sample if needed
        self.__prepare_sample(sample)
        # start vm
        self.__launch_vm(tag)
        try:
            # prepare context
            script = self.__prepare_context(tag, sample)
            # record if necessary
            # (fix: __record requires (tag, sample); was called with no args)
            if record:
                ps = self.__record(tag, sample)
            else:
                ps = None
            # launch program
            if not self.__run_script(script):
                self.__stop_script()
            # stop recording if necessary
            if ps is not None:
                ps.send_signal(signal.SIGINT)
                sleep(1)
            # download results
            self.__download_results(tag, sample)
        except Exception as err:
            print(err.message)
        # always power the guest back off (exceptions above are swallowed)
        self.__shutdown_vm()
# main
if __name__ == "__main__":
    # known guest machines
    machines = {
        "mac": Machine("x64", "mac", "11",
                "/Users/bft/%s", "/bin/sh"),
        "win": Machine("x64", "win", "7",
                "C:\\Users\\bft\\%s", ""),
    }
    # command-line interface
    parser = ArgumentParser()
    parser.add_argument("machine", help="Machine",
            choices=("mac", "win"))
    parser.add_argument("tag", help="Tag",
            choices=(
                "dc.07.20033",
                "dc.08.20082",
                "dc.09.20069",
                "dc.10.20056",
                "dc.10.20060",
                "dc.16.20039",
                "dc.16.20045",
                "dc.17.20050",
                "15.06.30033",
                "11.0.00",
                "11.0.10",
                "10.0.0",
                "10.1.0",
                "10.1.4"
            ))
    parser.add_argument("sample", help="Sample")
    parser.add_argument("-f", "--force", action="store_true")
    parser.add_argument("-r", "--record", action="store_true")
    args = parser.parse_args()
    # drive the selected guest
    try:
        machines[args.machine].run(args.tag, args.sample, args.force, args.record)
    except Exception as err:
        print(err.message)
| 13,853 | 29.855234 | 97 | py |
platpal | platpal-master/vmware/scan-exploit.py | #!/usr/bin/env python
import os
import sys
from os.path import abspath, basename, dirname, exists, join
from glob import glob
from subprocess import call
# path
OPT_WKS = abspath(dirname(__file__))
OPT_DIR_VTDOC = join(OPT_WKS, "vtdoc", "analyze")
# enums
OPT_ENUM_PLAT = ["mac", "win"]
OPT_ENUN_TAGS = ["10", "11", "15", "dc"]
# commands
OPT_CMD_BOX = join(OPT_WKS, "box.py")
# consts
OPT_CVE_LIMIT = 10
# main
if __name__ == "__main__":
    # newest 2016 CVE files first
    listing = sorted(glob(join(OPT_DIR_VTDOC, "exploit", "cve-2016-*")),
                     reverse = True)
    cves = dict()
    for fn in listing:
        cve = basename(fn)
        with open(fn, "r") as f:
            rows = f.readlines()
        # cap at OPT_CVE_LIMIT samples per second-column bucket
        buckets = dict()
        for row in rows:
            toks = row.strip().split(",")
            bucket = buckets.setdefault(toks[1], [])
            if len(bucket) >= OPT_CVE_LIMIT:
                continue
            bucket.append(toks[0])
        cves[cve] = []
        for key in buckets:
            cves[cve].extend(buckets[key])
    # run every kept sample against every product tag on every platform
    for cve in cves:
        for sample in cves[cve]:
            for tag in OPT_ENUN_TAGS:
                for plat in OPT_ENUM_PLAT:
                    call(["python", OPT_CMD_BOX, plat, tag, sample])
| 1,346 | 21.830508 | 60 | py |
platpal | platpal-master/vmware/scan-srecent.py | #!/usr/bin/env python
import os
import sys
import json
from os.path import abspath, basename, dirname, exists, join
from subprocess import call
from collections import OrderedDict
# path
OPT_WKS = abspath(dirname(__file__))
OPT_DIR_VTDOC = join(OPT_WKS, "vtdoc", "analyze")
# enums
OPT_ENUM_PLAT = ["mac", "win"]
OPT_ENUN_TAGS = ["10", "11", "15", "dc"]
# commands
OPT_CMD_BOX = join(OPT_WKS, "box.py")
# consts
OPT_REC_LIMIT = 35
# main
if __name__ == "__main__":
    # keep samples whose recency count exceeds the threshold
    samples = []
    fn = join(OPT_DIR_VTDOC, "sort-srecents.csv")
    with open(fn, "r") as f:
        for line in f:
            toks = line.strip().split(",")
            if int(toks[1]) > OPT_REC_LIMIT:
                samples.append(toks[0])
    # run each kept sample against every product tag on every platform
    for sample in samples:
        for tag in OPT_ENUN_TAGS:
            for plat in OPT_ENUM_PLAT:
                call(["python", OPT_CMD_BOX, plat, tag, sample])
| 954 | 22.292683 | 60 | py |
platpal | platpal-master/vmware/batch.py | #!/usr/bin/env python
import os
import sys
import json
from os.path import abspath, basename, dirname, exists, join
from subprocess import call
from collections import OrderedDict
# path
OPT_WKS = abspath(dirname(__file__))
OPT_CMD_BOX = join(OPT_WKS, "box.py")
# main
if __name__ == "__main__":
    if len(sys.argv) < 4 :
        sys.exit("Usage: %s <list> <platform> <product> <opts>" % sys.argv[0])
    pn = sys.argv[1]
    plat = sys.argv[2]
    prod = sys.argv[3]
    # one sample hash per line
    with open(pn, "r") as f:
        hashes = [line.strip() for line in f]
    # -a runs unattended; otherwise pause between samples
    unattended = len(sys.argv) == 5 and sys.argv[4] == "-a"
    for h in hashes:
        print("=== %s ===" % h)
        call(["python", OPT_CMD_BOX, plat, prod, h])
        if not unattended:
            raw_input("\nPress enter for next sample\n")
| 827 | 22 | 78 | py |
MAIAN | MAIAN-master/tool/parse_code.py | from instruction_list import *
def print_code(code,ops):
    """Pretty-print each decoded op plus byte/op totals for the bytecode."""
    for entry in ops:
        print('%6x : %4d : %2s : %12s : %s' % (entry['id'], entry['id'], entry['op'], entry['o'], entry['input']))
    print('Total byte/code size: %d %d' % (len(code)/2, len(ops)))
def get_one_op( code, pos, size_of_input, debug=False ):
    """Decode one opcode (plus its immediate bytes) at hex-string offset *pos*.

    Returns (next_pos, op_dict): byte offset, opcode hex, immediate hex and
    mnemonic (empty string for unknown opcodes).
    NOTE(review): the bounds check below uses size_of_input while the
    cursor advances by 2*size_of_input hex characters -- looks
    inconsistent, but it only gates a debug message; confirm intent.
    """
    if pos + 2 + size_of_input > len(code):
        if debug:
            print('Incorrect code op at %x : %d : %d : %s' % (pos/2, pos + 2 + size_of_input, len(code), code[pos:]))
    mnemonic = ''
    instruction = '0x' + code[pos:pos+2]
    if instruction in cops:
        mnemonic = cops[instruction]
    decoded = {'id': int(pos/2),
               'op': code[pos:pos+2],
               'input': code[pos+2:pos+2+2*size_of_input],
               'o': mnemonic}
    return (pos + 2 + 2*size_of_input, decoded)
def parse_code( code, debug = False):
    """Decode a full hex bytecode string into a list of op dictionaries."""
    ops = list()
    cursor = 0
    while cursor < len(code):
        opcode = code[cursor:cursor+2]
        # PUSH1..PUSH32 (0x60..0x7f) carry 1..32 immediate bytes
        if '60' <= opcode <= '7f':
            width = int(opcode, 16) - int('60', 16) + 1
        else:
            width = 0
        cursor, decoded = get_one_op(code, cursor, width, debug)
        ops.append(decoded)
    return ops
def code_has_instruction( code, ops):
    """True when any decoded op's mnemonic appears in *ops*, else False."""
    return any(entry['o'] in ops for entry in code)
def get_dictionary_of_ops( ops ):
    """Map each distinct opcode hex seen in *ops* to True."""
    return {entry['op']: True for entry in ops}
def has_call( ops ):
    """True when a CALL opcode (0xf1) is present, otherwise False.

    Fix: the original fell off the end of the function and returned None in
    the negative case; an explicit False keeps it consistent with
    code_has_instruction and is backward-compatible for truthiness tests.
    """
    for entry in ops:
        if entry['op'] == 'f1':
            return True
    return False
def find_pos( code, byte_position):
found = -1
for i in range(len(code)) :
if code[i]['id'] == byte_position:
found = i
if found >= 0 and code[found]['o'] == 'JUMPDEST':
return found
return -1
| 1,674 | 24.769231 | 122 | py |
MAIAN | MAIAN-master/tool/instruction_list.py | cops = {
"0x00":"STOP",
"0x01":"ADD",
"0x02":"MUL",
"0x03":"SUB",
"0x04":"DIV",
"0x05":"SDIV",
"0x06":"MOD",
"0x07":"SMOD",
"0x08":"ADDMOD",
"0x09":"MULMOD",
"0x0a":"EXP",
"0x0b":"SIGNEXTEND",
"0x10":"LT",
"0x11":"GT",
"0x12":"SLT",
"0x13":"SGT",
"0x14":"EQ",
"0x15":"ISZERO",
"0x16":"AND",
"0x17":"OR",
"0x18":"XOR",
"0x19":"NOT",
"0x1a":"BYTE",
"0x20":"SHA3",
"0x30":"ADDRESS",
"0x31":"BALANCE",
"0x32":"ORIGIN",
"0x33":"CALLER",
"0x34":"CALLVALUE",
"0x35":"CALLDATALOAD",
"0x36":"CALLDATASIZE",
"0x37":"CALLDATACOPY",
"0x38":"CODESIZE",
"0x39":"CODECOPY",
"0x3a":"GASPRICE",
"0x3b":"EXTCODESIZE",
"0x3c":"EXTCODECOPY",
"0x3d":"MCOPY",
"0x40":"BLOCKHASH",
"0x41":"COINBASE",
"0x42":"TIMESTAMP",
"0x43":"NUMBER",
"0x44":"DIFFICULTY",
"0x45":"GASLIMIT",
"0x50":"POP",
"0x51":"MLOAD",
"0x52":"MSTORE",
"0x53":"MSTORE8",
"0x54":"SLOAD",
"0x55":"SSTORE",
"0x56":"JUMP",
"0x57":"JUMPI",
"0x58":"PC",
"0x59":"MSIZE",
"0x5a":"GAS",
"0x5b":"JUMPDEST",
"0x5c":"SLOADEXT",
"0x5c":"SLOADBYTESEXT",
"0x5d":"SSTOREEXT",
"0x5d":"SSTOREBYTESEXT",
"0x60":"PUSH1",
"0x61":"PUSH2",
"0x62":"PUSH3",
"0x63":"PUSH4",
"0x64":"PUSH5",
"0x65":"PUSH6",
"0x66":"PUSH7",
"0x67":"PUSH8",
"0x68":"PUSH9",
"0x69":"PUSH10",
"0x6a":"PUSH11",
"0x6b":"PUSH12",
"0x6c":"PUSH13",
"0x6d":"PUSH14",
"0x6e":"PUSH15",
"0x6f":"PUSH16",
"0x70":"PUSH17",
"0x71":"PUSH18",
"0x72":"PUSH19",
"0x73":"PUSH20",
"0x74":"PUSH21",
"0x75":"PUSH22",
"0x76":"PUSH23",
"0x77":"PUSH24",
"0x78":"PUSH25",
"0x79":"PUSH26",
"0x7a":"PUSH27",
"0x7b":"PUSH28",
"0x7c":"PUSH29",
"0x7d":"PUSH30",
"0x7e":"PUSH31",
"0x7f":"PUSH32",
"0x80":"DUP1",
"0x81":"DUP2",
"0x82":"DUP3",
"0x83":"DUP4",
"0x84":"DUP5",
"0x85":"DUP6",
"0x86":"DUP7",
"0x87":"DUP8",
"0x88":"DUP9",
"0x89":"DUP10",
"0x8a":"DUP11",
"0x8b":"DUP12",
"0x8c":"DUP13",
"0x8d":"DUP14",
"0x8e":"DUP15",
"0x8f":"DUP16",
"0x90":"SWAP1",
"0x91":"SWAP2",
"0x92":"SWAP3",
"0x93":"SWAP4",
"0x94":"SWAP5",
"0x95":"SWAP6",
"0x96":"SWAP7",
"0x97":"SWAP8",
"0x98":"SWAP9",
"0x99":"SWAP10",
"0x9a":"SWAP11",
"0x9b":"SWAP12",
"0x9c":"SWAP13",
"0x9d":"SWAP14",
"0x9e":"SWAP15",
"0x9f":"SWAP16",
"0xa0":"LOG0",
"0xa1":"LOG1",
"0xa2":"LOG2",
"0xa3":"LOG3",
"0xa4":"LOG4",
"0xf0":"CREATE",
"0xf1":"CALL",
"0xf2":"CALLCODE",
"0xf3":"RETURN",
"0xf4":"DELEGATECALL",
"0xf5":"BREAKPOINT",
"0xf6":"RNGSEED",
"0xf7":"SSIZEEXT",
"0xf8":"SLOADBYTES",
"0xf9":"SSTOREBYTES",
"0xfa":"SSIZE",
"0xfb":"STATEROOT",
"0xfc":"TXEXECGAS",
"0xfd":"REVERT",
"0xfe":"INVALID",
"0xff":"SUICIDE"
}
allops = {
"STOP": [0x00, 0, 0],
"ADD": [0x01, 2, 1],
"MUL": [0x02, 2, 1],
"SUB": [0x03, 2, 1],
"DIV": [0x04, 2, 1],
"SDIV": [0x05, 2, 1],
"MOD": [0x06, 2, 1],
"SMOD": [0x07, 2, 1],
"ADDMOD": [0x08, 3, 1],
"MULMOD": [0x09, 3, 1],
"EXP": [0x0a, 2, 1],
"SIGNEXTEND": [0x0b, 2, 1],
"LT": [0x10, 2, 1],
"GT": [0x11, 2, 1],
"SLT": [0x12, 2, 1],
"SGT": [0x13, 2, 1],
"EQ": [0x14, 2, 1],
"ISZERO": [0x15, 1, 1],
"AND": [0x16, 2, 1],
"OR": [0x17, 2, 1],
"XOR": [0x18, 2, 1],
"NOT": [0x19, 1, 1],
"BYTE": [0x1a, 2, 1],
"SHA3": [0x20, 2, 1],
"ADDRESS": [0x30, 0, 1],
"BALANCE": [0x31, 1, 1],
"ORIGIN": [0x32, 0, 1],
"CALLER": [0x33, 0, 1],
"CALLVALUE": [0x34, 0, 1],
"CALLDATALOAD": [0x35, 1, 1],
"CALLDATASIZE": [0x36, 0, 1],
"CALLDATACOPY": [0x37, 3, 0],
"CODESIZE": [0x38, 0, 1],
"CODECOPY": [0x39, 3, 0],
"GASPRICE": [0x3a, 0, 1],
"EXTCODESIZE": [0x3b, 1, 1],
"EXTCODECOPY": [0x3c, 4, 0],
"MCOPY": [0x3d, 3, 0],
"BLOCKHASH": [0x40, 1, 1],
"COINBASE": [0x41, 0, 1],
"TIMESTAMP": [0x42, 0, 1],
"NUMBER": [0x43, 0, 1],
"DIFFICULTY": [0x44, 0, 1],
"GASLIMIT": [0x45, 0, 1],
"POP": [0x50, 1, 0],
"MLOAD": [0x51, 1, 1],
"MSTORE": [0x52, 2, 0],
"MSTORE8": [0x53, 2, 0],
"SLOAD": [0x54, 1, 1],
"SSTORE": [0x55, 2, 0],
"JUMP": [0x56, 1, 0],
"JUMPI": [0x57, 2, 0],
"PC": [0x58, 0, 1],
"MSIZE": [0x59, 0, 1],
"GAS": [0x5a, 0, 1],
"JUMPDEST": [0x5b, 0, 0],
"SLOADEXT": [0x5c, 2, 1],
"SSTOREEXT": [0x5d, 3, 0],
"SLOADBYTESEXT": [0x5c, 4, 0],
"SSTOREBYTESEXT": [0x5d, 4, 0],
"PUSH1": [0x60, 0, 1],
"PUSH2": [0x61, 0, 1],
"PUSH3": [0x62, 0, 1],
"PUSH4": [0x63, 0, 1],
"PUSH5": [0x64, 0, 1],
"PUSH6": [0x65, 0, 1],
"PUSH7": [0x66, 0, 1],
"PUSH8": [0x67, 0, 1],
"PUSH9": [0x68, 0, 1],
"PUSH10": [0x69, 0, 1],
"PUSH11": [0x6a, 0, 1],
"PUSH12": [0x6b, 0, 1],
"PUSH13": [0x6c, 0, 1],
"PUSH14": [0x6d, 0, 1],
"PUSH15": [0x6e, 0, 1],
"PUSH16": [0x6f, 0, 1],
"PUSH17": [0x70, 0, 1],
"PUSH18": [0x71, 0, 1],
"PUSH19": [0x72, 0, 1],
"PUSH20": [0x73, 0, 1],
"PUSH21": [0x74, 0, 1],
"PUSH22": [0x75, 0, 1],
"PUSH23": [0x76, 0, 1],
"PUSH24": [0x77, 0, 1],
"PUSH25": [0x78, 0, 1],
"PUSH26": [0x79, 0, 1],
"PUSH27": [0x7a, 0, 1],
"PUSH28": [0x7b, 0, 1],
"PUSH29": [0x7c, 0, 1],
"PUSH30": [0x7d, 0, 1],
"PUSH31": [0x7e, 0, 1],
"PUSH32": [0x7f, 0, 1],
"DUP1": [0x80, 1, 2],
"DUP2": [0x81, 2, 3],
"DUP3": [0x82, 3, 4],
"DUP4": [0x83, 4, 5],
"DUP5": [0x84, 5, 6],
"DUP6": [0x85, 6, 7],
"DUP7": [0x86, 7, 8],
"DUP8": [0x87, 8, 9],
"DUP9": [0x88, 9, 10],
"DUP10": [0x89, 10, 11],
"DUP11": [0x8a, 11, 12],
"DUP12": [0x8b, 12, 13],
"DUP13": [0x8c, 13, 14],
"DUP14": [0x8d, 14, 15],
"DUP15": [0x8e, 15, 16],
"DUP16": [0x8f, 16, 17],
"SWAP1": [0x90, 2, 2],
"SWAP2": [0x91, 3, 3],
"SWAP3": [0x92, 4, 4],
"SWAP4": [0x93, 5, 5],
"SWAP5": [0x94, 6, 6],
"SWAP6": [0x95, 7, 7],
"SWAP7": [0x96, 8, 8],
"SWAP8": [0x97, 9, 9],
"SWAP9": [0x98, 10, 10],
"SWAP10": [0x99, 11, 11],
"SWAP11": [0x9a, 12, 12],
"SWAP12": [0x9b, 13, 13],
"SWAP13": [0x9c, 14, 14],
"SWAP14": [0x9d, 15, 15],
"SWAP15": [0x9e, 16, 16],
"SWAP16": [0x9f, 17, 17],
"LOG0": [0xa0, 2, 0],
"LOG1": [0xa1, 3, 0],
"LOG2": [0xa2, 4, 0],
"LOG3": [0xa3, 5, 0],
"LOG4": [0xa4, 6, 0],
"CREATE": [0xf0, 3, 1],
"CALL": [0xf1, 7, 1],
"CALLCODE": [0xf2, 7, 1],
"RETURN": [0xf3, 2, 0],
"DELEGATECALL": [0xf4, 6, 1],
"BREAKPOINT": [0xf5, 0, 0],
"RNGSEED": [0xf6, 1, 1],
"SSIZEEXT": [0xf7, 2, 1],
"SLOADBYTES": [0xf8, 3, 0],
"SSTOREBYTES": [0xf9, 3, 0],
"SSIZE": [0xfa, 1, 1],
"STATEROOT": [0xfb, 1, 1],
"TXEXECGAS": [0xfc, 0, 1],
"REVERT": [0xfd, 0, 0],
# "CALLSTATIC": [0xfd, 7, 1],
"INVALID": [0xfe, 0, 0], #
"SUICIDE": [0xff, 1, 0],
"STOP": [0x00, 0, 0]
}
| 7,192 | 23.056856 | 35 | py |
MAIAN | MAIAN-master/tool/gui-maian.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'form.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
try:
import PyQt5
except:
print("\033[91m[-] Python module PyQt5 is missing.\033[0m Please install it (on Ubuntu: sudo apt install python-pyqt5)")
exit(1)
from PyQt5 import QtCore, QtGui, QtWidgets
from Queue import Queue
import sys
import subprocess
import threading
import platform
import re
import maian
import blockchain
class WriteStream(object):
def __init__(self,queue):
self.queue = queue
def write(self, text):
self.queue.put(text)
class MyReceiver(QtCore.QObject):
mysignal = QtCore.pyqtSignal(str)
def __init__(self,queue,*args,**kwargs):
QtCore.QObject.__init__(self,*args,**kwargs)
self.queue = queue
@QtCore.pyqtSlot()
def run(self):
while True:
text = self.queue.get()
self.mysignal.emit(text)
class LongRunningThing(QtCore.QObject):
def __init__(self, myvar, parent=None):
QtCore.QThread.__init__(self, parent)
self.notifyProgress = QtCore.pyqtSignal(int)
self.l = myvar
@QtCore.pyqtSlot()
def run(self):
for el in self.l:
maian.main(el)
dfont = "Linux Biolinum O"
if platform.dist()[0] == 'Ubuntu':
dfont = "Ubuntu Condensed"
class Ui_MAIAN(object):
def setupUi(self, MAIAN):
MAIAN.setObjectName("MAIAN")
MAIAN.resize(950, 821)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MAIAN.sizePolicy().hasHeightForWidth())
MAIAN.setSizePolicy(sizePolicy)
MAIAN.setMinimumSize(QtCore.QSize(950, 815))
MAIAN.setMaximumSize(QtCore.QSize(950, 815))
font = QtGui.QFont()
font.setFamily("Liberation Sans")
font.setPointSize(10)
MAIAN.setFont(font)
self.groupBox = QtWidgets.QGroupBox(MAIAN)
self.groupBox.setGeometry(QtCore.QRect(10, 640, 431, 111))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.groupBox.setFont(font)
self.groupBox.setObjectName("groupBox")
self.lineMaxFuncInv = QtWidgets.QLineEdit(self.groupBox)
self.lineMaxFuncInv.setGeometry(QtCore.QRect(330, 30, 91, 27))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.lineMaxFuncInv.setFont(font)
self.lineMaxFuncInv.setFrame(False)
self.lineMaxFuncInv.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lineMaxFuncInv.setObjectName("lineMaxFuncInv")
self.label_4 = QtWidgets.QLabel(self.groupBox)
self.label_4.setGeometry(QtCore.QRect(20, 40, 221, 20))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.groupBox)
self.label_5.setGeometry(QtCore.QRect(20, 80, 171, 17))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.label_5.setFont(font)
self.label_5.setObjectName("label_5")
self.lineSolverTimeout = QtWidgets.QLineEdit(self.groupBox)
self.lineSolverTimeout.setGeometry(QtCore.QRect(330, 70, 91, 27))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.lineSolverTimeout.setFont(font)
self.lineSolverTimeout.setFrame(False)
self.lineSolverTimeout.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.lineSolverTimeout.setObjectName("lineSolverTimeout")
self.lineMaxFuncInv.raise_()
self.label_4.raise_()
self.label_5.raise_()
self.lineSolverTimeout.raise_()
self.groupBox_2 = QtWidgets.QGroupBox(MAIAN)
self.groupBox_2.setGeometry(QtCore.QRect(10, 10, 431, 621))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.groupBox_2.setFont(font)
self.groupBox_2.setObjectName("groupBox_2")
self.txtSolidity = QtWidgets.QTextEdit(self.groupBox_2)
self.txtSolidity.setGeometry(QtCore.QRect(20, 130, 391, 471))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.txtSolidity.setFont(font)
self.txtSolidity.setFrameShape(QtWidgets.QFrame.NoFrame)
self.txtSolidity.setFrameShadow(QtWidgets.QFrame.Plain)
self.txtSolidity.setAcceptRichText(False)
self.txtSolidity.setObjectName("txtSolidity")
self.radioSolidity = QtWidgets.QRadioButton(self.groupBox_2)
self.radioSolidity.setGeometry(QtCore.QRect(20, 30, 191, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioSolidity.setFont(font)
self.radioSolidity.setChecked(True)
self.radioSolidity.setObjectName("radioSolidity")
self.radioBytecodecompiled = QtWidgets.QRadioButton(self.groupBox_2)
self.radioBytecodecompiled.setGeometry(QtCore.QRect(20, 90, 171, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioBytecodecompiled.setFont(font)
self.radioBytecodecompiled.setObjectName("radioBytecodecompiled")
self.radioBytecode = QtWidgets.QRadioButton(self.groupBox_2)
self.radioBytecode.setGeometry(QtCore.QRect(20, 60, 311, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.radioBytecode.setFont(font)
self.radioBytecode.setObjectName("radioBytecode")
self.lineSolidityName = QtWidgets.QLineEdit(self.groupBox_2)
self.lineSolidityName.setGeometry(QtCore.QRect(300, 30, 113, 21))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.lineSolidityName.setFont(font)
self.lineSolidityName.setFrame(False)
self.lineSolidityName.setObjectName("lineSolidityName")
self.label_6 = QtWidgets.QLabel(self.groupBox_2)
self.label_6.setGeometry(QtCore.QRect(210, 30, 91, 20))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.label_6.setFont(font)
self.label_6.setObjectName("label_6")
self.groupBox_3 = QtWidgets.QGroupBox(MAIAN)
self.groupBox_3.setGeometry(QtCore.QRect(450, 10, 491, 741))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.groupBox_3.setFont(font)
self.groupBox_3.setObjectName("groupBox_3")
self.txtLog = QtWidgets.QTextEdit(self.groupBox_3)
self.txtLog.setGeometry(QtCore.QRect(20, 420, 451, 301))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(10)
font.setBold(False)
font.setWeight(50)
self.txtLog.setFont(font)
self.txtLog.setFrameShape(QtWidgets.QFrame.NoFrame)
self.txtLog.setFrameShadow(QtWidgets.QFrame.Plain)
self.txtLog.setAcceptRichText(False)
self.txtLog.setTextInteractionFlags(QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.txtLog.setObjectName("txtLog")
self.pushStart = QtWidgets.QPushButton(self.groupBox_3)
self.pushStart.setGeometry(QtCore.QRect(240, 30, 231, 91))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.pushStart.setFont(font)
self.pushStart.setObjectName("pushStart")
self.checkGreedy = QtWidgets.QCheckBox(self.groupBox_3)
self.checkGreedy.setGeometry(QtCore.QRect(20, 90, 151, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.checkGreedy.setFont(font)
self.checkGreedy.setChecked(True)
self.checkGreedy.setObjectName("checkGreedy")
self.checkSuicidal = QtWidgets.QCheckBox(self.groupBox_3)
self.checkSuicidal.setGeometry(QtCore.QRect(20, 60, 151, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.checkSuicidal.setFont(font)
self.checkSuicidal.setChecked(True)
self.checkSuicidal.setObjectName("checkSuicidal")
self.checkProdigal = QtWidgets.QCheckBox(self.groupBox_3)
self.checkProdigal.setGeometry(QtCore.QRect(20, 30, 171, 22))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.checkProdigal.setFont(font)
self.checkProdigal.setChecked(True)
self.checkProdigal.setObjectName("checkProdigal")
self.txtResults = QtWidgets.QTextEdit(self.groupBox_3)
self.txtResults.setGeometry(QtCore.QRect(20, 130, 451, 271))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(15)
font.setBold(False)
font.setWeight(50)
self.txtResults.setFont(font)
self.txtResults.setFrameShape(QtWidgets.QFrame.NoFrame)
self.txtResults.setFrameShadow(QtWidgets.QFrame.Plain)
self.txtResults.setAcceptRichText(False)
self.txtResults.setTextInteractionFlags(QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
self.txtResults.setObjectName("txtResults")
self.groupBox_4 = QtWidgets.QGroupBox(MAIAN)
self.groupBox_4.setGeometry(QtCore.QRect(10, 760, 931, 51))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.groupBox_4.setFont(font)
self.groupBox_4.setTitle("")
self.groupBox_4.setObjectName("groupBox_4")
self.lineSolidityName_2 = QtWidgets.QLineEdit(self.groupBox_4)
self.lineSolidityName_2.setGeometry(QtCore.QRect(10, 10, 951, 21))
font = QtGui.QFont()
font.setFamily(dfont)
font.setPointSize(12)
font.setBold(False)
font.setWeight(50)
self.lineSolidityName_2.setFont(font)
self.lineSolidityName_2.setAutoFillBackground(False)
self.lineSolidityName_2.setStyleSheet("background-color:transparent;\n"
"")
self.lineSolidityName_2.setFrame(False)
self.lineSolidityName_2.setReadOnly(True)
self.lineSolidityName_2.setObjectName("lineSolidityName_2")
self.retranslateUi(MAIAN)
QtCore.QMetaObject.connectSlotsByName(MAIAN)
self.pushStart.clicked.connect(self.start_thread)
self.txtLog.textChanged.connect(self.changed_log)
self.txtSolidity.textChanged.connect(self.changed_source)
self.last_pos = 0
self.locked_text = False
@QtCore.pyqtSlot(str)
def changed_source(self):
tx = self.txtSolidity.toPlainText()
pt = '^[0-9A-Fa-fx ]+$'
remat = re.match(pt,tx)
if remat is None and tx.find('contract') >= 0:
self.radioSolidity.setChecked(True)
ml = re.findall('contract[ |\t|\n]*[a-zA-Z0-9_]*', tx)
if len(ml) == 0:
pass
elif len(ml) == 1:
cnam = re.sub('contract[ |\t|\n]*','',ml[0])
self.lineSolidityName.setText(cnam)
else:
pass
elif remat is not None:
if len(re.findall('60606040', tx)) > 1:
self.radioBytecode.setChecked(True)
else:
self.radioBytecodecompiled.setChecked(True)
@QtCore.pyqtSlot(str)
def changed_log(self):
if self.locked_text: return
self.locked_text = True
tx = self.txtLog.toPlainText()
trl = ['0','1','91','92','93','94']
for tz in trl:
tx = tx.replace('\033['+tz+'m','')
tx = tx.replace('['+tz+'m','')
self.txtLog.setText(tx)
cursor = self.txtLog.textCursor()
cursor.movePosition(QtGui.QTextCursor.End, QtGui.QTextCursor.MoveAnchor)
self.txtLog.setTextCursor(cursor)
self.locked_text = False
t = ''
vp = vs = vg = False
if self.txtLog.toPlainText().find('Check if contract is PRODIGAL') >= 0:
t += '<strong>Check on PRODIGAL </strong> <br />'
if self.txtLog.toPlainText().find('The code does not have CALL/SUICIDE,') >= 0:
t += '<font color="green">Not vulnerable</font><br />'
if self.txtLog.toPlainText().find('Leak vulnerability found') >= 0:
t += '<font color="red">Vulnerability found</font><br />'
vp = True
if self.txtLog.toPlainText().find('Confirmed ! The contract is prodigal') >= 0:
t += '<font color="red">Vulnerability confirmed</font><br />'
if self.txtLog.toPlainText().find('Cannot confirm the leak vulnerability') >= 0:
t += '<font color="blue">Cannot confirm the vulnerability</font><br />'
if self.txtLog.toPlainText().find('Cannot confirm the bug because the contract is not deployed on the blockchain') >= 0:
t += '<font color="blue">Cannot confirm because there is no source code</font><br />'
if self.txtLog.toPlainText().find('No prodigal vulnerability found') >= 0:
t += '<font color="green">Not vulnerable</font>'
if vp:
t += '(see the log below)<br />'
if len(t) > 0:
t += "<br />"
if self.txtLog.toPlainText().find('Check if contract is SUICIDAL') >= 0:
t += '<strong>Check on SUICIDAL </strong><br /> '
if self.txtLog.toPlainText().find('Suicidal vulnerability found') >= 0:
t += '<font color="red">Vulnerability found</font><br />'
vs = True
if self.txtLog.toPlainText().find('Confirmed ! The contract is suicidal') >= 0:
t += '<font color="red">Vulnerability confirmed</font><br />'
if self.txtLog.toPlainText().find('Cannot confirm the suicide vulnerability') >= 0:
t += '<font color="blue">Cannot confirm the vulnerability</font><br />'
if self.txtLog.toPlainText().find('The code does not contain SUICIDE instructions, hence it is not vulnerable') >= 0:
t += '<font color="green">Not vulnerable</font>'
if self.txtLog.toPlainText().find('No suicidal vulnerability found') >= 0:
t += '<font color="green">Not vulnerable</font>'
if vs:
t += '(see the log below)<br />'
if len(t) > 0:
t += "<br />"
if self.txtLog.toPlainText().find('Check if contract is GREEDY') >= 0:
t += '<strong>Check on GREEDY </strong><br /> '
if self.txtLog.toPlainText().find('No lock vulnerability found because the contract cannot receive Ether') >= 0:
t += '<font color="green">Not vulnerable</font>'
if self.txtLog.toPlainText().find('No locking vulnerability found') >= 0:
t += '<font color="green">Not vulnerable</font>'
if self.txtLog.toPlainText().find('The code does not have CALL/SUICIDE/DELEGATECALL/CALLCODE') >= 0:
t += '<font color="red">Vulnerability found</font><br />'
vg = True
if self.txtLog.toPlainText().find('Locking vulnerability found') >= 0:
t += '<font color="red">Vulnerability found</font><br />'
vg = True
self.txtResults.setHtml(t)
@QtCore.pyqtSlot(str)
def append_text(self,text):
self.txtLog.insertPlainText( text )
@QtCore.pyqtSlot()
def start_thread(self):
self.txtLog.clear()
self.txtResults.clear()
contract = self.txtSolidity.toPlainText()
with open('out/lastcontract','w') as f:
f.write(contract.encode('utf-8'))
f.close()
if self.radioBytecode.isChecked():
type_of_contract = ['--bytecode_source','out/lastcontract']
pt = '^[0-9A-Fa-fx ]+$'
result = re.match(pt, self.txtSolidity.toPlainText())
if result is None:
msg = QtWidgets.QMessageBox()
msg.setWindowTitle("Something went wrong")
msg.setText("The provided code is not bytecode.")
msg.exec_()
return
elif self.radioBytecodecompiled.isChecked():
type_of_contract = ['--bytecode','out/lastcontract']
pt = '^[0-9A-Fa-fx ]+$'
result = re.match(pt, self.txtSolidity.toPlainText())
if result is None:
msg = QtWidgets.QMessageBox()
msg.setWindowTitle("Something went wrong")
msg.setText("The provided code is not bytecode.")
msg.exec_()
return
elif self.radioSolidity.isChecked():
conname = self.lineSolidityName.text()
if len(conname) == 0:
msg = QtWidgets.QMessageBox()
msg.setWindowTitle("Something went wrong")
msg.setText("If the type of source code is Solidity, then you need to specify the main contract name.")
msg.exec_()
return
if self.txtSolidity.toPlainText().find(conname) < 0:
msg = QtWidgets.QMessageBox()
msg.setWindowTitle("Something went wrong")
msg.setText("Contract '"+conname+"' does not exist in the Solidity code.")
msg.exec_()
return
type_of_contract = ['--soliditycode','out/lastcontract',conname]
max_inv = ['--max_inv',self.lineMaxFuncInv.text()]
stimout = ['--solve_timeout',self.lineSolverTimeout.text()]
perform_checks = []
if self.checkProdigal.isChecked():
perform_checks.append( type_of_contract + max_inv + stimout + ['--check','1'] )
if self.checkSuicidal.isChecked():
perform_checks.append( type_of_contract + max_inv + stimout + ['--check','0'] )
if self.checkGreedy.isChecked():
perform_checks.append( type_of_contract + max_inv + stimout + ['--check','2'] )
self.thread = QtCore.QThread()
self.long_running_thing = LongRunningThing(perform_checks)
self.long_running_thing.moveToThread(self.thread)
self.thread.started.connect(self.long_running_thing.run)
self.thread.start()
def retranslateUi(self, MAIAN):
_translate = QtCore.QCoreApplication.translate
MAIAN.setWindowTitle(_translate("MAIAN", "MAIAN v1.0"))
self.groupBox.setTitle(_translate("MAIAN", "Settings"))
self.lineMaxFuncInv.setText(_translate("MAIAN", "3"))
self.label_4.setText(_translate("MAIAN", "Max function invocations"))
self.label_5.setText(_translate("MAIAN", "Solver timeout (msec)"))
self.lineSolverTimeout.setText(_translate("MAIAN", "10000"))
self.groupBox_2.setToolTip(_translate("MAIAN", "The name of the main contract"))
self.groupBox_2.setTitle(_translate("MAIAN", "Type of contract code"))
self.txtSolidity.setProperty("placeholderText", _translate("MAIAN", "Put your code here. Usually, the type is recognized automatically."))
self.radioSolidity.setText(_translate("MAIAN", "Solidity source code"))
self.radioBytecodecompiled.setText(_translate("MAIAN", "Bytecode compiled"))
self.radioBytecode.setText(_translate("MAIAN", "Bytecode source"))
self.lineSolidityName.setPlaceholderText(_translate("MAIAN", "Main contract name"))
self.label_6.setText(_translate("MAIAN", "Contract name"))
self.groupBox_3.setTitle(_translate("MAIAN", "Run"))
self.txtLog.setProperty("placeholderText", _translate("MAIAN", "Log will appear here"))
self.pushStart.setText(_translate("MAIAN", "START"))
self.checkGreedy.setText(_translate("MAIAN", "Check on Greedy"))
self.checkSuicidal.setText(_translate("MAIAN", "Check on Suicidal"))
self.checkProdigal.setText(_translate("MAIAN", "Check on Prodigal"))
self.txtResults.setHtml(_translate("MAIAN", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Linux Biolinum O\'; font-size:15pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Geneva\'; font-size:10pt;\"><br /></p></body></html>"))
self.txtResults.setProperty("placeholderText", _translate("MAIAN", "Main results will appear here"))
self.lineSolidityName_2.setText(_translate("MAIAN", "To keep MAIAN free and up to date, consider donating Ether to our account: 0xfd03b29b5c20f878836a3b35718351adf24f4a06"))
def cev():
blockchain.kill_active_blockchain()
sys.exit(0)
if __name__ == "__main__":
queue = Queue()
sys.stdout = WriteStream(queue)
app = QtWidgets.QApplication(sys.argv)
app.aboutToQuit.connect(cev)
MAIAN = QtWidgets.QWidget()
ui = Ui_MAIAN()
ui.setupUi(MAIAN)
MAIAN.show()
thread = QtCore.QThread()
my_receiver = MyReceiver(queue)
my_receiver.mysignal.connect(ui.append_text)
my_receiver.moveToThread(thread)
thread.started.connect(my_receiver.run)
thread.start()
sys.exit(app.exec_())
| 22,794 | 39.416667 | 218 | py |
MAIAN | MAIAN-master/tool/check_suicide.py | from __future__ import print_function
from parse_code import *
from values import get_params, set_params, initialize_params, print_params, MyGlobals, clear_globals
from execute_block import *
from blockchain import *
def ether_suicide( op, stack, trace, debug ):
# Once SUICIDE is executed, the contract is killed
# Thus the search is stoppped and the contract is flagged as suicidal
global stop_search
MyGlobals.stop_search = True
return True, True
def run_one_check( max_call_depth, ops, contract_address, debug, read_from_blockchain ):
print('\n[ ]\033[1m Search with call depth: %d : \033[0m' % (max_call_depth) , end = '')
initialize_params(read_from_blockchain, contract_address )
clear_globals()
global MAX_CALL_DEPTH
MyGlobals.MAX_CALL_DEPTH = max_call_depth
storage = {}
stack = []
mmemory = {}
data = {}
trace = []
configurations = {}
execute_one_block(ops,stack,0, trace, storage, mmemory, data, configurations, ['SUICIDE'], ether_suicide, 0, 0, debug, read_from_blockchain )
def check_one_contract_on_suicide(contract_bytecode, contract_address, debug, read_from_blockchain, confirm_exploit=False, fhashes=[] ):
print('\033[94m[ ] Check if contract is SUICIDAL\033[0m\n')
print('[ ] Contract address : %s' % contract_address)
print('[ ] Contract bytecode : %s...' % contract_bytecode[:50])
print('[ ] Bytecode length : %d' % len(contract_bytecode) )
print('[ ] Blockchain contract: %s' % confirm_exploit)
print('[ ] Debug : %s' % debug)
ops = parse_code( contract_bytecode, debug )
if not code_has_instruction( ops, ['SUICIDE']) :
#if debug:
print('\n\033[92m[-] The code does not contain SUICIDE instructions, hence it is not vulnerable\033[0m')
return False
if debug: print_code( contract_bytecode, ops )
# Make the amount of sent Ether symbolic variable (i.e., it can take any value)
global symbolic_vars
MyGlobals.symbolic_vars = ['CALLVALUE']
#
# Search for function invocations (from 1 to max_calldepth) that can make the contract the be killed
#
for i in range( 1 , MyGlobals.max_calldepth_in_normal_search + 1 ):
run_one_check( i, ops, contract_address, debug, read_from_blockchain )
if MyGlobals.stop_search:
break
if MyGlobals.stop_search:
print('\n\n\033[91m[-] Suicidal vulnerability found! \033[0m\n\n The following %d transaction(s) will trigger the contract to be killed:' % MyGlobals.no_function_calls )
for n in range(MyGlobals.no_function_calls):
print(' -Tx[%d] :' % (n+1), end='' )
for j in range(len(MyGlobals.function_calls[n+1]['input'] )):
if (j-8) % 64 == 0: print(' ',end='')
print('%s' % MyGlobals.function_calls[n+1]['input'][j], end='')
print('')
if len(fhashes) > 0:
print('\n The transactions correspond to the functions:')
for n in range(MyGlobals.no_function_calls):
if MyGlobals.function_calls[n+1]['input'][:8] in fhashes:
print(' -'+fhashes[ MyGlobals.function_calls[n+1]['input'][:8] ])
print()
if confirm_exploit:
print('\033[1m[ ] Confirming suicide vulnerability on private chain ... \033[0m', end='' )
txs = []
for n in range(MyGlobals.no_function_calls):
tx = {}
tx['from'] = '0x' + MyGlobals.adversary_account
tx['to'] = contract_address
tx['value'] = MyGlobals.function_calls[n+1]['value']
tx['data'] = '0x' + MyGlobals.function_calls[n+1]['input']
txs.append(tx)
execute_transactions( txs)
bcod = MyGlobals.web3.eth.getCode(contract_address)
if len(bcod) <= 2:
print('\n \033[1m\033[91mConfirmed ! The contract is suicidal !\033[0m')
else:
print('\033[94m Cannot confirm the suicide vulnerability \033[0m')
else:
print('\033[94m[-] Cannot confirm the bug because the contract is not deployed on the blockchain.\033[0m')
return True
print('\n\033[92m[-] No suicidal vulnerability found \033[0m')
return False
| 4,410 | 31.91791 | 180 | py |
MAIAN | MAIAN-master/tool/maian.py | '''
Copyright (c) 2018, Ivica Nikolic <cube444@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from __future__ import print_function
from web3 import Web3, KeepAliveRPCProvider, IPCProvider
import argparse,subprocess,sys
found_depend = False
try:
import z3
except:
print("\033[91m[-] Python module z3 is missing.\033[0m Please install it (check https://github.com/Z3Prover/z3)")
found_depend = True
try:
import web3
except:
print("\033[91m[-] Python module web3 is missing.\033[0m Please install it (pip install web3).")
found_depend = True
if not (subprocess.call("type solc", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0):
print("\033[91m[-] Solidity compiler is missing.\033[0m Please install it (check http://solidity.readthedocs.io/en/develop/installing-solidity.html) and make sure solc is in the path.")
found_depend = True
if not (subprocess.call("type geth", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0):
print("\033[91m[-] Go Ethereum is missing.\033[0m Please install it (check https://ethereum.github.io/go-ethereum/install/) and make sure geth is in the path.")
found_depend = True
if found_depend:
sys.exit(1)
import check_suicide
import check_leak
import check_lock
from values import MyGlobals
from blockchain import *
from contracts import *
global debug, max_calldepth_in_normal_search, read_from_blockchain, checktype
def main(args):
    """Command-line entry point.

    Parses the arguments, copies the switches into the shared MyGlobals
    configuration, then runs the selected check (0: SUICIDAL, 1: PRODIGAL,
    2: GREEDY) either against a Solidity source / deployable bytecode file
    (deployed on a fresh private chain) or directly on compiled bytecode.

    args: argv-style list of strings (program name excluded).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c","--check", type=str, help="Check type: use 0 for SUICIDAL check, 1 for PRODIGAL, and 2 for GREEDY", action='store')
    parser.add_argument("-s","--soliditycode", type=str, help="Check solidity contract by specifying: 1) contract file, 2) Main contract name ", action='store', nargs=2)
    parser.add_argument("-b","--bytecode", type=str, help="Check compiled bytecode contract by specifying contract file", action='store')
    parser.add_argument("-bs","--bytecode_source", type=str, help="Check source bytecode contract by specifying contract file", action='store')
    parser.add_argument("--debug", help="Print extended debug info ", action='store_true')
    parser.add_argument("--max_inv", help="The maximal number of function invocations (default 3) ", action='store')
    parser.add_argument("--solve_timeout", help="Z3 solver timeout in milliseconds (default 10000, i.e. 10 seconds)", action='store')
    args = parser.parse_args( args )
    # Copy the CLI switches into the shared MyGlobals configuration object.
    if args.debug: MyGlobals.debug = True
    if args.max_inv: MyGlobals.max_calldepth_in_normal_search = int(args.max_inv)
    if args.solve_timeout: MyGlobals.SOLVER_TIMEOUT = int(args.solve_timeout)
    if args.check: MyGlobals.checktype = int(args.check)
    # Make sure no stale geth instance is holding our RPC port.
    kill_active_blockchain()
    if args.soliditycode or args.bytecode_source:
        print('\n'+'=' * 100)
        # NOTE(review): this assignment is local to main() and never read again;
        # the checkers below consult MyGlobals.read_from_blockchain instead.
        read_from_blockchain = True
        # First compile the contract and produce bytecode/abi
        fhashes = {}
        if args.soliditycode:
            compile_contract(args.soliditycode[0])
            contract_code_path = 'out/'+args.soliditycode[1]+'.bin'
            if not os.path.isfile( contract_code_path ):
                print('\033[91m[-] Contract %s does NOT exist\033[0m' % args.soliditycode[1] )
                return
            # Get the contract function hashes (used later if the contract has vulnerability)
            fhashes = get_function_hashes( args.soliditycode[1] )
        # Connect (start) the private blockchain
        start_private_chain('emptychain',MyGlobals.etherbase_account)
        # If check on leak then we need to send Ether to the contract address before deploying it.
        # This helps later to verify that the contract leaks Ether.
        # Sending Ether has to be done prior to deployment of contract because
        # the contract code may not allow arbitrary account to send Ether.
        if 1 == MyGlobals.checktype:
            supposed_contract_address = predict_contract_address(MyGlobals.etherbase_account)
            print('\033[1m[ ] Sending Ether to contract %s \033[0m' % supposed_contract_address, end='')
            execute_transactions([{'from':'0x'+MyGlobals.sendingether_account,'to':supposed_contract_address,'value':MyGlobals.send_initial_wei}])
            print('\033[92m Sent! \033[0m')
        # Deploy the contract
        if args.soliditycode: contract_address = deploy_contract(args.soliditycode[1], MyGlobals.etherbase_account)
        else: contract_address = deploy_contract(args.bytecode_source, MyGlobals.etherbase_account, True)
        if contract_address is None:
            print('\033[91m[-] Cannot deploy the contract \033[0m' )
            return
        # If check on leak, then make sure the contract has Ether
        if 1 == MyGlobals.checktype:
            bal = MyGlobals.web3.eth.getBalance(contract_address)
            print('\033[1m[ ] The contract balance: %d \033[0m' % bal, end='' )
            if bal > 0:
                print('\033[92m Positive balance\033[0m')
            else:
                # Fixed typo in user-facing message ("cound" -> "could").
                print('could not send Ether to contract')
                return
        code = MyGlobals.web3.eth.getCode(contract_address)
        if code[0:2] == '0x': code = code[2:]
        # Dispatch to the requested checker, running against the deployed contract.
        if 0 == MyGlobals.checktype: ret = check_suicide.check_one_contract_on_suicide(code, contract_address, MyGlobals.debug, MyGlobals.read_from_blockchain, True, fhashes)
        elif 1== MyGlobals.checktype: ret = check_leak.check_one_contract_on_ether_leak(code, contract_address, MyGlobals.debug, MyGlobals.read_from_blockchain, True, fhashes)
        elif 2== MyGlobals.checktype: ret = check_lock.check_one_contract_on_ether_lock(code, contract_address, MyGlobals.debug, MyGlobals.read_from_blockchain)
        kill_active_blockchain()
    elif args.bytecode:
        print('\n'+'=' * 100)
        # NOTE(review): local-only assignment, see note above.
        read_from_blockchain = False
        filepath_code = args.bytecode
        if not os.path.isfile(filepath_code):
            print('\033[91m[-] File %s does NOT exist\033[0m' % filepath_code )
            return
        # The with-statement closes the file; the original also called f.close() redundantly.
        with open(filepath_code,'r') as f: code = f.read()
        code = code.replace('\n','').replace('\r','').replace(' ','')
        if code[0:2] == '0x': code = code[2:]
        # Static (no blockchain) analysis of the given bytecode.
        if 0 == MyGlobals.checktype: ret = check_suicide.check_one_contract_on_suicide(code, '', MyGlobals.debug, MyGlobals.read_from_blockchain, False)
        elif 1 == MyGlobals.checktype: ret = check_leak.check_one_contract_on_ether_leak(code, '', MyGlobals.debug, MyGlobals.read_from_blockchain, False)
        elif 2 == MyGlobals.checktype: ret = check_lock.check_one_contract_on_ether_lock(code, '', MyGlobals.debug, MyGlobals.read_from_blockchain)
    else:
        pass
if __name__ == '__main__':
    # NOTE(review): 'global' at module level is a no-op statement; the flag
    # consulted elsewhere is MyGlobals.exec_as_script, set just below.
    global exec_as_script
    # Mark that we run as a standalone script (enables explicit stdout
    # flushes in progress printing elsewhere), then hand over argv.
    MyGlobals.exec_as_script = True
    import sys
    main(sys.argv[1:])
| 7,987 | 42.650273 | 189 | py |
MAIAN | MAIAN-master/tool/check_leak.py | from __future__ import print_function
from parse_code import *
from values import get_params, set_params, initialize_params, print_params, MyGlobals, clear_globals
from execute_block import *
from blockchain import *
def ether_leak( op, stack, trace, debug ):
    """Search callback: decide whether the current CALL/SUICIDE can leak Ether
    to our (attacker-controlled) address.

    Returns a pair (condition_found, stop_expanding_the_search_tree).
    NOTE(review): when a satisfiable CALL leak is found, the pushed solver
    frame is intentionally NOT popped before returning, so the constraints
    remain on MyGlobals.s for later solution extraction while the search
    continues towards a normal STOP/RETURN.
    """
    # NOTE(review): 'global s' is a leftover; the solver actually used is MyGlobals.s.
    global s
    # CALL leaks
    if op == 'CALL' and len(stack) >= 7 and stack[-2]['type'] == 'constant' and stack[-3]['type']=='constant':
        MyGlobals.s.push()
        MyGlobals.s.add( stack[-2]['z3'] == BitVecVal( int( get_params('my_address',''), 16), 256) ) # CALL sent address coincides with our address
        MyGlobals.s.add( stack[-3]['z3'] > 0) # amount of Ether sent is > 0
        try:
            if MyGlobals.s.check() == sat:
                # Search condition found but keep expanding the search tree to make sure that the execution ends normally,
                # i.e. with STOP/RETURN/SUICIDE
                return True, False
        except Exception as e:
            print ("Exception: "+str(e))
        MyGlobals.s.pop()
    # SUICIDE leaks
    if op == 'SUICIDE' and len(stack) >= 1 and stack[-1]['type'] == 'constant':
        MyGlobals.s.push()
        MyGlobals.s.add( stack[-1]['z3'] == BitVecVal( int( get_params('my_address',''), 16), 256) ) # SUICIDE send address coincides with our address
        try:
            if MyGlobals.s.check() == sat:
                # Once SUICIDE is executed, then no need to look for the final STOP or RETURN
                # because SUICIDE is already a stopping instruction
                global stop_search
                MyGlobals.stop_search = True
                return True, True
        except Exception as e:
            print ("Exception: "+str(e))
        MyGlobals.s.pop()
    return False, False
def run_one_check( max_call_depth, ops, contract_address, debug, read_from_blockchain ):
    """Run one symbolic-execution pass looking for Ether-leaking paths.

    Resets the search parameters and globals, limits the search to
    *max_call_depth* contract invocations, and drives execute_one_block with
    ether_leak as the callback over the Ether-sending opcodes.
    Results are communicated through MyGlobals (stop_search, function_calls).
    """
    # Removed the dead `global MAX_CALL_DEPTH` statement: the function only
    # writes MyGlobals.MAX_CALL_DEPTH, never a module-level name.
    print('\n[ ]\033[1m Search with call depth: %d : \033[0m' % (max_call_depth) , end = '')
    initialize_params(read_from_blockchain, contract_address )
    clear_globals()
    # The amount of sent Ether to the contract is zero
    set_params( 'call_value', '','0' )
    MyGlobals.MAX_CALL_DEPTH = max_call_depth
    # Fresh symbolic machine state for this search.
    storage = {}
    stack = []
    mmemory = {}
    data = {}
    trace = []
    configurations = {}
    execute_one_block(ops,stack,0, trace, storage, mmemory, data, configurations, ['CALL','SUICIDE'], ether_leak, 0, 0, debug, read_from_blockchain )
def check_one_contract_on_ether_leak(contract_bytecode, contract_address, debug = False, read_from_blockchain = False, confirm_exploit=False, fhashes=[] ):
    """Check whether the contract is PRODIGAL (leaks Ether to an arbitrary caller).

    contract_bytecode : hex string of the runtime bytecode
    contract_address  : deployed address (used when reading from the blockchain)
    confirm_exploit   : if True, replay the found transactions on the private chain
    fhashes           : map 4-byte function hash -> signature, for reporting
                        (read-only here, so the mutable [] default is harmless)
    Returns True when a leak was found, False otherwise.
    """
    print('\033[94m[ ] Check if contract is PRODIGAL\033[0m\n')
    print('[ ] Contract address : %s' % contract_address)
    print('[ ] Contract bytecode : %s...' % contract_bytecode[:50])
    print('[ ] Bytecode length : %d' % len(contract_bytecode) )
    print('[ ] Blockchain contract: %s' % confirm_exploit)
    print('[ ] Debug : %s' % debug)
    ops = parse_code( contract_bytecode, debug )
    # No Ether-sending instruction at all -> cannot possibly leak.
    if not code_has_instruction( ops, ['CALL','SUICIDE']) :
        #if debug:
        print('\033[92m[+] The code does not have CALL/SUICIDE, hence it is not prodigal\033[0m')
        return False
    if debug: print_code( contract_bytecode, ops )
    #
    # Search for function invocations (from 1 to max_calldepth) that can make the contract to leak Ether
    #
    for i in range( 1 , MyGlobals.max_calldepth_in_normal_search + 1 ):
        run_one_check( i, ops, contract_address, debug, read_from_blockchain )
        if MyGlobals.stop_search:
            break
    if MyGlobals.stop_search:
        # A leaking path was found; print the transactions that reach it.
        print('\n\n\033[91m[-] Leak vulnerability found! \033[0m\n\n The following %d transaction(s) will trigger the contract to leak:' % MyGlobals.no_function_calls )
        for n in range(MyGlobals.no_function_calls):
            print(' -Tx[%d] :' % (n+1), end='' )
            for j in range(len(MyGlobals.function_calls[n+1]['input'] )):
                # Insert a space before the 4-byte selector and each 32-byte argument.
                if (j-8) % 64 == 0: print(' ',end='')
                print('%s' % MyGlobals.function_calls[n+1]['input'][j], end='')
            print('')
        if len(fhashes) > 0:
            print('\n The transactions correspond to the functions:')
            for n in range(MyGlobals.no_function_calls):
                if MyGlobals.function_calls[n+1]['input'][:8] in fhashes:
                    print(' -'+fhashes[ MyGlobals.function_calls[n+1]['input'][:8] ])
            print()
        if confirm_exploit:
            # Replay the found transactions from the adversary account and
            # verify its balance (net of gas) actually increased.
            print('\033[1m[ ] Confirming leak vulnerability on private chain ... \033[0m', end='' )
            txs = []
            for n in range(MyGlobals.no_function_calls):
                tx = {}
                tx['from'] = '0x' + MyGlobals.adversary_account
                tx['to'] = contract_address
                tx['value'] = MyGlobals.function_calls[n+1]['value']
                tx['data'] = '0x' + MyGlobals.function_calls[n+1]['input']
                txs.append(tx)
            adversary_ether_before = MyGlobals.web3.eth.getBalance('0x' + MyGlobals.adversary_account)
            weiused = execute_transactions( txs)
            difference_in_wei = MyGlobals.web3.eth.getBalance('0x' + MyGlobals.adversary_account) + weiused - adversary_ether_before
            if difference_in_wei > 0:
                print('\n \033[1m\033[91mConfirmed ! The contract is prodigal !\033[0m')
            else:
                print('\033[94m Cannot confirm the leak vulnerability \033[0m')
        else:
            print('\033[94m[-] Cannot confirm the bug because the contract is not deployed on the blockchain.\033[0m')
        return True
    print('\n\033[92m[+] No prodigal vulnerability found \033[0m')
    return False
| 5,936 | 34.130178 | 171 | py |
MAIAN | MAIAN-master/tool/blockchain.py | from __future__ import print_function
from web3 import Web3, KeepAliveRPCProvider, IPCProvider
import subprocess, signal
import time
import sys
import os
from values import MyGlobals
def start_private_chain(chain,etherbase,debug=False):
    """(Re)initialize and launch a private geth chain named *chain*, mining
    to *etherbase*, and connect MyGlobals.web3 to its RPC endpoint.

    Returns the Popen handle of the running geth process; exits the program
    with code 2 if the RPC connection cannot be established.
    NOTE(review): the devnull handle opened here is never closed.
    """
    devnull = open(os.devnull, 'w')
    # 'remptychain' is the pristine template chain; any other name gets
    # wiped and re-created from genesis.json before starting.
    if chain!= 'remptychain':
        # Remove previous blockchain
        pr=subprocess.Popen(['rm','-rf','./blockchains/'+chain])
        pr.wait()
        # Init new blockchain
        if debug: pr=subprocess.Popen(['geth','--datadir','./blockchains/'+chain,'init','./blockchains/genesis.json'])
        else: pr=subprocess.Popen(['geth','--datadir','./blockchains/'+chain,'init','./blockchains/genesis.json'],stdout=devnull, stderr=devnull)
        pr.wait()
        # Copy the accounts
        pr=subprocess.Popen(['cp','-r','./blockchains/remptychain/keystore','./blockchains/'+chain+'/'])
        pr.wait()
    # If something is already listening on our RPC port, kill it first.
    if Web3(KeepAliveRPCProvider(host='127.0.0.1', port=MyGlobals.port_number)).isConnected() :
        print('\033[91m[-] Some blockchain is active, killing it... \033[0m', end='')
        kill_active_blockchain()
        if not( Web3(KeepAliveRPCProvider(host='127.0.0.1', port=MyGlobals.port_number)).isConnected() ):
            print('\033[92m Killed \033[0m')
        else:
            print('Cannot kill')
    print('\033[1m[ ] Connecting to PRIVATE blockchain %s \033[0m' % chain, end='')
    # Launch a mining geth node with RPC enabled on MyGlobals.port_number.
    if debug:
        pro = subprocess.Popen(['geth','--rpc','--rpccorsdomain','"*"','--rpcapi="db,eth,net,web3,personal,web3"', '--rpcport',MyGlobals.port_number, '--datadir','blockchains/'+chain,'--networkid','123','--mine','--minerthreads=1','--etherbase='+MyGlobals.etherbase_account])
    else:
        pro = subprocess.Popen(['geth','--rpc','--rpccorsdomain','"*"','--rpcapi="db,eth,net,web3,personal,web3"', '--rpcport',MyGlobals.port_number, '--datadir','blockchains/'+chain,'--networkid','123','--mine','--minerthreads=1','--etherbase='+MyGlobals.etherbase_account],stdout=devnull, stderr=devnull)
    # NOTE(review): 'global web3' is a no-op leftover; the connection is stored on MyGlobals.
    global web3
    MyGlobals.web3 = Web3(KeepAliveRPCProvider(host='127.0.0.1', port=MyGlobals.port_number))
    # Poll (one dot per second) until the RPC endpoint answers.
    while( not MyGlobals.web3.isConnected() ):
        print('',end='.')
        if MyGlobals.exec_as_script:
            sys.stdout.flush()
        time.sleep(1)
    if( MyGlobals.web3.isConnected() ):
        print('\033[92m ESTABLISHED \033[0m')
    else:
        print('\033[93m[-] Connection failed. Exiting\033[0m')
        exit(2)
    return pro
def kill_active_blockchain():
    """Kill any geth instance bound to MyGlobals.port_number.

    Two passes: first SIGKILL every PID reported by `fuser <port>/tcp`,
    then every PID reported by `lsof -t -i tcp:<port>` whose `ps` entry
    mentions 'datadir' (i.e. looks like a geth node we started), sleeping
    briefly so the port is actually free when we return.

    Fix: the two os.devnull handles were opened and never closed (resource
    leak); they are now managed by `with` blocks.  Commands and kill/sleep
    behavior are unchanged.
    """
    with open(os.devnull, 'w') as devnull:
        proc = subprocess.Popen(['fuser', MyGlobals.port_number + '/tcp'],
                                stdout=subprocess.PIPE, stderr=devnull)
        out, _ = proc.communicate()
        for line in out.splitlines():
            pid = int(line.split(None, 1)[0])
            os.kill(pid, signal.SIGKILL)
            time.sleep(0.5)
    with open(os.devnull, 'w') as devnull:
        # Second sweep: anything still listening on the RPC port.
        proc = subprocess.Popen(['lsof', '-t', '-i', 'tcp:' + MyGlobals.port_number],
                                stdout=subprocess.PIPE, stderr=devnull)
        out, _ = proc.communicate()
        for line in out.splitlines():
            pid = int(line.split(None, 1)[0])
            ps = subprocess.Popen(['ps', '-p', str(pid)],
                                  stdout=subprocess.PIPE, stderr=devnull)
            ps_out, _ = ps.communicate()
            # Only kill processes that look like our geth node (--datadir ...).
            if bytes.decode(ps_out).find('datadir') >= 0:
                os.kill(pid, signal.SIGKILL)
                time.sleep(1)
def execute_transactions(txs):
    """Send each transaction dict in *txs* on the private chain and block
    until it is mined.

    Returns the total gas cost in wei (gasUsed * gasPrice summed over the
    mined transactions).  A failing transaction is printed and skipped, so
    the replay is best-effort.
    """
    count = 0
    weiused = 0
    for tx in txs:
        # Unlock the sending account (password '1') for 15000 seconds.
        MyGlobals.web3.personal.unlockAccount(tx['from'],'1',15000)
        try:
            hash = MyGlobals.web3.eth.sendTransaction( tx )
            # Poll once per second until the transaction appears in a block.
            while MyGlobals.web3.eth.getTransaction(hash)['blockNumber'] is None:
                print('.',end='')
                if MyGlobals.exec_as_script:
                    sys.stdout.flush()
                time.sleep(1)
            print(' tx[%d] mined ' % count, end='')
            weiused += MyGlobals.web3.eth.getTransactionReceipt(hash)['gasUsed'] * MyGlobals.web3.eth.getTransaction(hash)['gasPrice']
        except Exception as e:
            print ("Exception: "+str(e))
        count +=1
    return weiused
| 4,235 | 34.3 | 306 | py |
MAIAN | MAIAN-master/tool/misc.py | from __future__ import print_function
from values import MyGlobals
from hashlib import *
from z3 import *
def print_stack(stack):
    """Pretty-print the symbolic EVM stack, top element first."""
    print('\033[90m------------------------------------- STACK -------------------------------------')
    for entry in reversed(stack):
        if 'z3' not in entry:
            print('%10s : %4x ' % (entry['type'], entry['step']))
            continue
        expr = simplify(entry['z3'])
        if is_bv_value(expr):
            print('%10s : %4x : %x' % (entry['type'], entry['step'], expr.as_long()))
        else:
            print('%10s : %4x : %s' % (entry['type'], entry['step'], expr))
    print('\033[0m')
def print_storage(storage):
    """Dump the symbolic storage map: one line per entry stored under each slot."""
    print('************************************ STORAGE ************************************')
    for slot, entries in storage.items():
        for entry in entries:
            print('\033[91m[ %64x ] \033[0m : ' % slot, end='')
            value = simplify(entry['z3'])
            if is_bv_value(value):
                print('%x' % value.as_long())
            else:
                print('%s' % value)
def print_memory(mmemory):
    """Dump the symbolic memory map: one line per address."""
    print('************************************ MEMORY ************************************')
    for addr, cell in mmemory.items():
        print('\033[91m[ %64x ] \033[0m : ' % addr, end='')
        if cell['type'] == 'undefined':
            print('undefined')
        else:
            value = simplify(cell['z3'])
            if is_bv_value(value):
                print('%x' % value.as_long())
            else:
                print('%s' % value)
def print_trace(trace):
    """Print the executed-instruction trace, one formatted row per step."""
    print('++++++++++++++++++++++++++++ Trace ++++++++++++++++++++++++++++')
    for step in trace:
        print('%6x : %2s : %12s : %s' % (step['id'], step['op'], step['o'], step['input']))
def get_hash(txt):
    """Return the MD5 digest of *txt* (UTF-8 encoded) as a 128-bit integer."""
    digest = md5(txt.encode('utf-8')).hexdigest()
    return int(digest, 16)
# Determines the TX inputs so that the contract can be exploited
def get_function_calls( calldepth, debug ):
    """Extract from the current Z3 model the concrete transaction inputs
    (function selector + 32-byte arguments, plus CALLVALUE) for each of the
    *calldepth* invocations, and store them in MyGlobals.function_calls /
    MyGlobals.no_function_calls.

    Returns True on success, False when the solver is unsat or the model
    cannot be turned into well-formed call data.
    NOTE(review): the 'global s, ...' declarations are leftovers; all state
    lives on MyGlobals.
    """
    global s, no_function_calls, function_calls
    if MyGlobals.s.check() == sat:
        m = MyGlobals.s.model()
        if debug: print('\nSolution:')
        # sol: symbolic-variable name -> hex string of its model value.
        sol = {}
        for d in m:
            if debug: print('%s -> %x' % (d,m[d].as_long() ) )
            sol[str(d)] = '%x' % m[d].as_long()
        function_inputs = {}
        # Get separate calldepth inputs
        for cd in range (1,calldepth+1):
            # Find next free
            # next_free = first byte offset past the highest used argument slot.
            next_free = 0
            for f in range(100):
                if ('input'+str(cd)+'['+str(4+32*f)+']') in sol or ('input'+str(cd)+'['+str(4+32*f)+']d') in sol:
                    next_free = 32*f + 32
            # Fix weird addresses
            # Variables with a trailing 'd' are dynamic-offset placeholders
            # (see CALLDATALOAD handling); relocate their payload to next_free.
            for f in range(100):
                addr = 'input'+str(cd)+'['+str(4+32*f)+']d'
                if addr in sol:
                    old_address = int(sol[addr],16)
                    del sol[addr]
                    sol[addr[:-1]] = '%x'% next_free
                    for offset in range(100):
                        check_address = 'input'+str(cd)+'['+('%x'%(4+old_address + 32*offset))+']'
                        if check_address in sol:
                            sol['input'+str(cd)+'['+'%d'%(4+int(next_free)) +']' ] = sol[check_address]
                            del sol[check_address]
                            next_free += 32
            # Produce the input of the call
            tmp_one = {}
            for addr in sol:
                if addr.find('input'+str(cd)+'[') >= 0:
                    tmp_one[addr] = sol[addr]
            # The function hash
            function_hash = 'input'+str(cd)+'[0]'
            if function_hash not in tmp_one:
                # print('Cannot find function hash')
                # print(tmp_one)
                return False
            # The selector is the top 4 bytes of the first 32-byte word
            # (drop the trailing 56 hex digits).
            if len(tmp_one[ function_hash] [:-56]) > 0:
                function_inputs[cd] = '%08x'% int(tmp_one[ function_hash] [:-56],16)
            else:
                function_inputs[cd] = '0'
            del tmp_one[function_hash]
            # Function arguments
            # Append each 32-byte argument in order; missing slots become zero.
            max_seen = 4
            for offset in range(100):
                addr = 'input'+str(cd)+'['+'%d'%(4+offset*32)+']'
                if addr in tmp_one:
                    function_inputs[cd] = function_inputs[cd] + '%064x' % int(tmp_one[addr],16)
                    max_seen = 4+(offset+1)*32
                    del tmp_one[addr]
                else:
                    function_inputs[cd] = function_inputs[cd] + '%064x' % 0
            # Truncate trailing all-zero padding beyond the last real argument.
            function_inputs[cd] = function_inputs[cd][:2*max_seen]
            if len(tmp_one) > 0:
                print('Some addresses are larger')
                print(tmp_one)
                return False
        MyGlobals.no_function_calls = calldepth
        MyGlobals.function_calls = {}
        # Pair each call's input with its CALLVALUE from the model.
        for n in range(10):
            if n in function_inputs:
                call_value = 0
                for d in m:
                    if str(d) == ('CALLVALUE-'+str(n)):
                        call_value = m[d].as_long()
                MyGlobals.function_calls[n] = {'input':function_inputs[n],'value': call_value}
        # Sanity check: we must have exactly one call per depth level.
        if calldepth != len(MyGlobals.function_calls):
            MyGlobals.no_function_calls = 0
            MyGlobals.function_calls = {}
            return False
        #print(MyGlobals.function_calls)
        if debug:
            for n in range(calldepth):
                print('- %d - %10x - ' % (n+1, MyGlobals.function_calls[n+1]['value'] ), end='' )
                for j in range(len(MyGlobals.function_calls[n+1]['input'] )):
                    if (j-8) % 64 == 0: print(' ',end='')
                    print('%s' % MyGlobals.function_calls[n+1]['input'][j], end='')
                print('')
        return True
    else:
        MyGlobals.no_function_calls = 0
        return False
| 5,817 | 32.245714 | 127 | py |
MAIAN | MAIAN-master/tool/check_lock.py | from __future__ import print_function
from parse_code import *
from values import get_params, set_params, initialize_params, print_params, MyGlobals, clear_globals
from execute_block import *
def ether_lock_can_recieve( op, stack, trace, debug ):
    """Search callback: fires on STOP/RETURN, i.e. a payable execution path
    terminated normally, so the contract CAN receive Ether.

    Flags the global search to stop and returns
    (condition_found, stop_expanding) = (True, True).
    The misspelt name ("recieve") is kept for existing callers.
    Fix: removed the dead `global stop_search` statement — the function never
    binds a module-level name; the real flag is MyGlobals.stop_search.
    """
    # Once STOP/RETURN is executed, the search can be stopped
    MyGlobals.stop_search = True
    return True, True
def ether_lock_can_send( op, stack, trace, debug ):
    """Search callback for Ether-releasing instructions.

    Returns (condition_found, stop_expanding_the_search_tree):
      - SUICIDE: a terminating instruction that releases the balance, so the
        whole search can stop immediately.
      - CALL/CALLCODE/DELEGATECALL with MyGlobals.ETHER_LOCK_GOOD_IF_CAN_CALL
        set: assume the contract can send Ether and stop immediately.
      - otherwise: condition found, but keep searching for a normal
        STOP/RETURN (more precise, may produce false positives).

    Fix: removed two dead `global stop_search` statements — the function
    never binds a module-level name; the real flag is MyGlobals.stop_search.
    """
    # If terminating instruction SUICIDE, then can stop the further search
    if op in ['SUICIDE']:
        MyGlobals.stop_search = True
        return True, True
    # When the op is one among CALL, CALLCODE, and DELEGATECALL:
    # 1) Once we find CALL, ..., we assume the contract can send Ether and thus
    #    has no problem; in this case we stop the search immediately.
    elif MyGlobals.ETHER_LOCK_GOOD_IF_CAN_CALL and op in ['CALL','CALLCODE','DELEGATECALL']:
        MyGlobals.stop_search = True
        return True, True
    # 2) Once we find CALL, we still need to reach some STOP, RETURN, etc.
    #    In this case, we are more precise, but may lead to false positives.
    else:
        return True, False
def run_one_check( max_call_depth, ops, contract_address, debug, read_from_blockchain ):
    """Run one symbolic-execution pass (up to *max_call_depth* invocations)
    looking for instructions that can release Ether.

    Resets the search parameters and globals, then drives execute_one_block
    with ether_lock_can_send as the callback over the send-capable opcodes.
    Results are communicated through MyGlobals (stop_search, ...).
    """
    # Removed the dead `global MAX_CALL_DEPTH` statement: the function only
    # writes MyGlobals.MAX_CALL_DEPTH, never a module-level name.
    print('\n[ ]\033[1m Search with call depth: %d : \033[0m' % (max_call_depth) , end = '')
    initialize_params(read_from_blockchain, contract_address )
    clear_globals()
    MyGlobals.MAX_CALL_DEPTH = max_call_depth
    # Fresh symbolic machine state for this search.
    storage = {}
    stack = []
    mmemory = {}
    data = {}
    trace = []
    configurations = {}
    execute_one_block(ops,stack,0, trace, storage, mmemory, data, configurations, ['CALL','CALLCODE','DELEGATECALL','SUICIDE'], ether_lock_can_send, 0, 0, debug, read_from_blockchain )
def check_one_contract_on_ether_lock(contract_bytecode, contract_address, debug = False, read_from_blockchain = False):
    """Check whether the contract is GREEDY (can receive Ether but never release it).

    Phase 1: symbolically execute one invocation with call_value=100 and see
    whether any path ends in STOP/RETURN (i.e. the contract accepts Ether).
    Phase 2: with blockchain variables made symbolic, search for a path that
    reaches CALL/CALLCODE/DELEGATECALL/SUICIDE (i.e. can send Ether out).
    Returns True when the contract is greedy (locks Ether), False otherwise.
    """
    print('\033[94m[ ] Check if contract is GREEDY\033[0m\n')
    print('[ ] Contract address : %s' % contract_address)
    print('[ ] Contract bytecode : %s...' % contract_bytecode[:50])
    print('[ ] Bytecode length : %d' % len(contract_bytecode) )
    print('[ ] Debug : %s' % debug)
    # NOTE(review): this module-level-style 'global' inside the function declares
    # names that are never assigned as bare names here; state lives on MyGlobals.
    global MAX_CALL_DEPTH, symbolic_vars, symbolic_sha
    ops = parse_code( contract_bytecode, debug )
    #
    #
    # First check if Ether can be received by the contract
    #
    #
    MyGlobals.symbolic_vars = []
    initialize_params(read_from_blockchain, contract_address )
    set_params( 'call_value', '','100' )
    clear_globals()
    MyGlobals.MAX_CALL_DEPTH = 1 # Only one function has to be called
    storage = {}
    stack = []
    mmemory = {}
    data = {}
    trace = []
    configurations = {}
    execute_one_block(ops,stack,0, trace, storage, mmemory, data, configurations, ['STOP','RETURN'], ether_lock_can_recieve, 0, 0, debug, read_from_blockchain )
    print(('\033[91m[-]' if not MyGlobals.stop_search else '\033[92m[+]')+'\033[0m \033[1mContract can receive Ether\033[0m' )
    # If it did not find, then the contract cannot receive Ether and thus it cannot lock ether (is not bad )
    if not MyGlobals.stop_search:
        print('\n\033[92m[-] No lock vulnerability found because the contract cannot receive Ether \033[0m')
        return False
    #
    #
    # Then check if Ether can be released by the contract
    #
    #
    # If it does not have instructions that send Ether, then obviously it locks
    if not code_has_instruction( ops, ['CALL','CALLCODE','DELEGATECALL','SUICIDE']) :
        #if debug:
        print('\033[91m[-] The code does not have CALL/SUICIDE/DELEGATECALL/CALLCODE thus is greedy !\033[0m')
        return True
    if debug: print_code( contract_bytecode, ops )
    # Make some blockchain variables symbolic so they can take any value
    MyGlobals.symbolic_vars = ['CALLVALUE','CALLER','NUMBER','TIMESTAMP','BLOCKHASH','BALANCE','ADDRESS','ORIGIN','EXTCODESIZE']
    MyGlobals.symbolic_sha = True
    MyGlobals.symbolic_load= True
    #
    # Search
    #
    for i in range( 1 , MyGlobals.max_calldepth_in_normal_search + 1 ):
        run_one_check( i, ops, contract_address, debug, read_from_blockchain )
        # A send-capable path was found: the contract can release Ether.
        if MyGlobals.stop_search:
            print('\n\033[92m[+] No locking vulnerability found \033[0m')
            return False
    print('\n\n\033[91m[-] Locking vulnerability found! \033[0m' )
    return True
| 4,701 | 30.77027 | 185 | py |
MAIAN | MAIAN-master/tool/execute_block.py | from __future__ import print_function
import os
import sys
import re
from execute_instruction import *
from values import get_params, initialize_params, print_params
from values import MyGlobals, clear_globals
from misc import *
def execute_one_block( ops , stack , pos , trace, storage, mmemory, data, configurations, search_op, search_function, jumpdepth, calldepth, debug, read_from_blockchain):
    """Core symbolic-execution loop: execute *ops* starting at index *pos*.

    Drives execute() instruction by instruction.  Whenever execution can
    branch — at JUMPI, or when CALLDATALOAD/CALLDATASIZE force enumeration of
    possible input layouts/sizes — it recurses with deep-copied machine state
    (stack/storage/memory/data) and a push/pop-ed Z3 solver frame.  Whenever
    an opcode from *search_op* is reached, *search_function* decides whether
    the searched condition holds and whether to stop expanding this branch.
    Results are communicated through MyGlobals (stop_search,
    search_condition_found, function_calls); the return value itself
    (None/False) is not meaningful to callers.
    """
    # NOTE(review): the 'global' declaration is a leftover; all of these live on MyGlobals.
    global s, stop_search, search_condition_found, visited_nodes
    if MyGlobals.stop_search : return
    MyGlobals.visited_nodes += 1
    if MyGlobals.visited_nodes > MyGlobals.MAX_VISITED_NODES: return
    # Execute the next block of operations
    first = True
    newpos = pos
    while (first or newpos != pos) and not MyGlobals.stop_search:
        first = False
        pos = newpos
        # If no more code, then stop
        if pos >= len(ops) or pos < 0:
            if debug: print('\033[94m[+] Reached bad/end of execution\033[0m')
            return False
        # Debug info
        if debug: print('[ %3d %3d %5d] : %4x : %12s : %s ' % (calldepth, jumpdepth, MyGlobals.visited_nodes, ops[pos]['id'], ops[pos]['o'], ops[pos]['input']) )
        # Check if calldepth or jumpdepth should be changed
        # and stop the search if certain conditions are met
        if pos == 0:
            # Position 0 means the contract is invoked (again): one more call.
            calldepth += 1
            jumpdepth = 0
        if ops[pos]['o'] == 'JUMPDEST': jumpdepth += 1
        if( jumpdepth > MyGlobals.MAX_JUMP_DEPTH):
            if debug:print ('\033[95m[-] Reach MAX_JUMP_DEPTH\033[0m' )
            return
        if( calldepth > MyGlobals.MAX_CALL_DEPTH):
            if debug:print ('\033[95m[-] Reach MAX_CALL_DEPTH\033[0m' )
            return
        # Check if configuration exist if
        # - it is the first instruction in the code (the code restarted)
        # - it is jumpdest
        # - it is the first instruction after JUMPI
        if pos == 0 or ops[pos]['o'] == 'JUMPDEST' or (pos > 0 and ops[pos-1]['o'] == 'JUMPI'):
            if seen_configuration( configurations, ops, pos, stack, mmemory, storage):
                if debug:print ('\033[95m[-] Seen configuration\033[0m' )
                return
        # Check if the current op is one of the search ops
        if ops[pos]['o'] in search_op:
            if debug:
                print('\033[96m[+] Reached %s at %x \033[0m' % (ops[pos]['o'], ops[pos]['id'] ) )
                print_stack( stack )
            new_search_condition_found, stop_expanding_the_search_tree = search_function( ops[pos]['o'] , stack , trace, debug )
            MyGlobals.search_condition_found = MyGlobals.search_condition_found or new_search_condition_found
            if stop_expanding_the_search_tree:
                # Extract the concrete transaction inputs from the solver model.
                get_function_calls( calldepth, debug )
            if MyGlobals.stop_search or stop_expanding_the_search_tree: return
        # Execute the next operation
        newpos, halt = execute( ops, stack, pos, storage, mmemory, data, trace, calldepth, debug, read_from_blockchain )
        # If halt is True, then the execution should stop
        if halt:
            if debug: print('\033[94m[+] Halted on %s on line %x \033[0m' % (ops[pos]['o'],ops[pos]['id']))
            # If normal stop
            if ops[pos]['o'] in ['STOP','RETURN','SUICIDE']:
                # If search condition still not found then call again the contract
                # (infinite loop is prevented by calldepth )
                if not MyGlobals.search_condition_found:
                    stack = []
                    mmemory = {}
                    newpos = 0
                    #data = {}
                    if not debug:
                        print('%d' % calldepth,end='')
                        if MyGlobals.exec_as_script:
                            sys.stdout.flush()
                    continue
                # Else stop the search
                else:
                    MyGlobals.stop_search = True
                    get_function_calls( calldepth, debug )
                    return
            # In all other cases stop further search in this branch of the tree
            else:
                return
        # If program counter did not move
        # It means either:
        # 1) we need to branch
        # 2) calldataload
        # 3) calldatasize
        # 4) unknown instruction
        if pos == newpos:
            si = ops[pos]
            # It can be JUMPI
            if si['o'] == 'JUMPI':
                if len(stack) < 2:
                    if debug: print('\033[95m[-] In JUMPI (line %x) the stack is too small to execute JUMPI\033[0m' % pos )
                    return False
                addr = stack.pop()
                des = stack.pop()
                if is_undefined(des):
                    if debug: print('\033[95m[-] In JUMPI the expression cannot be evaluated (is undefined)\033[0m' )
                    return False
                sole = ' * sole * '
                #
                # Branch when decision is incorrect (no need to compute the addresses)
                #
                # In the fast search mode, the jumpi pos + 1 must be in the list of good jump positions
                if is_good_jump( ops, pos+1, debug ):
                    MyGlobals.s.push()
                    MyGlobals.s.add( des['z3'] == 0)
                    try:
                        if MyGlobals.s.check() == sat:
                            # Fall-through branch is feasible: recurse on a copy of the state.
                            storage2 = copy.deepcopy(storage)
                            stack2 = copy.deepcopy(stack)
                            trace2 = copy.deepcopy(trace)
                            mmemory2 = copy.deepcopy(mmemory)
                            data2 = copy.deepcopy(data)
                            if debug: print('\t'*8+'-'*20+'JUMPI branch 1 (go through)')
                            sole = ''
                            execute_one_block(ops,stack2, pos + 1, trace2, storage2, mmemory2, data2, configurations, search_op, search_function, jumpdepth+1, calldepth, debug, read_from_blockchain )
                    except Exception as e:
                        print ("Exception: "+str(e))
                    MyGlobals.s.pop()
                if MyGlobals.stop_search: return
                #
                # Branch when the decision is possibly correct
                #
                if not is_fixed(addr):
                    if debug: print('\033[95m[-] In JUMPI the jump address cannot be determined \033[0m' % jump_dest )
                    return False
                jump_dest = get_value( addr )
                if( jump_dest <= 0):
                    if debug: print('\033[95m[-] The jump destination is not a valid address : %x\033[0m' % jump_dest )
                    return False
                new_position= find_pos(ops, jump_dest )
                if( new_position < 0):
                    if debug: print('\033[95m[-] The code has no such jump destination: %s at line %x\033[0m' % (hex(jump_dest), si['id']) )
                    return False
                # In the fast search mode, the jumpi new_position must be in the list of good jump positions
                if is_good_jump( ops, new_position, debug ):
                    MyGlobals.s.push()
                    MyGlobals.s.add( des['z3'] != 0)
                    try:
                        if MyGlobals.s.check() == sat:
                            if debug:
                                if ops[pos]['id'] - MyGlobals.last_eq_step < 5:
                                    print('\t'*8+'-'*18+'\033[96m %2d Executing function %x \033[0m' % (calldepth, MyGlobals.last_eq_func) )
                            # Taken branch is feasible: recurse on a copy of the state.
                            storage2 = copy.deepcopy(storage)
                            stack2 = copy.deepcopy(stack)
                            trace2 = copy.deepcopy(trace)
                            mmemory2 = copy.deepcopy(mmemory)
                            data2 = copy.deepcopy(data)
                            if debug: print( ('\t'*8+'-'*20+'JUMPI branch 2 (jump) on step %x' + sole ) % ops[pos]['id'] )
                            execute_one_block(ops,stack2, new_position, trace2, storage2, mmemory2, data2, configurations, search_op, search_function, jumpdepth, calldepth, debug, read_from_blockchain )
                    except Exception as e:
                        print ("Exception: "+str(e))
                    MyGlobals.s.pop()
                return
            # It can be CALLDATALOAD
            elif si['o'] == 'CALLDATALOAD':
                addr = stack.pop()
                # First find the symbolic variable name
                text = str(addr['z3'])
                regex = re.compile('input[0-9]*\[[0-9 ]*\]')
                match = re.search( regex, text)
                if match:
                    sm = text[match.start():match.end()]
                    # assign random (offset) address as value for the variable
                    random_address = get_hash(sm) >> 64
                    r2 = re.compile('\[[0-9 ]*\]')
                    indmat = re.search( r2, sm )
                    index = -2
                    if indmat:
                        index = int( sm[indmat.start()+1:indmat.end()-1] )
                    total_added_to_solver = 0
                    # add 'd' at the end of the name of the symbolic variable (used later to distinguish them)
                    if index>= 0 and ('data-'+str(calldepth)+'-'+str(index)) in data:
                        data[('data-'+str(calldepth)+'-'+str(index))] = BitVec(sm+'d',256)
                        MyGlobals.s.push()
                        MyGlobals.s.add( data[('data-'+str(calldepth)+'-'+str(index))] == random_address )
                        total_added_to_solver = 1
                    # replace the variable with concrete value in stack and memory
                    for st in stack:
                        if 'z3' in st:
                            st['z3'] = simplify(substitute( st['z3'], (BitVec(sm,256),BitVecVal(random_address, 256))))
                    for st in mmemory:
                        if 'z3' in mmemory[st]:
                            mmemory[st]['z3'] = simplify(substitute( mmemory[st]['z3'], (BitVec(sm,256),BitVecVal(random_address, 256))))
                    # replace in the address as well
                    addr = simplify(substitute(addr['z3'], (BitVec(sm,256),BitVecVal(random_address, 256)) ) )
                    # Branch
                    # Enumerate a few possible dynamic-array lengths at this offset.
                    branch_array_size = [0,1,2]
                    for one_branch_size in branch_array_size:
                        storage2 = copy.deepcopy(storage)
                        stack2 = copy.deepcopy(stack)
                        trace2 = copy.deepcopy(trace)
                        mmemory2 = copy.deepcopy(mmemory)
                        data2 = copy.deepcopy(data)
                        data2['data-'+str(calldepth)+'-' + str(addr)] = BitVecVal(one_branch_size,256)
                        # Create fresh symbolic variables for each element of the assumed array.
                        for i in range(one_branch_size):
                            data2['data-'+str(calldepth)+'-'+ str(addr.as_long()+32+32*i)] = BitVec('input'+str(calldepth)+'['+('%s'%(addr.as_long()+32+32*i))+']',256)
                        stack2.append( {'type':'constant','step':ops[pos]['id'], 'z3':BitVecVal( one_branch_size, 256)})
                        MyGlobals.s.push()
                        MyGlobals.s.add( BitVec('input'+str(calldepth)+('[%x'%addr.as_long())+']',256) == one_branch_size)
                        execute_one_block(ops,stack2, pos+1, trace2, storage2, mmemory2, data2, configurations, search_op, search_function, jumpdepth, calldepth, debug, read_from_blockchain )
                        MyGlobals.s.pop()
                    for ta in range(total_added_to_solver):
                        MyGlobals.s.pop()
                else:
                    if debug:
                        print('\033[95m[-] In CALLDATALOAD the address does not contain symbolic variable input[*]\033[0m' )
                        print( addr )
                    return
                return
            # It can be CALLDATASIZE
            elif si['o'] == 'CALLDATASIZE':
                # Assume it is SYMBOLIC variable
                storage2 = copy.deepcopy(storage)
                stack2 = copy.deepcopy(stack)
                trace2 = copy.deepcopy(trace)
                mmemory2 = copy.deepcopy(mmemory)
                data2 = copy.deepcopy(data)
                if -1 not in data2:
                    data2['inputlength-'+str(calldepth)] = BitVec('inputlength-'+str(calldepth), 256)
                stack2.append( {'type':'constant','step':ops[pos]['id'], 'z3': data2['inputlength-'+str(calldepth)]} )
                execute_one_block(ops,stack2, pos+1, trace2, storage2, mmemory2, data2, configurations, search_op, search_function, jumpdepth, calldepth, debug, read_from_blockchain )
                # or Branch on 4 different FIXED sizes
                branch_array_size = [0,8,8+1*32,8+2*32]
                for one_branch_size in branch_array_size:
                    storage2 = copy.deepcopy(storage)
                    stack2 = copy.deepcopy(stack)
                    trace2 = copy.deepcopy(trace)
                    mmemory2 = copy.deepcopy(mmemory)
                    data2 = copy.deepcopy(data)
                    stack2.append( {'type':'constant','step':ops[pos]['id'], 'z3': BitVecVal(one_branch_size,256)} )
                    execute_one_block(ops,stack2, pos+1, trace2, storage2, mmemory2, data2, configurations, search_op, search_function, jumpdepth, calldepth, debug, read_from_blockchain )
                return
            # If nothing from above then stop
            else:
                print('\033[95m[-] Unknown %s on line %x \033[0m' % (si['o'],ops[pos]['id']) )
                return
| 14,211 | 37.514905 | 212 | py |
MAIAN | MAIAN-master/tool/execute_instruction.py | from __future__ import print_function
import copy
from math import *
from instruction_list import *
from parse_code import *
from values import get_params,set_params,print_params,is_params
from values import create_configuration,add_configuration,configuration_exist,seen_configuration,print_configuration
from values import MyGlobals
from hashlib import *
from sha3 import *
import random
import time
from datetime import datetime
from z3 import *
from misc import *
def is_fixed(s):
    """Return True when entry *s* is a constant whose z3 term simplifies to a concrete bit-vector value."""
    if s['type'] != 'constant':
        return False
    return is_bv_value(simplify(s['z3']))
def is_undefined(s):
    """Return True when stack/memory entry *s* carries no usable value."""
    kind = s['type']
    return kind == 'undefined'
def get_value(s):
    """Return the Python int held by a fixed entry (see is_fixed)."""
    term = simplify(s['z3'])
    return term.as_long()
def power(y, x, n):
    """Return (y ** x) % n.

    Delegates to Python's built-in three-argument ``pow``, which performs
    modular exponentiation in C instead of the original hand-rolled
    square-and-multiply recursion.

    The original base case returned the literal 1 for x == 0 (without
    reducing modulo n), so that behavior is preserved explicitly: for
    n == 1, power(y, 0, 1) is 1 while pow(y, 0, 1) would be 0.
    """
    if x == 0:  # preserve original convention: exponent 0 yields 1, even mod 1
        return 1
    return pow(y, x, n)
def store_in_memory( mmemory, addr, value ):
    """Write a 32-byte entry *value* at byte address *addr* of symbolic memory.

    ``mmemory`` maps byte addresses to 32-byte-wide entries.  Because words
    may be stored at arbitrary (unaligned) addresses, a write at ``addr``
    can overlap previously stored words that start up to 31 bytes before or
    after it.  Each overlapping neighbor is patched: the overlapped bytes
    are masked out of the old z3 term and the corresponding bytes of the
    new value are shifted in.  If the incoming value is undefined, any
    overlapped neighbor becomes undefined as well.
    """
    # Words starting AFTER addr: their low-order side overlaps our high bytes.
    for i in range(addr+1, addr+32):
        if i in mmemory:
            if not is_undefined(mmemory[i]):
                if is_undefined( value ):
                    # Unknown bytes bleed into the neighbor: poison it.
                    mmemory[i]['type'] = 'undefined'
                    continue
                obytes = (i - addr);  # number of non-overlapped low bytes kept
                old_value = mmemory[i]['z3']
                # Keep the neighbor's low `obytes` bytes, splice in ours above them.
                new_value = ( old_value & (2**(8*obytes) - 1) ) ^ (value['z3'] << (8*obytes) )
                if new_value == 0: del mmemory[i]
                else: mmemory[i]['z3'] = new_value
    # Words starting BEFORE addr: their high-order side overlaps our low bytes.
    for i in range(addr-31,addr):
        if i in mmemory:
            if not is_undefined(mmemory[i]):
                if is_undefined( value ):
                    mmemory[i]['type'] = 'undefined'
                    continue
                obytes = addr - i;  # number of non-overlapped high bytes kept
                old_value = mmemory[i]['z3']
                # Keep the neighbor's high `obytes` bytes, splice in ours below them.
                new_value = ( old_value & ( (2**(8*obytes)-1) << (8*(32-obytes) ) ) ) ^ ( value ['z3'] >> (8*obytes ) )
                if new_value == 0: del mmemory[i]
                else: mmemory[i]['z3'] = new_value
    # Finally record the word itself at its own address.
    mmemory[addr] = value;
def unary( o1, step, op='NONE' ):
    """Apply a one-operand EVM operation (NOT / ISZERO) to stack entry *o1*.

    Returns a fresh 'constant' entry holding the resulting z3 term.  An
    undefined operand, or an op this helper does not handle (which is
    reported on stdout), yields an 'undefined' entry instead.
    """
    if is_undefined(o1):
        return {'type': 'undefined', 'step': step}
    operand = simplify(o1['z3'])
    if op == 'NOT':
        result = ~operand
    elif op == 'ISZERO':
        result = If(operand == 0, BitVecVal(1, 256), BitVecVal(0, 256))
    else:
        print('did not process unary operation %s ' % op )
        print(o1)
        return {'type': 'undefined', 'step': step}
    return {'type': 'constant', 'step': step, 'z3': result}
def binary( o1, o2 , step, op='NONE'):
    """Apply a two-operand EVM operation to stack entries o1 (top) and o2.

    Returns a new {'type','step','z3'} entry.  A fixed operand can force
    the result even when the other is undefined (e.g. x*0 == 0, x^0 == x);
    otherwise any undefined operand makes the result undefined.  Unsigned
    comparisons/divisions use z3's UDiv/URem/UGT/ULT; the plain Python
    operators on z3 bit-vectors are the signed forms.

    Side effect: EQ against a constant in (2**28, 2**32) — i.e. something
    that looks like a 4-byte function selector — records the step and the
    constant in MyGlobals.last_eq_step / last_eq_func (debugging aid only).
    """
    # In some cases the result can be determined with the knowledge of only one operand
    if is_fixed(o1):
        val = simplify(o1['z3']).as_long()
        if op in ['MUL','AND','DIV','SDIV'] and 0 == val: return {'type':'constant','step':step, 'z3':BitVecVal(0,256) }
        if op in ['XOR','ADD'] and 0 == val: return o2
    if is_fixed(o2):
        val = simplify(o2['z3']).as_long()
        if op in ['MUL','AND','DIV','SDIV'] and 0 == val: return {'type':'constant','step':step, 'z3':BitVecVal(0,256) }
        if op in ['XOR','ADD'] and 0 == val: return o1
    # If some of the operands is undefined then the result should be undefined
    if is_undefined(o1) or is_undefined(o2): return {'type':'undefined','step':step}
    z1 = simplify(o1['z3'])
    z2 = simplify(o2['z3'])
    # NOTE(review): the local name `z3` shadows the imported z3 module for the
    # remainder of this function; harmless here, but rename if code is added.
    if op =='AND' : z3 = z1 & z2
    elif op =='OR' : z3 = z1 | z2
    elif op =='XOR' : z3 = z1 ^ z2
    elif op =='ADD' : z3 = z1 + z2
    elif op =='SUB' : z3 = z1 - z2
    elif op =='EXP' :
        # Exponentiation is only computed when both operands are concrete;
        # a symbolic exponent cannot be expressed as a closed z3 term here.
        if is_bv_value(z1) and is_bv_value(z2):
            z3 = BitVecVal( power (z1.as_long(), z2.as_long(), 2**256), 256 )
        else:
            return {'type':'undefined','step':step}
    elif op =='DIV' : z3 = UDiv(z1,z2)
    elif op =='SDIV': z3 = z1/z2
    elif op =='MOD' : z3 = URem(z1,z2)
    elif op =='SMOD' : z3 = z1 % z2
    elif op =='MUL' : z3 = z1 * z2
    elif op =='GT' : z3 = If(UGT(z1, z2), BitVecVal(1, 256), BitVecVal(0, 256))
    elif op =='SGT' : z3 = If(z1 > z2, BitVecVal(1, 256), BitVecVal(0, 256))
    elif op =='LT' : z3 = If(ULT(z1, z2), BitVecVal(1, 256), BitVecVal(0, 256))
    elif op =='SLT' : z3 = If(z1 < z2, BitVecVal(1, 256), BitVecVal(0, 256))
    elif op =='EQ' :
        # NOTE(review): this `global` declaration is dead — only the
        # MyGlobals class attributes below are written, never module globals.
        global last_eq_step, last_eq_func
        # May reveal function calls
        # last_eq_step and _func are used only in the debugging mode
        if is_bv_value(z1) and z1.as_long() < 2**32 and z1.as_long() > 2**28:
            MyGlobals.last_eq_step = step
            MyGlobals.last_eq_func = z1.as_long()
        if is_bv_value(z2) and z2.as_long() < 2**32 and z2.as_long() > 2**28:
            MyGlobals.last_eq_step = step
            MyGlobals.last_eq_func = z2.as_long()
        z3 = If(z1 == z2, BitVecVal(1, 256), BitVecVal(0, 256))
    else:
        print('did not process binary operation %s ' % op)
        print(o1)
        print(o2)
        return {'type':'undefined','step':step}
    return {'type':'constant','step':step, 'z3': z3}
def ternary( o1, o2 , o3, step, op='NONE'):
    """Apply a three-operand EVM operation (ADDMOD / MULMOD).

    A concrete zero modulus (o3) short-circuits to the constant 0 before
    the other operands are touched, matching EVM semantics.  Operands are
    otherwise assumed to carry a 'z3' term (undefined entries are not
    filtered here).  Unhandled ops are reported and yield 'undefined'.
    """
    # Check the modulus first: a fixed 0 decides the result on its own.
    if o3['type'] == 'constant':
        mod_term = simplify(o3['z3'])
        if is_bv_value(mod_term) and mod_term.as_long() == 0:
            return {'type': 'constant', 'step': step, 'z3': BitVecVal(0, 256)}
    a = simplify(o1['z3'])
    b = simplify(o2['z3'])
    m = simplify(o3['z3'])
    if op == 'ADDMOD':
        return {'type': 'constant', 'step': step, 'z3': (a + b) % m}
    if op == 'MULMOD':
        return {'type': 'constant', 'step': step, 'z3': (a * b) % m}
    print('did not process ternary operation %s ' % op)
    print(o1)
    print(o2)
    print(o3)
    return {'type': 'undefined', 'step': step}
def is_good_jump(ops, pos, debug):
    """Validity check for a jump destination — currently disabled.

    The original body started with an unconditional ``return True``,
    leaving the debug print and the ``return False`` branch unreachable;
    that dead code has been removed.  Parameters are kept for call-site
    compatibility (ops/pos identify the instruction, debug would enable
    diagnostics if the check is ever reinstated).
    """
    return True
def execute( code, stack, pos, storage, mmemory, data, trace, calldepth, debug, read_from_blockchain ):
    """Symbolically execute the single EVM instruction at code[pos].

    Mutates stack / storage / mmemory / data in place and returns
    ``(next_pos, halt)`` where ``halt`` is True when execution of this
    path must stop (terminating opcode, or a value that cannot be
    resolved).  ``data`` accumulates the symbolic CALLDATA variables;
    ``calldepth`` namespaces them per transaction.  ``trace`` is accepted
    for interface compatibility but not used here.

    NOTE(review): several divisions below (`exact_offset/32`, `value/32`,
    `length / 32`, `(ea/32)*32`) are Python-2 style integer division;
    under Python 3 they produce floats (and range() would raise) — confirm
    the intended interpreter before changing them.
    """
    op = code[pos]['o']
    halt = False
    executed = True          # set False if op falls through every branch
    step = code[pos]['id']   # byte offset of the instruction, used as an id
    if op not in allops:
        print('Unknown operation %s at pos %x' % (op,pos) )
        return pos,True
    # check if stack has enough elements
    if allops[op][1] > len(stack):
        if debug: print('Not enough entries in the stack to execute the operation %8s at step %x: required %d, provided %d' % (op,code[pos]['id'], allops[op][1], len(stack)) )
        return pos, True
    # allops[op] = (opcode, #pops, #pushes); used for the sanity check at the end.
    start_stack_size = len(stack)  # NOTE(review): unused
    final_stack_size = len(stack) - allops[op][1] + allops[op][2]
    # get arguments from the stack
    # the cases of DUP and SWAP are different, so avoid those
    args = []
    if op.find('SWAP') < 0 and op.find('DUP') < 0 and op not in ['JUMPI']:
        for i in range( allops[op][1] ): args.append( stack.pop() )
    # all unary
    if op in ['ISZERO','NOT']:
        stack.append( unary ( args[0] ,step, op ) )
    # all binary except SIGNEXTEND
    elif op in ['ADD','MUL','SUB','DIV','SDIV','MOD','SMOD','EXP','AND','OR','XOR', 'LT','GT','SLT','SGT','EQ']:
        stack.append( binary ( args[0] , args[1] , step , op ) )
    # all ternary
    elif op in ['ADDMOD','MULMOD']:
        stack.append( ternary( args[0], args[1], args[2], step, op ) )
    elif op == 'SIGNEXTEND':
        # Only computable when both the byte position and the word are concrete.
        if not is_fixed(args[0]) or not is_fixed(args[1]):
            stack.append( {'type':'undefined','step':step} )
        else:
            o = get_value(args[1])
            t = 256 - 8*( get_value(args[0]) + 1 )   # bit index of the sign bit
            tbit = (o >> t ) & 1
            n = 0
            # Replicate the sign bit into all positions at or below t.
            for i in range(256):
                n ^= (tbit if i<= t else ((o>>i)&1)) << i
            # NOTE(review): the result is tagged 'undefined' although it
            # carries a concrete z3 value — looks like it should be
            # 'constant'; confirm before relying on it.
            stack.append( {'type':'undefined','step':step, 'z3':BitVecVal( n, 256 ) } )
    elif op == 'SHA3':
        addr = simplify(args[0]['z3'])
        offset= simplify(args[1]['z3'])
        exact_address = addr.as_long() if is_bv_value(addr) else -1
        exact_offset = offset.as_long() if is_bv_value(offset) else -1
        res = {'type':'undefined','step':step}
        # Concrete hash only when address/length and every input word are fixed.
        if exact_address >= 0 and exact_offset >= 0:
            if (exact_offset % 32) == 0 : # for now, can deal only with offsets divisible by 32
                val = ''
                all_good = True
                for i in range(exact_offset/32):
                    if (exact_address + i*32) not in mmemory or not is_fixed(mmemory[exact_address+i*32]):
                        all_good = False
                        break
                    val += '%064x' % get_value(mmemory[exact_address + i*32])
                if all_good:
                    k = keccak_256()
                    k.update(val.encode('utf-8'))
                    digest = k.hexdigest()
                    res = {'type':'constant','step':step, 'z3':BitVecVal(int(digest,16), 256) }
        # Optionally replace an uncomputable hash with a fresh symbolic var.
        if MyGlobals.symbolic_sha and is_undefined(res):
            res = {'type':'constant','step':step, 'z3': BitVec('sha-'+str(step)+'-'+str(calldepth),256) }
        stack.append( res )
    elif op.find('PUSH') >= 0: stack.append( {'type':'constant','step':step, 'z3':BitVecVal(int(code[pos]['input'],16), 256)} )
    elif op.find('DUP' ) >= 0: stack.append( copy.deepcopy( stack[-int(op[3:]) ] ) )
    elif op.find('SWAP') >= 0:
        tmp1 = stack[-1]
        tmp2 = stack[-int(op[4:])-1 ]
        stack[-1] = tmp2
        stack[-int(op[4:]) -1] = tmp1
    # assign symbolic variable to some of the parameters (such as CALLVALUE, TIMESTAMP, etc)
    # only if they are selected to get one
    # otherwise, below, they will get fixed value (BitVecVal) as specified
    elif op in MyGlobals.symbolic_vars:
        stack.append( {'type':'constant','step':step, 'z3': BitVec(op+'-'+str(calldepth),256) } )
    # Environment opcodes resolved to the fixed values from MyGlobals.st.
    elif op == 'NUMBER': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('block_number',''),16), 256)} )
    elif op == 'GASLIMIT': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('gas_limit',''),16), 256)} )
    elif op == 'TIMESTAMP': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('time_stamp',''),16), 256)} )
    elif op == 'CALLVALUE': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('call_value',''),16), 256)} )
    elif op == 'ADDRESS': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('contract_address',''), 16), 256)} )
    elif op == 'ORIGIN': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('contract_address',''), 16), 256)} )
    elif op == 'GASPRICE': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('gas_price',''), 16), 256) } )
    elif op == 'COINBASE': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(0,256)} )
    elif op == 'DIFFICULTY': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(0,256)} )
    elif op == 'CALLER': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('my_address',''), 16), 256) } )
    elif op == 'GAS': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('gas',''),16), 256) } )
    elif op == 'MSIZE': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(len(mmemory), 256) } )
    elif op == 'BLOCKHASH': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(0x123,256)} ) # does not use the argument which specifies the blocknumber
    elif op == 'BALANCE': stack.append( {'type':'constant','step':step, 'z3': BitVecVal(int(get_params('contract_balance',''), 10), 256)} ) # always assume that it is the balance of the current contract
    elif op == 'POP': pass
    elif op.find('LOG') >= 0: pass
    elif op == 'CODECOPY': pass
    elif op == 'JUMPDEST':
        if not is_good_jump(code, pos, debug):
            return pos, True
    elif op in ['STOP','RETURN','REVERT', 'INVALID', 'SUICIDE']: halt = True
    elif op in ['CALLDATALOAD']:
        addr = args[0]
        if is_fixed( addr ):
            addr = get_value(addr)
            # If symmbolic variable does not exist, then create it
            if ('data-'+str(calldepth)+'-' + str(addr)) not in data:
                data['data-'+str(calldepth)+'-' + str(addr)] = BitVec('input'+str(calldepth)+'['+str(addr)+']', 256)
            stack.append( {'type':'constant','step':step, 'z3':data['data-'+str(calldepth)+'-' + str(addr)] } )
        elif is_undefined(addr):
            if debug:
                print ('\033[95m[-] In CALLDATALOAD the input address cannot be determined at step %x: \033[0m' % code[pos]['id'] )
                print( addr )
            return pos, True
        #
        # if the address is not fixed (symbolic expression) then assume we are dealing with dynamic array
        # and input[ address ] is the length of the array
        else:
            stack.append( args[0] )
        # The caller (block executor) branches on CALLDATALOAD/SIZE itself.
        return pos, False
    elif op in ['CALLDATASIZE']:
        return pos, False
    elif op == 'CALL':
        # When target address and value are concrete and the value is small,
        # mark the call's output memory region as undefined.
        if is_fixed(args[5]) and is_fixed(args[6]):
            addr = get_value( args[5] )
            value = get_value( args[6] )
            if value < 10000:
                for i in range(value/32):
                    mmemory[addr + 32 * i] = { 'type':'undefined','step':step }
        stack.append( {'type':'constant','step':step, 'z3':BitVec('call_at_step_'+str(step), 256) & 0x1} ) # assume the result of call can be any (True or False)
    elif op == 'CALLDATACOPY':
        memaddr = args[0]
        datapos = args[1]
        length = args[2]
        if not is_fixed(memaddr) or not is_fixed( datapos ) or not is_fixed( length ):
            if debug:
                print('\033[95m[-] In CALLDATACOPY the memory address or datapos or length cannot be determined \033[0m' )
                print(memaddr)
                print(datapos)
                print(length)
            return pos, True
        memaddr = get_value ( memaddr )
        datapos = get_value ( datapos )
        length = get_value ( length )
        if length % 32 != 0:
            if debug:
                print('\033[95m[-] In CALLDATACOPY the length of array (%d) is not multiple of 32 \033[0m' % length )
            return pos, True
        # Materialize one symbolic input word per 32 bytes copied.
        for i in range( length / 32 ):
            data[ datapos + 32 * i ] = BitVec('input'+str(calldepth)+'['+str(datapos + 32 * i )+']',256)
            store_in_memory( mmemory, memaddr + 32 * i , {'type':'constant','step':step,'z3':data[ datapos + 32 * i ]} )
    # Truncate the storing only to 32 byte values
    elif op == 'CALLCODE': stack.append( {'type':'constant','step':step, 'z3':BitVecVal(0,256)} )
    elif op == 'DELEGATECALL': stack.append( {'type':'constant','step':step, 'z3':BitVecVal(0,256)} )
    elif op == 'EXTCODESIZE': stack.append( {'type':'constant','step':step, 'z3':BitVecVal(0,256)} )
    elif op == 'CREATE': stack.append( {'type':'constant','step':step, 'z3':BitVecVal(0,256)} )
    elif op == 'MLOAD':
        addr = args[0]
        if is_undefined(addr):
            if debug:print('\033[95m[-] The MLOAD address on %x cannot be determined\033[0m' % code[pos]['id'] )
            return pos, True
        addr = simplify(addr['z3'])
        if is_bv_value(addr):
            exact_address = addr.as_long()
            if exact_address in mmemory: res = copy.deepcopy(mmemory[exact_address])
            else:
                # Untouched memory reads as zero.
                res = {'type':'constant','step':step, 'z3': BitVecVal(0, 256) }
            stack.append( res )
        else:
            if debug:print('\033[95m[-] The MLOAD address on %x cannot be determined\033[0m' % code[pos]['id'] )
            return pos, True
    elif op == 'MSTORE':
        addr = args[0]
        if is_undefined(addr) or not is_bv_value( simplify(addr['z3']) ) :
            if debug:print('\033[95m[-] The MSTORE the write address on %x cannot be determined\033[0m' % code[pos]['id'] )
            return pos, True
        t = copy.deepcopy( args[1] )
        addr = get_value(addr)
        store_in_memory( mmemory, addr, t )
    elif op in ['MSTORE8']:
        addr = args[0]
        value= args[1]
        if not is_fixed(addr) :
            if debug:print('\033[95m[-] The MSTORE8 the write address on %x cannot be determined\033[0m' % code[pos]['id'] )
            return pos, True
        if not is_fixed(value) :
            if debug:print('\033[95m[-] The MSTORE8 value is undefined \033[0m' % code[pos]['id'] )
            return pos, True
        ea = get_value(addr)
        ev = get_value(value) % 256  # only the low byte is stored
        # Patch the single byte inside the 32-byte-aligned word that holds it.
        if (ea/32)*32 not in mmemory:
            mmemory[(ea/32)*32] = {'type':'constant','step':step, 'z3':BitVecVal(ev << (31- (ea%32)), 256) }
        elif is_fixed( mmemory[(ea/32)*32]['z3'] ):
            v = get_value( mmemory[(ea/32)*32]['z3'] )
            v = (v & (~BitVecVal(0xff,256) << (31- (ea%32)))) ^ (ev << (31- (ea%32)))
            mmemory[(ea/32)*32]['z3'] = v
    elif op == 'SLOAD':
        addr = args[0]
        if is_undefined(addr):
            if debug:print('\033[95m[-] The SLOAD address on %x cannot be determined\033[0m' % code[pos]['id'] )
            return pos, True
        addr = simplify(addr['z3'])
        if is_bv_value(addr):
            exact_address = addr.as_long()
            if exact_address in storage:
                total_values = len(storage[exact_address])
                if total_values == 0:
                    print('In SLOAD the list at address %x has no elements ' % exact_address)
                    exit(0)
                    # NOTE(review): unreachable after exit(0).
                    return pos, True
                else:
                    res = copy.deepcopy(storage[exact_address][0])
            else:
                # First read of this slot: fetch from the chain if connected,
                # otherwise default to zero; cache it in `storage`.
                if MyGlobals.web3 is not None and read_from_blockchain:
                    value = MyGlobals.web3.eth.getStorageAt( get_params('contract_address',''), exact_address )
                else:
                    value = '0'
                t = {'type':'constant','step':step, 'z3': BitVecVal(int(value,16), 256) }
                storage[exact_address] = [ t ]
                res = copy.deepcopy(t)
            stack.append( res )
        else:
            if MyGlobals.symbolic_load:
                stack.append({'type':'constant','step':step, 'z3': BitVec('sload-'+str(step)+'-'+str(calldepth),256) } )
            else:
                if debug:print('\033[95m[-] The SLOAD address on %x cannot be determined\033[0m' % code[pos]['id'] )
                return pos, True
    elif op == 'SSTORE':
        addr = args[0]
        if is_undefined(addr):
            if debug:print('\033[95m[-] The SSTORE address on %x cannot be determined\033[0m' % code[pos]['id'] )
            return pos, True
        t = copy.deepcopy( args[1] )
        if is_bv_value( simplify(addr['z3']) ):
            va = get_value( addr )
            storage[va] = [t];
        else:
            # Symbolic store address: silently dropped when symbolic_load is on.
            if MyGlobals.symbolic_load:
                pass
            else:
                if debug:
                    print ('\033[95m[-] In SSTORE the write address cannot be determined at step %x: \033[0m' % code[pos]['id'] )
                    print( addr )
                return pos, True
    elif op == 'JUMP':
        addr = args[0]
        if not is_fixed( addr ):
            if debug: print('\033[95m[-] In JUMP the address cannot be determined \033[0m' )
            return pos, True
        jump_dest = get_value( addr )
        if( jump_dest <= 0):
            if debug: print('\033[95m[-] The JUMP destination is not a valid address : %x\033[0m' % jump_dest )
            return pos, True
        new_position= find_pos(code, jump_dest )
        if( new_position < 0):
            if debug: print('\033[95m[-] The code has no such JUMP destination: %s at line %x\033[0m' % (hex(jump_dest), code[pos]['id']) )
            return pos, True
        if not is_good_jump(code, new_position, debug):
            return pos, True
        return new_position, False
    elif op == 'JUMPI': return pos , False  # branching handled by the caller
    elif op == 'BYTE':
        byte_no = args[0]
        word = args[1]
        if is_undefined(word) or is_undefined(byte_no):
            res = {'type':'undefined','step':step}
        else:
            res = {'type':'constant','step':step, 'z3': (word['z3'] >> (8*(31-byte_no['z3'])) ) & 0xff }
        stack.append( res )
    else:
        executed = False
    # Sanity check: stack delta must match the opcode's declared pops/pushes.
    if executed and final_stack_size != len(stack):
        print('Incorrect final stack size after executing %s at step %x' % (op,step))
        print(len(stack))
        print(final_stack_size)
        exit(2)
    return pos + 1, halt
| 20,718 | 34.846021 | 215 | py |
MAIAN | MAIAN-master/tool/contracts.py | from __future__ import print_function
from web3 import Web3, KeepAliveRPCProvider, IPCProvider
import os.path
import json
import sched, time
import sys
import glob
import sys
import json
import rlp
from rlp.utils import decode_hex, encode_hex, ascii_chr, str_to_bytes
from subprocess import Popen, PIPE, STDOUT
from values import MyGlobals
from blockchain import *
def compile_contract(filename):
    """Compile a Solidity source file with ``solc`` into ./out (.bin + .abi).

    Prints progress; returns early when the file does not exist, and calls
    exit() when solc reports an error.  The compiled artifacts are written
    by solc itself, so nothing is returned on success.

    Fixes over the original: the ``while p.poll() is None: readline()``
    loop could drop output emitted just before solc exited (and busy-spun
    meanwhile) — ``communicate()`` drains stdout completely and waits.
    The unused read of the source file's contents was also removed.
    """
    print('\033[1m[ ] Compiling Solidity contract from the file %s ... \033[0m' % filename, end='')
    if not os.path.isfile(filename):
        print('\033[91m[-] Contract file %s does NOT exist\033[0m' % filename)
        return
    p = Popen(['solc', '--bin', '--abi', '-o', 'out', filename, '--overwrite'],
              stdout=PIPE, stdin=PIPE, stderr=STDOUT)
    out, _ = p.communicate()  # collects stdout+stderr (merged) and waits for exit
    solo = bytes.decode(out)
    if 'Error' in solo:
        print(solo)
        print('\033[91m[-] Cannot compile the contract \033[0m')
        exit()
    print('\033[92m Done \033[0m')
def get_function_hashes(contract):
    """Map 4-byte function selectors to canonical signatures.

    Reads ``out/<contract>.abi`` and, for every ABI entry of type
    'function', builds the canonical signature ``name(type1,type2,...)``
    and keys it by characters 2..10 of its keccak hash (the hex selector
    without the '0x' prefix).
    """
    with open('out/' + contract + '.abi') as fh:
        abi = json.load(fh)
    selectors = {}
    for entry in abi:
        if 'type' not in entry or entry['type'] != 'function':
            continue
        name = entry['name'] if 'name' in entry else ''
        arg_types = [arg['type'] for arg in entry['inputs']] if 'inputs' in entry else []
        signature = name + '(' + ','.join(arg_types) + ')'
        hash_op = Web3.sha3(signature.encode('utf-8'), encoding='bytes')
        selectors[hash_op[2:10]] = signature
    return selectors
def deploy_contract(filename, etherbase, rawcode = False):
    """Deploy a compiled contract to the connected node and wait for it.

    When ``rawcode`` is True, *filename* contains the raw deployment
    bytecode; otherwise the .abi/.bin artifacts produced by
    compile_contract() are read from ./out.  The creation transaction is
    sent from *etherbase*, then a sched-based poll (confirm_contract)
    waits until a receipt appears.  The confirmed address is written to
    ./out/<name>.address and returned; returns None if sending fails.

    NOTE(review): relies on module-level globals — ``s`` (the scheduler)
    and ``contract_address`` (set by confirm_contract).
    """
    if rawcode:
        with open(filename, 'r') as myfile: byt=myfile.read()
        filename_write_address = os.path.basename(filename)+'.address'
    else:
        filename_abi = os.path.basename(filename)+'.abi'
        filename_bin = os.path.basename(filename)+'.bin'
        filename_write_address = os.path.basename(filename)+'.address'
        with open('./out/'+filename_abi, 'r') as myfile: abi=myfile.read()
        with open('./out/'+filename_bin, 'r') as myfile: byt=myfile.read()
        if( len(abi) == 0 or len(byt) == 0 ):
            print('\033[91m[-] Some of the files is missing or empty: |%s|=%d |%s|=%d' % (filename_abi, len(abi), filename_bin, len(byt) ) )
            print('The contracts have NOT been deployed\033[0m')
            return
        abi = json.loads(abi)
    print('\033[1m[ ] Deploying contract \033[0m', end='')
    # Unlock the sender for 15000 s with the hardcoded dev password '1'.
    MyGlobals.web3.personal.unlockAccount(etherbase, '1', 15000)
    try:
        transaction_creation_hash = MyGlobals.web3.eth.sendTransaction( {'from':etherbase, 'data': ('0x' if byt[0:2]!='0x' else '') +byt , 'gas':6000000} )
    except Exception as e:
        print ("Exception: "+str(e))
        return None
    # Poll once per second until confirm_contract finds a receipt.
    global s
    s = sched.scheduler(time.time, time.sleep)
    s.enter(1, 1, confirm_contract, (transaction_creation_hash,))
    s.run()
    # contract_address is a module global set inside confirm_contract().
    print('\033[92m confirmed at address: %s \033[0m' % contract_address)
    fullcode = MyGlobals.web3.eth.getCode(contract_address)
    print('\033[1m[ ] Contract code length on the blockchain : \033[0m \033[92m %d : %s... \033[0m' % (len(fullcode), fullcode[:30] ) )
    with open('./out/'+filename_write_address, 'w') as f:
        f.write(contract_address)
        f.close()  # redundant inside `with`, kept as-is
    print('\033[1m[ ] Contract address saved in file: \033[0m\033[92m%s \033[0m' % ('./out/'+filename_write_address))
    return contract_address
def confirm_contract(transaction_creation_hash):
    """Scheduler callback: poll for the creation receipt once per second.

    When the receipt appears, the module-level ``contract_address`` global
    is set and polling stops; otherwise the callback re-enqueues itself on
    the module-level scheduler ``s`` (created in deploy_contract).
    Prints a '.' on every poll as a progress indicator.
    """
    print('.', end="")
    # sys.stdout.flush()
    global contract_address
    receipt = MyGlobals.web3.eth.getTransactionReceipt(transaction_creation_hash)
    if( receipt is not None):
        contract_address = receipt['contractAddress']
        return
    s.enter(1, 1, confirm_contract, (transaction_creation_hash,))
def rlp_encode(input):
    """RLP-encode a (byte-)string or a possibly nested list of them.

    A single character below 0x80 encodes as itself; longer strings get an
    0x80-offset length prefix and lists get an 0xc0-offset prefix over the
    concatenation of their encoded items (via encode_length).  Any other
    input type yields None, mirroring the original fall-through.
    """
    if isinstance(input, str):
        if len(input) == 1 and ord(input) < 0x80:
            return input
        return encode_length(len(input), 0x80) + input
    if isinstance(input, list):
        payload = ''.join(rlp_encode(item) for item in input)
        return encode_length(len(payload), 0xc0) + payload
def encode_length(L, offset):
    """Return the RLP length prefix for a payload of L bytes.

    Short payloads (< 56 bytes) collapse into a single prefix byte
    (offset + L); longer ones get a length-of-length byte followed by the
    big-endian length itself (via to_binary).  Lengths that do not fit in
    8 bytes raise an Exception.
    """
    if L >= 256 ** 8:
        raise Exception("input too long")
    if L < 56:
        return chr(offset + L)
    encoded = to_binary(L)
    return chr(offset + 55 + len(encoded)) + encoded
def to_binary(x):
    """Return non-negative int *x* as a big-endian base-256 string of chr()s.

    Returns '' for 0 (matching the original recursion's base case).

    Bug fix: the original recursed with ``int(x / 256)`` — true division.
    For large integers the intermediate float loses precision, producing a
    wrong encoding; floor division ``//`` is exact for any size.
    """
    if x == 0:
        return ''
    return to_binary(x // 256) + chr(x % 256)
def normalize_address(x, allow_blank=False):
    """Normalize an address string to its raw 20-byte form.

    Accepts '0x'-prefixed or bare hex (40 or 48 chars), already-decoded
    bytes, or a 24-byte checksummed form (20 bytes + 4 checksum bytes).
    Raises Exception for anything that does not reduce to 20 bytes.
    When allow_blank is True, '' passes through unchanged.

    NOTE(review): the checksum branch calls ``sha3`` which is not imported
    in this module's visible imports — confirm it is provided elsewhere
    before exercising 24-byte inputs.
    """
    if allow_blank and x == '':
        return ''
    # Strip a leading '0x' from hex input (42/50 = prefixed 40/48 chars).
    if len(x) in (42, 50) and x[:2] == '0x':
        x = x[2:]
    # Decode bare hex into raw bytes.
    if len(x) in (40, 48):
        x = decode_hex(x)
    # 24-byte form: 20-byte address + 4-byte checksum; verify then strip.
    if len(x) == 24:
        assert len(x) == 24 and sha3(x[:20])[:4] == x[-4:]
        x = x[:20]
    if len(x) != 20:
        raise Exception("Invalid address format: %r" % x)
    return x
def predict_contract_address(accountAddress):
    """Predict the address of the next contract created by *accountAddress*.

    A created contract's address is keccak(rlp([sender, nonce]))[-20 bytes];
    the account's current nonce is read from the connected node.  The last
    40 hex characters of the digest are the low 20 bytes.
    """
    nonce = int(MyGlobals.web3.eth.getTransactionCount(accountAddress))
    adr = Web3.sha3(rlp.encode([normalize_address(accountAddress), nonce]), encoding='bytes')[-40:]
    return '0x'+adr
| 5,386 | 30.688235 | 155 | py |
MAIAN | MAIAN-master/tool/values.py | from web3 import Web3, KeepAliveRPCProvider, IPCProvider
import copy
from z3 import *
# Get value
def get_params(param, input):
    """Return the value stored in MyGlobals.st under key param+str(input).

    If the key was never set, an explanatory message is printed and the
    process exits with status 4 (same hard-fail behavior as the original).
    """
    key = param + str(input)
    try:
        return MyGlobals.st[key]
    except KeyError:
        print('need to set the parameters: %s ' % key)
        exit(4)
# Is set
def is_params(param, input):
    """Return True when param+str(input) has been set in MyGlobals.st."""
    key = param + str(input)
    return key in MyGlobals.st
# Set parameter
def set_params(param, input, value):
    """Store *value* in the shared MyGlobals.st table under key param+str(input).

    Fix: the original declared ``global st``, but the function only ever
    mutates the class attribute MyGlobals.st — the declaration was dead
    code and has been removed.
    """
    MyGlobals.st[param + str(input)] = value
# Create a dict of paramters
def initialize_params(read_from_blockchain, c_address):
    """(Re)build MyGlobals.st with the blockchain parameters used during
    symbolic execution of contract *c_address*.

    When *read_from_blockchain* is True, the contract balance is fetched
    from the connected node (plus 1 wei); otherwise a dummy value is used.
    All values are stored as zero-padded strings, matching what the
    executor's get_params() callers expect.

    Fix: the original declared ``global st``, but only the class attribute
    MyGlobals.st is assigned — the dead declaration has been removed.
    """
    # Set (dummy) values for some blockchain parameters used in the contracts
    MyGlobals.st = {}
    MyGlobals.st['my_address'] = MyGlobals.adversary_account
    MyGlobals.st['contract_address'] = c_address
    if read_from_blockchain:
        MyGlobals.st['contract_balance'] = str(MyGlobals.web3.eth.getBalance(c_address)+1).zfill(64)
    else:
        MyGlobals.st['contract_balance'] = '7' * 64
    MyGlobals.st['gas'] = ('765432').zfill(64)
    MyGlobals.st['gas_limit'] = ('%x' % 5000000).zfill(64)
    MyGlobals.st['gas_price'] = ('123').zfill(64)
    MyGlobals.st['time_stamp'] = ('%x' % 0x7687878).zfill(64)
    MyGlobals.st['block_number'] = ('545454').zfill(64)
def print_params():
    """Dump every entry of MyGlobals.st, one right-aligned line per key."""
    for key, value in MyGlobals.st.items():
        print('%20s : %s' % (key, str(value)))
def create_configuration(stack, mmemory, storage):
    """Snapshot the machine state as a {'stack','mmemory','storage'} dict.

    Each component is deep-copied so later mutation of the live state does
    not alter the recorded snapshot.
    """
    return {
        'stack': copy.deepcopy(stack),
        'mmemory': copy.deepcopy(mmemory),
        'storage': copy.deepcopy(storage),
    }
def add_configuration(step, configurations, nc):
    """Append snapshot *nc* to the list kept for *step*, creating it if absent."""
    configurations.setdefault(step, []).append(nc)
def configuration_exist(step, configurations, nc):
    """Return True when an identical snapshot is already recorded for *step*.

    Two snapshots match when their stack, memory and storage components
    all compare equal.
    """
    if step not in configurations:
        return False
    return any(
        old['stack'] == nc['stack']
        and old['mmemory'] == nc['mmemory']
        and old['storage'] == nc['storage']
        for old in configurations[step]
    )
def seen_configuration(configurations, ops, position, stack, mmemory, storage):
    """Record the current state at instruction ops[position]; detect repeats.

    Returns True (recording nothing new) when an identical snapshot was
    already stored for this instruction id; otherwise stores the snapshot
    and returns False.  Used to prune already-explored states.

    Fix: the original also read ``ops[position]['o']`` into an unused
    local ``op``; the dead read has been removed.
    """
    step = ops[position]['id']
    snapshot = create_configuration(stack, mmemory, storage)
    # Check if configuration exist
    if configuration_exist(step, configurations, snapshot):
        return True
    add_configuration(step, configurations, snapshot)
    return False
def print_configuration( conf ):
    """Debug-print each snapshot's stack and storage.

    NOTE(review): ``print_stack`` / ``print_storage`` are not defined or
    imported in this module's visible imports — presumably supplied by a
    sibling module; confirm before relying on this helper.
    """
    for c in conf:
        print_stack( c['stack'] )
        print_storage(c['storage'])
class MyGlobals(object):
    """Process-wide mutable state and tuning knobs for the symbolic search.

    Everything lives as class attributes (no instances are created); the
    rest of the tool reads and writes them directly.
    """

    # --- search limits ---
    MAX_JUMP_DEPTH = 60 # path length in CFG
    MAX_CALL_DEPTH = 0 # different function calls to the contract
    MAX_VISITED_NODES = 2000 # sum of all paths in search of one contract
    max_calldepth_in_normal_search = 3
    ETHER_LOCK_GOOD_IF_CAN_CALL = True
    # Parameter table filled by initialize_params()/set_params().
    st = {}
    #
    # Z3 solver
    #
    s = None
    SOLVER_TIMEOUT = 10000 # solver timeout passed via z3's "timeout" option
    # --- per-search bookkeeping (reset by clear_globals) ---
    search_condition_found = False
    stop_search = False
    visited_nodes = 0
    last_eq_step = -1   # set by binary() on selector-like EQ comparisons
    last_eq_func = -1
    symbolic_vars = []  # opcodes (e.g. 'CALLVALUE') that get symbolic values
    no_function_calls = 0
    function_calls = {}
    symbolic_sha = False   # replace uncomputable SHA3 results with fresh vars
    symbolic_load = False  # allow symbolic SLOAD/SSTORE addresses
    # Params related to blockchain
    port_number = '8550'
    confirming_transaction ='0x3094c123bd9ffc3f41dddefd3ea88e4296e45015b62e892f8bdf9d1b645ef2d2'
    etherbase_account = '0x69190bde29255c02363477462f17e816a9533d3a'
    adversary_account = '5a1cd1d07d9f59898c434ffc90a74ecd937feb12'
    sendingether_account = '564625b3ae8d0602a8fc0fe22c884b091098417f'
    send_initial_wei = 44
    web3 = None  # set once a node connection is established
    # --- run-mode flags ---
    debug = False
    read_from_blockchain = False
    checktype = 0
    exec_as_script = False
def clear_globals():
    """Reset per-search state: install a fresh z3 solver (with the
    configured timeout) and zero all search counters/flags."""
    solver = Solver()
    solver.set("timeout", MyGlobals.SOLVER_TIMEOUT)
    MyGlobals.s = solver
    MyGlobals.search_condition_found = False
    MyGlobals.stop_search = False
    MyGlobals.visited_nodes = 0
    MyGlobals.no_function_calls = 0
    MyGlobals.function_calls = {}
| 4,399 | 23.719101 | 100 | py |
tvm | tvm-main/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import hashlib
import pytest
import sys
import os
from pathlib import Path
pytest_plugins = ["tvm.testing.plugin"]
IS_IN_CI = os.getenv("CI", "") == "true"
REPO_ROOT = Path(__file__).resolve().parent
# These are long running tests (manually curated and extracted from CI logs)
# that should be allocated to test shards in a round-robin fashion. These are
# taken from the 20 (arbitrary number) of tests as from
# https://ci.tlcpack.ai/job/tvm/job/main/2907/testReport
_slowest_tests = [
"tests/python/frontend/tensorflow/test_forward.py::test_forward_broadcast_args",
"tests/python/frontend/tensorflow/test_forward.py::test_forward_broadcast_to",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[int8]",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[uint8]",
"tests/python/topi/python/test_topi_upsampling.py::test_upsampling3d",
"tests/python/topi/python/test_topi_upsampling.py::test_upsampling3d",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[int8]",
"tests/python/frontend/tflite/test_forward.py::test_all_elemwise",
"tests/python/frontend/pytorch/test_object_detection.py::test_detection_models",
"tests/python/topi/python/test_topi_conv2d_int8.py::test_conv2d_nchw[uint8]",
"tests/python/topi/python/test_topi_conv2d_NCHWc.py::test_conv2d_NCHWc",
"tests/python/topi/python/test_topi_conv2d_hwnc_tensorcore.py::test_conv2d_hwnc_tensorcore",
"tests/python/contrib/test_tensorrt.py::test_binary[compile]",
"tests/python/frontend/pytorch/test_forward.py::test_segmentation_models",
"tests/python/topi/python/test_topi_conv2d_NCHWc.py::test_conv2d_NCHWc",
"tests/python/relay/test_py_converter.py::test_global_recursion",
"tests/python/frontend/tensorflow/test_forward.py::test_forward_ptb",
"tests/python/relay/test_op_level6.py::test_topk",
"tests/python/topi/python/test_topi_conv2d_winograd.py::test_conv2d_nchw",
"tests/python/relay/test_py_converter.py::test_global_recursion",
]
# Map each hand-curated slow test to a fixed shard slot, round-robin by its
# position in _slowest_tests (dict comprehension replaces the build loop).
HARDCODED_ALLOCATIONS = {test: idx for idx, test in enumerate(_slowest_tests)}
# These rely on running on the same node to pass successfully
FIXED_ALLOCATION_PREFIXES = {
"tests/python/unittest/test_tvm_testing_features.py": 0,
}
def find_shard_index(nodeid: str, num_shards: int) -> int:
    """
    Return the index of the shard that should run this test.

    Resolution order: tests under a FIXED_ALLOCATION_PREFIXES path are
    pinned to that shard (erroring if the pin exceeds num_shards); the
    hand-curated slow tests use their HARDCODED_ALLOCATIONS slot; all
    other tests are distributed by hashing the pytest nodeid.

    Fix: the local previously named ``hash`` shadowed the builtin; renamed
    to ``bucket``/``digest``.
    """
    for prefix, target_shard_idx in FIXED_ALLOCATION_PREFIXES.items():
        if nodeid.startswith(prefix):
            if target_shard_idx >= num_shards:
                raise RuntimeError(
                    f"Cannot collect sharded tests, {nodeid} has hardcoded shard index {target_shard_idx} among only {num_shards} shards"
                )
            return target_shard_idx

    if nodeid in HARDCODED_ALLOCATIONS:
        bucket = HARDCODED_ALLOCATIONS[nodeid]
    else:
        # md5 keeps the assignment stable across processes and runs
        # (the builtin hash() is salted per-process).
        digest = hashlib.md5(nodeid.encode())
        bucket = int(digest.hexdigest(), 16)

    return bucket % num_shards
def pytest_collection_modifyitems(config, items):
    """Pytest hook: keep only the collected tests belonging to this shard.

    Active only when CI, TVM_NUM_SHARDS and TVM_SHARD_INDEX are all set in
    the environment.  ``items`` must be mutated in place for pytest to see
    the change, so the filtered list is written back via slice assignment —
    which is also O(n) instead of the original's O(n^2) repeated
    ``items.remove(item)`` over thousands of collected tests.
    """
    if not all(k in os.environ for k in ["CI", "TVM_NUM_SHARDS", "TVM_SHARD_INDEX"]):
        # Only apportion tests if in CI and in a job that is set up for it
        return

    num_shards = int(os.environ["TVM_NUM_SHARDS"])
    shard_index = int(os.environ["TVM_SHARD_INDEX"])
    print(f"Marking tests for shard {shard_index} of {num_shards}")

    items[:] = [
        item
        for item in items
        if find_shard_index(item.nodeid, num_shards=num_shards) == shard_index
    ]
def pytest_sessionstart():
    """Pytest hook: install the CI request hook at session start.

    Only active in CI (CI env var == "true").  Adds the repo's
    tests/scripts/request_hook directory to sys.path so the hook module
    can be imported, then initializes it (it rewrites outbound download
    URLs for CI mirroring).
    """
    if IS_IN_CI:
        hook_script_dir = REPO_ROOT / "tests" / "scripts" / "request_hook"
        sys.path.append(str(hook_script_dir))
        import request_hook  # pylint: disable=import-outside-toplevel

        request_hook.init()
| 4,701 | 42.137615 | 137 | py |
tvm | tvm-main/version.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is the global script that set the version information of TVM.
This script runs and update all the locations that related to versions
List of affected files:
- tvm-root/python/tvm/_ffi/libinfo.py
- tvm-root/include/tvm/runtime/c_runtime_api.h
- tvm-root/conda/recipe/meta.yaml
- tvm-root/web/package.json
"""
import os
import re
import argparse
import logging
import subprocess
# Modify the following value during release
# ---------------------------------------------------
# Current version:
# We use the version of the incoming release for code
# that is under development.
#
# It is also fallback version to be used when --git-describe
# is not invoked, or when the repository does not present the
# git tags in a format that this script can use.
#
# Two tag formats are supported:
# - vMAJ.MIN.PATCH (e.g. v0.8.0) or
# - vMAJ.MIN.devN (e.g. v0.8.dev0)
__version__ = "0.14.dev0"
# ---------------------------------------------------
PROJ_ROOT = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
def py_str(cstr):
    """Decode subprocess output bytes into a UTF-8 str."""
    return str(cstr, "utf-8")
def git_describe_version():
    """Get PEP-440 compatible public and local version using git describe.
    Returns
    -------
    pub_ver: str
        Public version.
    local_ver: str
        Local version (with additional label appended to pub_ver).
    Notes
    -----
    - We follow PEP 440's convention of public version
      and local versions.
    - Only tags conforming to vMAJOR.MINOR.REV (e.g. "v0.7.0")
      are considered in order to generate the version string.
      See the use of `--match` in the `git` command below.
    Here are some examples:
    - pub_ver = '0.7.0', local_ver = '0.7.0':
      We are at the 0.7.0 release.
    - pub_ver = '0.8.dev94', local_ver = '0.8.dev94+g0d07a329e':
      We are at the 0.8 development cycle.
      The current source contains 94 additional commits
      after the most recent tag(v0.7.0),
      the git short hash tag of the current commit is 0d07a329e.
    """
    cmd = [
        "git",
        "describe",
        "--tags",
        "--match",
        "v[0-9]*.[0-9]*.[0-9]*",
        "--match",
        "v[0-9]*.[0-9]*.dev[0-9]*",
    ]
    # stderr is merged into stdout so error text can be inspected below.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=PROJ_ROOT)
    (out, _) = proc.communicate()
    if proc.returncode != 0:
        msg = py_str(out)
        # Not a git checkout (e.g. source tarball): silently fall back.
        if msg.find("not a git repository") != -1:
            return __version__, __version__
        logging.warning("git describe: %s, use %s", msg, __version__)
        return __version__, __version__
    describe = py_str(out).strip()
    # Either "TAG" (exact tag) or "TAG-NCOMMITS-gSHORTHASH".
    arr_info = describe.split("-")
    # Remove the v prefix, mainly to be robust
    # to the case where v is not presented as well.
    if arr_info[0].startswith("v"):
        arr_info[0] = arr_info[0][1:]
    # hit the exact tag
    if len(arr_info) == 1:
        return arr_info[0], arr_info[0]
    if len(arr_info) != 3:
        logging.warning("Invalid output from git describe %s", describe)
        return __version__, __version__
    dev_pos = arr_info[0].find(".dev")
    # Development versions:
    # The code will reach this point in case it can't match a full release version, such as v0.7.0.
    #
    # 1. in case the last known label looks like vMAJ.MIN.devN e.g. v0.8.dev0, we use
    # the current behaviour of just using vMAJ.MIN.devNNNN+gGIT_REV
    if dev_pos != -1:
        dev_version = arr_info[0][: arr_info[0].find(".dev")]
    # 2. in case the last known label looks like vMAJ.MIN.PATCH e.g. v0.8.0
    # then we just carry on with a similar version to what git describe provides, which is
    # vMAJ.MIN.PATCH.devNNNN+gGIT_REV
    else:
        dev_version = arr_info[0]
    pub_ver = "%s.dev%s" % (dev_version, arr_info[1])
    local_ver = "%s+%s" % (pub_ver, arr_info[2])
    return pub_ver, local_ver
# Implementations
def update(file_name, pattern, repl, dry_run=False):
    """Replace the single occurrence of ``pattern`` in ``file_name`` with ``repl``.

    Exactly one line of the file must match ``pattern`` (a lookbehind-style
    regex capturing just the version text); otherwise RuntimeError is raised.
    The file is rewritten in place unless ``dry_run`` is True or the version
    is already up to date.
    """
    rewritten_lines = []
    match_count = 0
    changed = False
    with open(file_name) as src:
        for line in src:
            found = re.findall(pattern, line)
            if found:
                assert len(found) == 1
                match_count += 1
                if found[0] == repl:
                    print("%s: version is already %s" % (file_name, repl))
                else:
                    print("%s: %s -> %s" % (file_name, found[0], repl))
                    line = re.sub(pattern, repl, line)
                    changed = True
            rewritten_lines.append(line)
    if match_count != 1:
        raise RuntimeError("Cannot find version in %s" % file_name)
    if changed and not dry_run:
        with open(file_name, "w") as dst:
            dst.writelines(rewritten_lines)
def sync_version(pub_ver, local_ver, dry_run):
    """Synchronize version.

    Propagates ``pub_ver``/``local_ver`` into every file that embeds the TVM
    version (Python libinfo, C++ runtime header, conda recipe, web package.json).
    When ``dry_run`` is True the files are scanned and reported but not written.
    """
    # python uses the PEP-440: local version
    update(
        os.path.join(PROJ_ROOT, "python", "tvm", "_ffi", "libinfo.py"),
        r"(?<=__version__ = \")[.0-9a-z\+]+",
        local_ver,
        dry_run,
    )
    # Use public version for other parts for now
    # Note that full git hash is already available in libtvm
    # C++ header
    update(
        os.path.join(PROJ_ROOT, "include", "tvm", "runtime", "c_runtime_api.h"),
        r'(?<=TVM_VERSION ")[.0-9a-z\+]+',
        pub_ver,
        dry_run,
    )
    # conda
    update(
        os.path.join(PROJ_ROOT, "conda", "recipe", "meta.yaml"),
        r"(?<=version = ')[.0-9a-z\+]+",
        pub_ver,
        dry_run,
    )
    # web
    # change to pre-release convention by npm
    # (e.g. "0.8.dev94" -> "0.8.0-dev94" so npm treats it as a pre-release).
    dev_pos = pub_ver.find(".dev")
    npm_ver = pub_ver if dev_pos == -1 else "%s.0-%s" % (pub_ver[:dev_pos], pub_ver[dev_pos + 1 :])
    update(
        os.path.join(PROJ_ROOT, "web", "package.json"),
        r'(?<="version": ")[.0-9a-z\-\+]+',
        npm_ver,
        dry_run,
    )
def main():
    """Command-line entry point: detect the version and optionally sync files."""
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser(description="Detect and synchronize version.")
    parser.add_argument(
        "--print-version",
        action="store_true",
        help="Print version to the command line. No changes is applied to files.",
    )
    parser.add_argument(
        "--git-describe",
        action="store_true",
        help="Use git describe to generate development version.",
    )
    parser.add_argument("--dry-run", action="store_true")
    opt = parser.parse_args()
    # Without --git-describe the hard-coded __version__ is used for both.
    pub_ver, local_ver = __version__, __version__
    if opt.git_describe:
        pub_ver, local_ver = git_describe_version()
    if opt.print_version:
        print(local_ver)
    else:
        sync_version(pub_ver, local_ver, opt.dry_run)
# Script entry point.
if __name__ == "__main__":
    main()
| 7,491 | 31.154506 | 99 | py |
tvm | tvm-main/apps/extension/python/tvm_ext/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example extension package of TVM."""
from __future__ import absolute_import
import os
import ctypes
# Import TVM first to get library symbols
import tvm
from tvm import te
def load_lib():
    """Load library, the functions will be registered into TVM"""
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    # load in as global so the global extern symbol is visible to other dll.
    # NOTE(review): path is relative to this file; assumes the extension was
    # built into ../../lib before import.
    lib = ctypes.CDLL(os.path.join(curr_path, "../../lib/libtvm_ext.so"), ctypes.RTLD_GLOBAL)
    return lib
# Loading the shared library registers its packed functions into TVM's
# global registry, which the lookups below rely on.
_LIB = load_lib()
# Expose two functions into python
bind_add = tvm.get_global_func("tvm_ext.bind_add")
sym_add = tvm.get_global_func("tvm_ext.sym_add")
ivec_create = tvm.get_global_func("tvm_ext.ivec_create")
ivec_get = tvm.get_global_func("tvm_ext.ivec_get")
@tvm.register_object("tvm_ext.IntVector")
class IntVec(tvm.Object):
    """Example for using extension class in c++"""
    @property
    def _tvm_handle(self):
        # Raw C handle value consumed by the TVM FFI layer.
        return self.handle.value
    def __getitem__(self, idx):
        # Element access is delegated to the C++ side via the packed function.
        return ivec_get(self, idx)
# Packed functions backing the NDSubClass example below.
nd_create = tvm.get_global_func("tvm_ext.nd_create")
nd_add_two = tvm.get_global_func("tvm_ext.nd_add_two")
nd_get_additional_info = tvm.get_global_func("tvm_ext.nd_get_additional_info")
@tvm.register_object("tvm_ext.NDSubClass")
class NDSubClass(tvm.nd.NDArrayBase):
    """Example for subclassing TVM's NDArray infrastructure.
    By inheriting TVM's NDArray, external libraries could
    leverage TVM's FFI without any modification.
    """
    @staticmethod
    def create(additional_info):
        # Alternate constructor implemented on the C++ side.
        return nd_create(additional_info)
    @property
    def additional_info(self):
        # Extra integer payload stored in the C++ subclass.
        return nd_get_additional_info(self)
    def __add__(self, other):
        # Addition combines the two payloads in C++ (see tests for semantics).
        return nd_add_two(self, other)
| 2,542 | 30.7875 | 93 | py |
tvm | tvm-main/apps/extension/tests/test_ext.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm_ext
import tvm
import tvm._ffi.registry
import tvm.testing
from tvm import te
import numpy as np
def test_bind_add():
    """bind_add partially applies its second argument to a Python callback."""
    def add(a, b):
        return a + b
    f = tvm_ext.bind_add(add, 1)
    assert f(2) == 3
def test_ext_dev():
    """Build and run a simple add-one kernel on the example extension device."""
    n = 10
    A = te.placeholder((n,), name="A")
    B = te.compute((n,), lambda *i: A(*i) + 1.0, name="B")
    s = te.create_schedule(B.op)
    def check_llvm():
        # Skipped entirely when the LLVM backend is not enabled in this build.
        if not tvm.testing.device_enabled("llvm"):
            return
        f = tvm.build(s, [A, B], "ext_dev", "llvm")
        dev = tvm.ext_dev(0)
        # launch the kernel.
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1)
    check_llvm()
def test_sym_add():
    """sym_add returns a C++ node exposing its operands as .a and .b."""
    a = te.var("a")
    b = te.var("b")
    c = tvm_ext.sym_add(a, b)
    assert c.a == a and c.b == b
def test_ext_vec():
    """IntVec round-trips through indexing and through a packed-func callback."""
    ivec = tvm_ext.ivec_create(1, 2, 3)
    assert isinstance(ivec, tvm_ext.IntVec)
    assert ivec[0] == 1
    assert ivec[1] == 2
    def ivec_cb(v2):
        # The callback receives the same extension object type.
        assert isinstance(v2, tvm_ext.IntVec)
        assert v2[2] == 3
    tvm.runtime.convert(ivec_cb)(ivec)
def test_extract_ext():
    """Functions declared through TVMExtDeclare are extractable as a dict."""
    fdict = tvm._ffi.registry.extract_ext_funcs(tvm_ext._LIB.TVMExtDeclare)
    assert fdict["mul"](3, 4) == 12
def test_extern_call():
    """call_extern can invoke the C symbol TVMTestAddOne from generated code."""
    n = 10
    A = te.placeholder((n,), name="A")
    B = te.compute(
        (n,), lambda *i: tvm.tir.call_extern("float32", "TVMTestAddOne", A(*i)), name="B"
    )
    s = te.create_schedule(B.op)
    def check_llvm():
        # Skipped entirely when the LLVM backend is not enabled in this build.
        if not tvm.testing.device_enabled("llvm"):
            return
        f = tvm.build(s, [A, B], "llvm")
        dev = tvm.cpu(0)
        # launch the kernel.
        a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
        b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1)
    check_llvm()
def test_nd_subclass():
    """NDSubClass.__add__ sums the additional_info payloads of its operands."""
    a = tvm_ext.NDSubClass.create(additional_info=3)
    b = tvm_ext.NDSubClass.create(additional_info=5)
    assert isinstance(a, tvm_ext.NDSubClass)
    c = a + b
    d = a + a
    e = b + b
    assert a.additional_info == 3
    assert b.additional_info == 5
    assert c.additional_info == 8
    assert d.additional_info == 6
    assert e.additional_info == 10
# Manual test-runner entry point (these tests require the built tvm_ext lib).
if __name__ == "__main__":
    test_nd_subclass()
    test_extern_call()
    test_ext_dev()
    test_ext_vec()
    test_bind_add()
    test_sym_add()
    test_extract_ext()
| 3,361 | 26.557377 | 89 | py |
tvm | tvm-main/apps/lldb/tvm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pretty Printers for lldb debugger.
Install the pretty printers by loading this file from .lldbinit:
command script import ~/bin/lldb/tvm.py
Update the list of nodes for which debug information is displayed
by adding to the list below.
"""
import lldb
import lldb.formatters
g_indent = 0
def __lldb_init_module(debugger, _):
    """Register TVM summary providers and the `pp` command with lldb.

    Called automatically by lldb when this script is imported via
    `command script import`.
    """
    # Only types that are supported by PrettyPrint() will be printed.
    # NOTE(review): "tvm::Map" appears twice in this list; harmless but
    # likely unintended. "fold_scale_axis:BackwardTransformer" has a single
    # colon — confirm against the actual C++ type name.
    for node in [
        "tvm::Array",
        "tvm::AttrFieldInfo",
        "tvm::Attrs",
        "tvm::BijectiveLayout",
        "tvm::Buffer",
        "tvm::Channel",
        "tvm::EnvFunc",
        "tvm::Expr",
        "tvm::GenericFunc",
        "tvm::Integer",
        "tvm::IterVar",
        "tvm::IterVarAttr",
        "tvm::IterVarRelation",
        "tvm::Layout",
        "tvm::Map",
        "tvm::Map",
        "tvm::MemoryInfo",
        "tvm::Operation",
        "tvm::Range",
        "tvm::Schedule",
        "tvm::Stage",
        "tvm::Stmt",
        "tvm::Target",
        "tvm::Tensor",
        "tvm::TensorIntrin",
        "tvm::TensorIntrinCall",
        "tvm::TypedEnvFunc",
        "tvm::tir::Var",
        "tvm::ir::CommReducer",
        "tvm::ir::FunctionRef",
        "tvm::relay::BaseTensorType",
        "tvm::relay::CCacheKey",
        "tvm::relay::CCacheValue",
        "tvm::relay::CachedFunc",
        "tvm::relay::Call",
        "tvm::relay::Clause",
        "tvm::relay::Closure",
        "tvm::relay::CompileEngine",
        "tvm::relay::Constant",
        "tvm::relay::Constructor",
        "tvm::relay::ConstructorValue",
        "tvm::relay::Expr",
        "tvm::relay::FuncType",
        "tvm::relay::Function",
        "tvm::relay::GlobalTypeVar",
        "tvm::relay::GlobalVar",
        "tvm::relay::Id",
        "tvm::relay::If",
        "tvm::relay::IncompleteType",
        "tvm::relay::InterpreterState",
        "tvm::relay::Let",
        "tvm::relay::Match",
        "tvm::relay::Module",
        "tvm::relay::NamedNDArray",
        "tvm::relay::Op",
        "tvm::relay::Pattern",
        "tvm::relay::PatternConstructor",
        "tvm::relay::PatternTuple",
        "tvm::relay::PatternVar",
        "tvm::relay::PatternWildcard",
        "tvm::relay::RecClosure",
        "tvm::relay::RefCreate",
        "tvm::relay::RefRead",
        "tvm::relay::RefType",
        "tvm::relay::RefValue",
        "tvm::relay::RefWrite",
        "tvm::relay::SourceName",
        "tvm::relay::Span",
        "tvm::relay::TempExpr",
        "tvm::relay::TensorType",
        "tvm::relay::Tuple",
        "tvm::relay::TupleGetItem",
        "tvm::relay::TupleType",
        "tvm::relay::Type",
        "tvm::relay::TypeCall",
        "tvm::relay::TypeConstraint",
        "tvm::relay::TypeData",
        "tvm::relay::TypeRelation",
        "tvm::relay::TypeReporter",
        "tvm::relay::TypeVar",
        "tvm::relay::Value",
        "tvm::relay::Var",
        "tvm::relay::alter_op_layout::LayoutAlternatedExpr",
        "tvm::relay::alter_op_layout::TransformMemorizer",
        "tvm::relay::fold_scale_axis::Message",
        "tvm::relay::fold_scale_axis:BackwardTransformer",
    ]:
        debugger.HandleCommand(
            "type summary add -F tvm.NodeRef_SummaryProvider {node} -w tvm".format(node=node)
        )
    # `pp <expr>` pretty-prints a TVM node from the lldb prompt.
    debugger.HandleCommand("command script add -f tvm.PrettyPrint pp")
    debugger.HandleCommand("type category enable tvm")
def _log(logger, fmt, *args, **kwargs):
    """Write a formatted message to the lldb formatters logger, indented by g_indent."""
    global g_indent
    # lldb.formatters.Logger uses >> as its write operator.
    logger >> " " * g_indent + fmt.format(*args, **kwargs)
def _GetContext(debugger):
target = debugger.GetSelectedTarget()
process = target.GetProcess()
thread = process.GetThreadAtIndex(0)
return thread.GetSelectedFrame()
def PrettyPrint(debugger, command, result, internal_dict):
    """Implementation of the `pp` lldb command: evaluate tvm::PrettyPrint(expr)."""
    ctx = _GetContext(debugger)
    rc = ctx.EvaluateExpression("tvm::PrettyPrint({command})".format(command=command))
    result.AppendMessage(str(rc))
class EvaluateError(Exception):
    """Raised when an lldb expression evaluation fails.

    Carries the stringified lldb SBError (or any error object) as its message.
    """

    def __init__(self, error):
        # The original called super(Exception, self).__init__, which skips
        # Exception itself in the MRO; use the standard zero-argument form.
        super().__init__(str(error))
def _EvalExpression(logger, ctx, expr, value_name):
    """Evaluate ``expr`` in frame ``ctx``; raise EvaluateError on failure.

    NOTE(review): ``value_name`` is accepted but never used in this body.
    """
    _log(logger, "Evaluating {expr}".format(expr=expr))
    rc = ctx.EvaluateExpression(expr)
    err = rc.GetError()
    if err.Fail():
        _log(logger, "_EvalExpression failed: {err}".format(err=err))
        raise EvaluateError(err)
    _log(logger, "_EvalExpression success: {typename}".format(typename=rc.GetTypeName()))
    return rc
def _EvalExpressionAsString(logger, ctx, expr):
    """Evaluate ``expr`` and return its summary/value as a string ("--" if neither)."""
    result = _EvalExpression(logger, ctx, expr, None)
    return result.GetSummary() or result.GetValue() or "--"
def _EvalAsNodeRef(logger, ctx, value):
    """Pretty-print the TVM node referred to by the lldb SBValue ``value``."""
    return _EvalExpressionAsString(logger, ctx, "tvm::PrettyPrint({name})".format(name=value.name))
def NodeRef_SummaryProvider(value, _):
    """lldb summary provider: render a TVM NodeRef via tvm::PrettyPrint."""
    global g_indent
    # Track nesting depth so _log output stays readable during recursion.
    g_indent += 2
    try:
        if not value or not value.IsValid():
            return "<invalid>"
        lldb.formatters.Logger._lldb_formatters_debug_level = 0
        logger = lldb.formatters.Logger.Logger()
        ctx = _GetContext(lldb.debugger)
        return _EvalAsNodeRef(logger, ctx, value)
    except EvaluateError as e:
        # Surface the evaluation error as the summary rather than crashing lldb.
        return str(e)
    finally:
        g_indent -= 2
| 5,972 | 30.436842 | 99 | py |
tvm | tvm-main/apps/dso_plugin_module/test_plugin_module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import os
def test_plugin_module():
    """Load the compiled plugin .so and exercise its exported functions."""
    curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    mod = tvm.runtime.load_module(os.path.join(curr_path, "lib", "plugin_module.so"))
    # NOTE: we need to make sure all managed resources returned
    # from mod get destructed before mod get unloaded.
    #
    # Failure mode we want to prevent from:
    # We retain an object X whose destructor is within mod.
    # The program will segfault if X get destructed after mod,
    # because the destructor function has already been unloaded.
    #
    # The easiest way to achieve this is to wrap the
    # logics related to mod inside a function.
    def run_module(mod):
        # normal functions
        assert mod["AddOne"](10) == 11
        assert mod["SubOne"](10) == 9
        # advanced usecase: return a module
        mymod = mod["CreateMyModule"](10)
        fadd = mymod["add"]
        assert fadd(10) == 20
        assert mymod["mul"](10) == 100
    run_module(mod)
# Manual test-runner entry point (requires the built plugin_module.so).
if __name__ == "__main__":
    test_plugin_module()
| 1,868 | 36.38 | 85 | py |
tvm | tvm-main/apps/microtvm/ethosu/convert_labels.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import sys
from tvm.micro import copy_crt_config_header
def create_labels_header(labels_file, section, output_path):
    """Generate ``labels.h`` containing the ImageNet labels as a C string array.

    Parameters
    ----------
    labels_file : str
        Path to a text file with one label per line.
    section : str
        Linker section name the array is placed in via __attribute__((section(...))).
    output_path : str
        Directory where ``labels.h`` is written.
    """
    labels_path = pathlib.Path(labels_file).resolve()
    file_path = pathlib.Path(f"{output_path}/labels.h").resolve()
    # Use explicit encodings so the output is stable across locales.
    with open(labels_path, encoding="utf-8") as f:
        labels = f.readlines()
    with open(file_path, "w", encoding="utf-8") as header_file:
        header_file.write(f'char* labels[] __attribute__((section("{section}"), aligned(16))) = {{')
        # Each label becomes one quoted array element; rstrip drops the newline.
        for label in labels:
            header_file.write(f'"{label.rstrip()}",')
        header_file.write("};\n")
# Script entry point: argv[1] is the labels file; also stages crt_config
# headers into build/crt_config for the firmware build.
if __name__ == "__main__":
    create_labels_header(sys.argv[1], "ethosu_scratch", "./include")
    crt_config_output_path = pathlib.Path(__file__).parent.resolve() / "build" / "crt_config"
    if not crt_config_output_path.exists():
        crt_config_output_path.mkdir()
    copy_crt_config_header("crt", crt_config_output_path)
| 1,843 | 35.156863 | 100 | py |
tvm | tvm-main/apps/microtvm/ethosu/convert_image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import re
import sys
from PIL import Image
import numpy as np
def create_header_file(name, section, tensor_name, tensor_data, output_path):
    """Write ``tensor_data`` into ``<output_path>/<name>.h`` as a C byte-string array.

    The array is declared int8_t, placed in the given linker section, and the
    bytes are emitted as \\xHH escape sequences inside a string literal.
    """
    header_path = pathlib.Path(f"{output_path}/" + name).resolve().with_suffix(".h").resolve()
    # Pre-render the payload: every byte as a two-digit lowercase hex escape.
    escaped_bytes = "".join("\\x%02x" % byte for byte in tensor_data.tobytes())
    with open(header_path, "w") as header_file:
        header_file.write("#include <tvmgen_default.h>\n")
        header_file.write(f"const size_t {tensor_name}_len = {tensor_data.size};\n")
        header_file.write(
            f'int8_t {tensor_name}[] __attribute__((section("{section}"), aligned(16))) = "'
        )
        header_file.write(escaped_bytes)
        header_file.write('";\n\n')
def create_headers(image_name):
    """
    This function generates C header files for the input and output arrays required to run inferences
    """
    img_path = os.path.join("./", f"{image_name}")
    # Resize image to 224x224
    resized_image = Image.open(img_path).resize((224, 224))
    img_data = np.asarray(resized_image).astype("float32")
    # # Add the batch dimension, as we are expecting 4-dimensional input: NCHW.
    # NOTE(review): the data is laid out HWC from PIL, so after expand_dims this
    # is actually NHWC — confirm against the model's expected layout.
    img_data = np.expand_dims(img_data, axis=0)
    # Create input header file
    # Shift uint8 pixel range [0, 255] to int8 range [-128, 127].
    input_data = img_data - 128
    input_data = input_data.astype(np.int8)
    create_header_file("inputs", "ethosu_scratch", "input", input_data, "./include")
    # Create output header file
    # 1001 matches the ImageNet-with-background class count of the model.
    output_data = np.zeros([1001], np.int8)
    create_header_file(
        "outputs",
        "output_data_sec",
        "output",
        output_data,
        "./include",
    )
# Script entry point: argv[1] is the image filename to convert.
if __name__ == "__main__":
    create_headers(sys.argv[1])
| 2,666 | 34.092105 | 101 | py |
tvm | tvm-main/apps/microtvm/arduino/template_project/microtvm_api_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import logging
import os.path
import pathlib
import re
import shutil
import subprocess
import tarfile
import tempfile
import time
from string import Template
from packaging import version
import server
_LOG = logging.getLogger(__name__)
# Location of the Model Library Format archive inside a generated project.
MODEL_LIBRARY_FORMAT_RELPATH = pathlib.Path("src") / "model" / "model.tar"
# Directory containing this server script. NOTE: the original used
# os.path.getcwd(), which does not exist (AttributeError) — the correct
# fallback for an empty dirname is os.getcwd().
API_SERVER_DIR = pathlib.Path(os.path.dirname(__file__) or os.getcwd())
BUILD_DIR = API_SERVER_DIR / "build"
MODEL_LIBRARY_FORMAT_PATH = API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH
# When the MLF archive is absent, this directory is the reusable template.
IS_TEMPLATE = not (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH).exists()
MIN_ARDUINO_CLI_VERSION = version.parse("0.18.0")
BOARDS = API_SERVER_DIR / "boards.json"
# Resolved from PATH; may be None, in which case the project option is required.
ARDUINO_CLI_CMD = shutil.which("arduino-cli")
MAKEFILE_FILENAME = "Makefile"
# Data structure to hold the information microtvm_api_server.py needs
# to communicate with each of these boards.
try:
    with open(BOARDS) as boards:
        BOARD_PROPERTIES = json.load(boards)
except FileNotFoundError:
    raise FileNotFoundError(f"Board file {{{BOARDS}}} does not exist.")
# Raised by port-autodetection helpers elsewhere in this server.
class BoardAutodetectFailed(Exception):
    """Raised when no attached hardware is found matching the requested board"""
# Project templates this server can generate.
PROJECT_TYPES = ["example_project", "host_driven"]
# Standard options plus Arduino-specific ones. arduino_cli_cmd flips between
# required and optional depending on whether arduino-cli was found on PATH.
PROJECT_OPTIONS = server.default_project_options(
    project_type={"choices": tuple(PROJECT_TYPES)},
    board={"choices": list(BOARD_PROPERTIES), "optional": ["flash", "open_transport"]},
    warning_as_error={"optional": ["build", "flash"]},
) + [
    server.ProjectOption(
        "arduino_cli_cmd",
        required=(
            ["generate_project", "build", "flash", "open_transport"]
            if not ARDUINO_CLI_CMD
            else None
        ),
        optional=(
            ["generate_project", "build", "flash", "open_transport"] if ARDUINO_CLI_CMD else None
        ),
        type="str",
        default=ARDUINO_CLI_CMD,
        help="Path to the arduino-cli tool.",
    ),
    server.ProjectOption(
        "port",
        optional=["flash", "open_transport"],
        type="int",
        default=None,
        help=(
            "Port to use for connecting to hardware. "
            "If port and serial_number options are not set it will try to autodetect the port."
        ),
    ),
    server.ProjectOption(
        "serial_number",
        optional=["open_transport", "flash"],
        type="str",
        default=None,
        help=(
            "Board serial number. If both serial_number and port options are set,"
            " it will throw exception."
        ),
    ),
]
class Handler(server.ProjectAPIHandler):
    def __init__(self):
        """Initialize transport/flash state; populated lazily by later calls."""
        super(Handler, self).__init__()
        # Subprocess handle for an open transport session, if any.
        self._proc = None
        # Cached serial port path / serial number / arduino-cli version.
        self._port = None
        self._serial = None
        self._version = None
    def server_info_query(self, tvm_version):
        """Describe this platform to the Project API client (options, template status)."""
        return server.ServerInfo(
            platform_name="arduino",
            is_template=IS_TEMPLATE,
            model_library_format_path="" if IS_TEMPLATE else MODEL_LIBRARY_FORMAT_PATH,
            project_options=PROJECT_OPTIONS,
        )
    def _copy_project_files(self, api_server_dir, project_dir, project_type):
        """Copies the files for project_type into project_dir.
        Notes
        -----
        template_dir is NOT a project type, and that directory is never copied
        in this function. template_dir only holds this file and its unit tests,
        so this file is copied separately in generate_project.
        """
        for item in (API_SERVER_DIR / "src" / project_type).iterdir():
            # project.ino is handled below so it can be renamed.
            if item.name == "project.ino":
                continue
            dest = project_dir / "src" / item.name
            if item.is_dir():
                shutil.copytree(item, dest)
            else:
                shutil.copy2(item, dest)
        # Arduino requires the .ino file have the same filename as its containing folder
        shutil.copy2(
            API_SERVER_DIR / "src" / project_type / "project.ino",
            project_dir / f"{project_dir.stem}.ino",
        )
CRT_COPY_ITEMS = ("include", "src")
    def _copy_standalone_crt(self, source_dir, standalone_crt_dir):
        """Copy the CRT_COPY_ITEMS subtrees of the standalone CRT into source_dir."""
        output_crt_dir = source_dir / "standalone_crt"
        for item in self.CRT_COPY_ITEMS:
            src_path = os.path.join(standalone_crt_dir, item)
            dst_path = output_crt_dir / item
            if os.path.isdir(src_path):
                shutil.copytree(src_path, dst_path)
            else:
                shutil.copy2(src_path, dst_path)
# Example project is the "minimum viable project",
# and doesn't need a fancy RPC server
EXAMPLE_PROJECT_UNUSED_COMPONENTS = [
"include/dmlc",
"src/support",
"src/runtime/minrpc",
"src/runtime/crt/graph_executor",
"src/runtime/crt/microtvm_rpc_common",
"src/runtime/crt/microtvm_rpc_server",
"src/runtime/crt/tab",
]
    def _remove_unused_components(self, source_dir, project_type):
        """Delete CRT subtrees the given project type does not need (saves flash/space)."""
        unused_components = []
        if project_type == "example_project":
            unused_components = self.EXAMPLE_PROJECT_UNUSED_COMPONENTS
        for component in unused_components:
            shutil.rmtree(source_dir / "standalone_crt" / component)
    def _disassemble_mlf(self, mlf_tar_path, source_dir):
        """Unpack a Model Library Format tar: copy its C sources into
        ``source_dir/model`` and return the parsed metadata.json dict."""
        with tempfile.TemporaryDirectory() as mlf_unpacking_dir_str:
            mlf_unpacking_dir = pathlib.Path(mlf_unpacking_dir_str)
            with tarfile.open(mlf_tar_path, "r:") as tar:
                tar.extractall(mlf_unpacking_dir)
            model_dir = source_dir / "model"
            model_dir.mkdir()
            # Copy C files from model. The filesnames and quantity
            # depend on the target string, so we just copy all c files
            # NOTE(review): this rebinds the `source_dir` parameter to the MLF
            # codegen dir — intentional but easy to misread.
            source_dir = mlf_unpacking_dir / "codegen" / "host" / "src"
            for file in source_dir.rglob("*.c"):
                shutil.copy(file, model_dir)
            # Return metadata.json for use in templating
            with open(os.path.join(mlf_unpacking_dir, "metadata.json")) as f:
                metadata = json.load(f)
        return metadata
    def _template_model(self, source_dir, metadata):
        """Fill the $workspace_size_bytes placeholder in platform.c from MLF metadata.

        The workspace requirement is the sum over all modules' main-function
        workspace sizes reported in metadata.json.
        """
        with open(source_dir / "platform.c", "r") as f:
            model_h_template = Template(f.read())
        all_module_names = []
        for name in metadata["modules"].keys():
            all_module_names.append(name)
        # AOT-only: every module must be a full-model entry.
        assert all(
            metadata["modules"][mod_name]["style"] == "full-model" for mod_name in all_module_names
        ), "when generating AOT, expect only full-model Model Library Format"
        workspace_size_bytes = 0
        for mod_name in all_module_names:
            workspace_size_bytes += metadata["modules"][mod_name]["memory"]["functions"]["main"][0][
                "workspace_size_bytes"
            ]
        template_values = {
            "workspace_size_bytes": workspace_size_bytes,
        }
        # Rewrite platform.c in place with the substituted value.
        with open(source_dir / "platform.c", "w") as f:
            f.write(model_h_template.substitute(template_values))
# Arduino ONLY recognizes .ino, .ccp, .c, .h
CPP_FILE_EXTENSION_SYNONYMS = ("cc", "cxx")
    def _change_cpp_file_extensions(self, source_dir):
        """Rename .cc/.cxx sources to .cpp and .inc files to .h for Arduino."""
        for ext in self.CPP_FILE_EXTENSION_SYNONYMS:
            for filename in source_dir.rglob(f"*.{ext}"):
                filename.rename(filename.with_suffix(".cpp"))
        for filename in source_dir.rglob("*.inc"):
            filename.rename(filename.with_suffix(".h"))
    def _convert_includes(self, project_dir, source_dir):
        """Changes all #include statements in project_dir to be relevant to their
        containing file's location.
        Arduino only supports includes relative to a file's location, so this
        function finds each time we #include a file and changes the path to
        be relative to the file location. Does not do this for standard C
        libraries. Also changes angle brackets syntax to double quotes syntax.
        See Also
        -----
        https://www.arduino.cc/reference/en/language/structure/further-syntax/include/
        """
        for ext in ("c", "h", "cpp"):
            for filename in source_dir.rglob(f"*.{ext}"):
                # Read/write in binary so non-include lines pass through untouched.
                with filename.open("rb") as src_file:
                    lines = src_file.readlines()
                with filename.open("wb") as dst_file:
                    for line in lines:
                        line_str = str(line, "utf-8")
                        # Check if line has an include
                        result = re.search(r"#include\s*[<\"]([^>]*)[>\"]", line_str)
                        if not result:
                            dst_file.write(line)
                        else:
                            new_include = self._find_modified_include_path(
                                project_dir, filename, result.groups()[0]
                            )
                            updated_line = f'#include "{new_include}"\n'
                            dst_file.write(updated_line.encode("utf-8"))
# Most of the files we used to be able to point to directly are under "src/standalone_crt/include/".
# Howver, crt_config.h lives under "src/standalone_crt/crt_config/", and more exceptions might
# be added in the future.
POSSIBLE_BASE_PATHS = ["src/standalone_crt/include/", "src/standalone_crt/crt_config/"]
    def _find_modified_include_path(self, project_dir, file_path, include_path):
        """Takes a single #include path, and returns the location it should point to.
        Examples
        --------
        >>> _find_modified_include_path(
        ...     "/path/to/project/dir"
        ...     "/path/to/project/dir/src/standalone_crt/src/runtime/crt/common/ndarray.c"
        ...     "tvm/runtime/crt/platform.h"
        ... )
        "../../../../../../src/standalone_crt/include/tvm/runtime/crt/platform.h"
        """
        if include_path.endswith(".inc"):
            include_path = re.sub(r"\.[a-z]+$", ".h", include_path)
        # Change includes referencing .cc and .cxx files to point to the renamed .cpp file
        # NOTE(review): endswith("cc"/"cxx") has no leading dot, so a path like
        # "foomycc" would also match — confirm no such includes exist.
        if include_path.endswith(self.CPP_FILE_EXTENSION_SYNONYMS):
            include_path = re.sub(r"\.[a-z]+$", ".cpp", include_path)
        # If the include already works, don't modify it
        if (file_path.parents[0] / include_path).exists():
            return include_path
        relative_path = file_path.relative_to(project_dir)
        # One "../" per directory level; counts "/" so this assumes POSIX-style
        # separators in the stringified relative path.
        up_dirs_path = "../" * str(relative_path).count("/")
        for base_path in self.POSSIBLE_BASE_PATHS:
            full_potential_path = project_dir / base_path / include_path
            if full_potential_path.exists():
                return up_dirs_path + base_path + include_path
        # If we can't find the file, just leave it untouched
        # It's probably a standard C/C++ header
        return include_path
CMSIS_INCLUDE_HEADERS = [
"arm_nn_math_types.h",
"arm_nn_tables.h",
"arm_nn_types.h",
"arm_nnfunctions.h",
"arm_nnsupportfunctions.h",
]
    def _cmsis_required(self, project_path: pathlib.Path) -> bool:
        """Check if CMSIS dependency is required."""
        project_path = pathlib.Path(project_path)
        for path in (project_path / "src" / "model").iterdir():
            if path.is_file():
                # Encoding is for reading C generated code which also includes hex numbers
                with open(path, "r", encoding="ISO-8859-1") as lib_f:
                    lib_content = lib_f.read()
                # Substring scan: any referenced CMSIS-NN header means CMSIS is needed.
                if any(header in lib_content for header in self.CMSIS_INCLUDE_HEADERS):
                    return True
        return False
    def _copy_cmsis(self, project_path: pathlib.Path, cmsis_path: str):
        """Copy CMSIS header files to project.

        Note: We use this CMSIS package:https://www.arduino.cc/reference/en/libraries/arduino_cmsis-dsp/
        However, the latest release does not include header files that are copied in this function.

        Raises ``FileExistsError`` if ``<project>/include/cmsis`` already exists
        (``mkdir`` is called without ``exist_ok``).
        """
        (project_path / "include" / "cmsis").mkdir()
        cmsis_path = pathlib.Path(cmsis_path)
        # Copy only the headers generated code is known to reference.
        for item in self.CMSIS_INCLUDE_HEADERS:
            shutil.copy2(
                cmsis_path / "CMSIS" / "NN" / "Include" / item,
                project_path / "include" / "cmsis" / item,
            )
def _populate_makefile(
self,
makefile_template_path: pathlib.Path,
makefile_path: pathlib.Path,
board: str,
verbose: bool,
arduino_cli_cmd: str,
build_extra_flags: str,
):
"""Generate Makefile from template."""
flags = {
"FQBN": self._get_fqbn(board),
"VERBOSE_FLAG": "--verbose" if verbose else "",
"ARUINO_CLI_CMD": arduino_cli_cmd,
"BOARD": board,
"BUILD_EXTRA_FLAGS": build_extra_flags,
}
with open(makefile_path, "w") as makefile_f:
with open(makefile_template_path, "r") as makefile_template_f:
for line in makefile_template_f:
SUBST_TOKEN_RE = re.compile(r"<([A-Z_]+)>")
outs = []
for i, m in enumerate(re.split(SUBST_TOKEN_RE, line)):
if i % 2 == 1:
m = flags[m]
outs.append(m)
line = "".join(outs)
makefile_f.write(line)
    def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
        """Create a new Arduino project from a Model Library Format archive.

        Copies the template project files, the standalone CRT runtime, and the
        unpacked model sources into ``project_dir`` (which must not already
        exist), then generates the project Makefile.

        Parameters
        ----------
        model_library_format_path
            Path to the Model Library Format (.tar) archive.
        standalone_crt_dir
            Directory holding the standalone CRT sources to copy.
        project_dir
            Destination directory; ``mkdir`` raises if it already exists.
        options : dict
            Project options; the keys read are listed immediately below.
        """
        # List all used project options
        board = options["board"]
        project_type = options["project_type"]
        arduino_cli_cmd = options["arduino_cli_cmd"]
        verbose = options["verbose"]
        cmsis_path = options.get("cmsis_path")
        compile_definitions = options.get("compile_definitions")
        extra_files_tar = options.get("extra_files_tar")
        # Reference key directories with pathlib
        project_dir = pathlib.Path(project_dir)
        project_dir.mkdir()
        source_dir = project_dir / "src"
        source_dir.mkdir()
        # Copies files from the template folder to project_dir
        for file in os.listdir(API_SERVER_DIR):
            if file.endswith(".py"):
                shutil.copy2(API_SERVER_DIR / file, project_dir / file)
        # Copy launch script
        shutil.copy2(
            API_SERVER_DIR / "launch_microtvm_api_server.sh",
            project_dir / "launch_microtvm_api_server.sh",
        )
        shutil.copy2(BOARDS, project_dir / BOARDS.name)
        self._copy_project_files(API_SERVER_DIR, project_dir, project_type)
        # Copy standalone_crt into src folder
        self._copy_standalone_crt(source_dir, standalone_crt_dir)
        self._remove_unused_components(source_dir, project_type)
        # Populate crt-config.h
        crt_config_dir = project_dir / "src" / "standalone_crt" / "crt_config"
        crt_config_dir.mkdir()
        shutil.copy2(
            API_SERVER_DIR / "crt_config" / "crt_config.h", crt_config_dir / "crt_config.h"
        )
        # Unpack the MLF and copy the relevant files
        metadata = self._disassemble_mlf(model_library_format_path, source_dir)
        shutil.copy2(model_library_format_path, project_dir / MODEL_LIBRARY_FORMAT_RELPATH)
        # For AOT, template platform.c with metadata to minimize space usage
        if project_type == "example_project":
            self._template_model(source_dir, metadata)
        self._change_cpp_file_extensions(source_dir)
        # Recursively change includes
        self._convert_includes(project_dir, source_dir)
        # create include directory
        (project_dir / "include").mkdir()
        # Populate extra_files
        # NOTE(review): extractall on a user-supplied tar can write outside
        # project_dir via "../" members -- acceptable only if the tar is
        # trusted; confirm.
        if extra_files_tar:
            with tarfile.open(extra_files_tar, mode="r:*") as tf:
                tf.extractall(project_dir)
        # Assemble the quoted build.extra_flags value handed to the Makefile.
        build_extra_flags = '"build.extra_flags='
        if extra_files_tar:
            build_extra_flags += "-I./include "
        if compile_definitions:
            for item in compile_definitions:
                build_extra_flags += f"{item} "
        if self._cmsis_required(project_dir):
            build_extra_flags += f"-I./include/cmsis "
            self._copy_cmsis(project_dir, cmsis_path)
        build_extra_flags += '"'
        # Check if build_extra_flags is empty
        if build_extra_flags == '"build.extra_flags="':
            build_extra_flags = '""'
        # Populate Makefile
        self._populate_makefile(
            API_SERVER_DIR / f"{MAKEFILE_FILENAME}.template",
            project_dir / MAKEFILE_FILENAME,
            board,
            verbose,
            arduino_cli_cmd,
            build_extra_flags,
        )
    def _get_platform_version(self, arduino_cli_path: str) -> "version.Version":
        """Return the installed arduino-cli version.

        NOTE(review): the previous annotation said ``float``, but the body
        returns ``version.parse(...)`` (a packaging Version); the annotation
        is corrected here.
        """
        # sample output of this command:
        # 'arduino-cli alpha Version: 0.18.3 Commit: d710b642 Date: 2021-05-14T12:36:58Z\n'
        version_output = subprocess.run(
            [arduino_cli_path, "version"], check=True, stdout=subprocess.PIPE
        ).stdout.decode("utf-8")
        str_version = re.search(r"Version: ([\.0-9]*)", version_output).group(1)
        # Using too low a version should raise an error. Note that naively
        # comparing floats will fail here: 0.7 > 0.21, but 0.21 is a higher
        # version (hence we need version.parse)
        return version.parse(str_version)
    # This will only be run for build and upload
    def _check_platform_version(self, cli_command: str, warning_as_error: bool):
        """Verify arduino-cli is at least ``MIN_ARDUINO_CLI_VERSION``.

        Raises ``server.ServerError`` when the version is too old and
        ``warning_as_error`` is truthy; otherwise only logs a warning.
        """
        # The detected version is cached on the handler instance.
        if not self._version:
            self._version = self._get_platform_version(cli_command)
        if self._version < MIN_ARDUINO_CLI_VERSION:
            message = (
                f"Arduino CLI version too old: found {self._version}, "
                f"need at least {str(MIN_ARDUINO_CLI_VERSION)}."
            )
            if warning_as_error is not None and warning_as_error:
                raise server.ServerError(message=message)
            _LOG.warning(message)
def _get_fqbn(self, board: str):
o = BOARD_PROPERTIES[board]
return f"{o['package']}:{o['architecture']}:{o['board']}"
    def build(self, options):
        """Compile the generated project by invoking ``make build``."""
        # List all used project options
        arduino_cli_cmd = options["arduino_cli_cmd"]
        warning_as_error = options.get("warning_as_error")
        # Fail early if the installed arduino-cli is too old.
        self._check_platform_version(arduino_cli_cmd, warning_as_error)
        compile_cmd = ["make", "build"]
        # Specify project to compile
        subprocess.run(compile_cmd, check=True, cwd=API_SERVER_DIR)
    # Column headers that may appear in `arduino-cli board list` output; which
    # ones are present depends on the arduino-cli version.
    POSSIBLE_BOARD_LIST_HEADERS = ("Port", "Protocol", "Type", "Board Name", "FQBN", "Core")
    def _parse_connected_boards(self, tabular_str):
        """Parses the tabular output from `arduino-cli board list` into a 2D array

        Yields one dict per non-empty data row, keyed by the lower-cased
        header names; rows shorter than a column's span yield "".

        Examples
        --------
        >>> list(_parse_connected_boards(bytes(
        ...     "Port Type Board Name FQBN Core \n"
        ...     "/dev/ttyS4 Serial Port Unknown \n"
        ...     "/dev/ttyUSB0 Serial Port (USB) Spresense SPRESENSE:spresense:spresense SPRESENSE:spresense\n"
        ...     "\n",
        ...     "utf-8")))
        [['/dev/ttys4', 'Serial Port', 'Unknown', '', ''], ['/dev/ttyUSB0', 'Serial Port (USB)',
        'Spresense', 'SPRESENSE:spresense:spresense', 'SPRESENSE:spresense']]
        """
        # Which column headers are present depends on the version of arduino-cli
        column_regex = r"\s*|".join(self.POSSIBLE_BOARD_LIST_HEADERS) + r"\s*"
        str_rows = tabular_str.split("\n")
        # Matches in the header row define the column boundaries used to
        # slice every data row below.
        column_headers = list(re.finditer(column_regex, str_rows[0]))
        assert len(column_headers) > 0
        for str_row in str_rows[1:]:
            if not str_row.strip():
                continue
            device = {}
            for column in column_headers:
                col_name = column.group(0).strip().lower()
                device[col_name] = str_row[column.start() : column.end()].strip()
            yield device
    def _auto_detect_port(self, arduino_cli_cmd: str, board: str) -> str:
        """Return the serial port of the first connected board matching ``board``.

        Raises ``BoardAutodetectFailed`` when no connected device reports the
        desired FQBN.
        """
        # It is assumed only one board with this type is connected to this host machine.
        list_cmd = [arduino_cli_cmd, "board", "list"]
        list_cmd_output = subprocess.run(
            list_cmd, check=True, stdout=subprocess.PIPE
        ).stdout.decode("utf-8")
        desired_fqbn = self._get_fqbn(board)
        for device in self._parse_connected_boards(list_cmd_output):
            if device["fqbn"] == desired_fqbn:
                return device["port"]
        # If no compatible boards, raise an error
        raise BoardAutodetectFailed()
    def _get_arduino_port(
        self, arduino_cli_cmd: str, board: str, port: int = None, serial_number: str = None
    ):
        """Returns Arduino serial port.

        If both port and serial_number are set, it throw Runtime exception.
        If none of those options are set, it tries to autodetect the serial port.
        """
        # TODO: This is to avoid breaking GPU docker on running the tutorials.
        import serial.tools.list_ports
        if serial_number and port:
            raise RuntimeError(
                "port and serial_number cannot be set together. Please set only one."
            )
        # The resolved port is cached on the handler instance.
        if not self._port:
            if port:
                self._port = port
            elif serial_number:
                com_ports = serial.tools.list_ports.comports()
                # NOTE(review): the loop variable shadows the `port` parameter;
                # harmless here because this branch only runs when `port` was
                # falsy, but worth renaming.
                for port in com_ports:
                    if port.serial_number == serial_number:
                        self._port = port.device
                        break
                if not self._port:
                    raise BoardAutodetectFailed(
                        f"Detecting port with board serial_number {serial_number} failed."
                    )
            else:
                self._port = self._auto_detect_port(arduino_cli_cmd, board)
        return self._port
    def _get_board_from_makefile(self, makefile_path: pathlib.Path) -> str:
        """Get Board from generated Makefile.

        Returns the value after ":=" on the first line containing "BOARD",
        with all whitespace stripped. Raises ``RuntimeError`` if no such line
        exists.
        """
        with open(makefile_path) as makefile_f:
            lines = makefile_f.readlines()
            for line in lines:
                if "BOARD" in line:
                    board = re.sub(r"\s", "", line).split(":=")[1]
                    return board
        raise RuntimeError("Board was not found in Makefile: {}".format(makefile_path))
    # Per-attempt upload timeout and total retry budget for flashing.
    FLASH_TIMEOUT_SEC = 60
    FLASH_MAX_RETRIES = 5
    def flash(self, options):
        """Flash the compiled firmware via ``make flash``.

        Retries up to ``FLASH_MAX_RETRIES`` times on upload timeouts; raises
        ``RuntimeError`` when every attempt times out.
        """
        # List all used project options
        arduino_cli_cmd = options["arduino_cli_cmd"]
        warning_as_error = options.get("warning_as_error")
        port = options.get("port")
        board = options.get("board")
        serial_number = options.get("serial_number")
        if not board:
            board = self._get_board_from_makefile(API_SERVER_DIR / MAKEFILE_FILENAME)
        self._check_platform_version(arduino_cli_cmd, warning_as_error)
        port = self._get_arduino_port(arduino_cli_cmd, board, port, serial_number)
        upload_cmd = ["make", "flash", f"PORT={port}"]
        for _ in range(self.FLASH_MAX_RETRIES):
            try:
                subprocess.run(
                    upload_cmd, check=True, timeout=self.FLASH_TIMEOUT_SEC, cwd=API_SERVER_DIR
                )
                break
            # We only catch timeout errors - a subprocess.CalledProcessError
            # (caused by subprocess.run returning non-zero code) will not
            # be caught.
            except subprocess.TimeoutExpired:
                _LOG.warning(
                    f"Upload attempt to port {port} timed out after {self.FLASH_TIMEOUT_SEC} seconds"
                )
        else:
            # for/else: runs only when the loop exhausted without `break`,
            # i.e. every flash attempt timed out.
            raise RuntimeError(
                f"Unable to flash Arduino board after {self.FLASH_MAX_RETRIES} attempts"
            )
    def open_transport(self, options):
        """Open the serial transport to the board and return its timeouts.

        Idempotent: returns immediately (None) if a session is already open.
        """
        # TODO: This is to avoid breaking GPU docker on running the tutorials.
        import serial
        import serial.tools.list_ports
        # List all used project options
        arduino_cli_cmd = options["arduino_cli_cmd"]
        port = options.get("port")
        board = options.get("board")
        serial_number = options.get("serial_number")
        if not board:
            board = self._get_board_from_makefile(API_SERVER_DIR / MAKEFILE_FILENAME)
        # Zephyr example doesn't throw an error in this case
        if self._serial is not None:
            return
        port = self._get_arduino_port(arduino_cli_cmd, board, port, serial_number)
        # It takes a moment for the Arduino code to finish initializing
        # and start communicating over serial
        for _ in range(10):
            if any(serial.tools.list_ports.grep(port)):
                break
            time.sleep(0.5)
        self._serial = serial.Serial(port, baudrate=115200, timeout=10)
        return server.TransportTimeouts(
            session_start_retry_timeout_sec=2.0,
            session_start_timeout_sec=5.0,
            session_established_timeout_sec=5.0,
        )
def close_transport(self):
if self._serial is None:
return
self._serial.close()
self._serial = None
def read_transport(self, n, timeout_sec):
self._serial.timeout = timeout_sec
if self._serial is None:
raise server.TransportClosedError()
return self._serial.read(n)
def write_transport(self, data, timeout_sec):
self._serial.write_timeout = timeout_sec
if self._serial is None:
raise server.TransportClosedError()
return self._serial.write(data)
if __name__ == "__main__":
server.main(Handler())
| 26,611 | 37.568116 | 111 | py |
tvm | tvm-main/apps/microtvm/cmsisnn/convert_image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import re
import sys
from PIL import Image
import numpy as np
from tvm.micro import copy_crt_config_header
def create_header_file(name, tensor_name, tensor_data, output_path):
    """Write ``tensor_data`` as a C header declaring an int8 byte array.

    Emits ``<output_path>/<name>.h`` containing ``const size_t
    <tensor_name>_len`` and an ``\\x``-escaped string literal named
    ``<tensor_name>`` placed in the ``.data.tvm`` section.
    """
    header_path = pathlib.Path(f"{output_path}/" + name).resolve().with_suffix(".h").resolve()
    # Escape every byte of the tensor as \xNN inside a C string literal.
    hex_digits = tensor_data.tobytes().hex()
    escaped_bytes = "".join(f"\\x{hex_digits[i:i + 2]}" for i in range(0, len(hex_digits), 2))
    prologue = (
        "\n"
        + f"const size_t {tensor_name}_len = {tensor_data.size};\n"
        + f'__attribute__((section(".data.tvm"), aligned(16))) int8_t {tensor_name}[] = "'
    )
    with open(header_path, "w") as header_file:
        header_file.write(prologue + escaped_bytes + '";\n\n')
def create_headers(image_name):
    """
    This function generates C header files for the input and output arrays required to run inferences

    Loads ``image_name`` from the current directory, resizes it to 224x224,
    shifts pixel values into int8 range, and writes ``include/inputs.h`` plus
    a zeroed 2-element ``include/outputs.h``.
    """
    img_path = os.path.join("./", f"{image_name}")
    # Resize image to 224x224
    resized_image = Image.open(img_path).resize((224, 224))
    img_data = np.asarray(resized_image).astype("float32")
    # # Add the batch dimension, as we are expecting 4-dimensional input: NCHW.
    # NOTE(review): the array is laid out (H, W, C) here, so expand_dims
    # produces NHWC despite the comment above -- confirm against the model's
    # expected layout.
    img_data = np.expand_dims(img_data, axis=0)
    # Create input header file
    # Shift uint8 pixel values [0, 255] into the int8 range [-128, 127].
    input_data = img_data - 128
    input_data = input_data.astype(np.int8)
    create_header_file("inputs", "input", input_data, "./include")
    # Create output header file
    output_data = np.zeros([2], np.int8)
    create_header_file(
        "outputs",
        "output",
        output_data,
        "./include",
    )
if __name__ == "__main__":
create_headers(sys.argv[1])
# Generate crt_config.h
crt_config_output_path = pathlib.Path(__file__).parent.resolve() / "build" / "crt_config"
if not crt_config_output_path.exists():
crt_config_output_path.mkdir()
copy_crt_config_header("crt", crt_config_output_path)
| 2,892 | 33.855422 | 101 | py |
tvm | tvm-main/apps/microtvm/reference-vm/base-box-tool.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import copy
import json
import logging
import pathlib
import os
import re
import shlex
import shutil
import subprocess
import sys
import pathlib
_LOG = logging.getLogger(__name__)
# Directory containing this script.
THIS_DIR = pathlib.Path(os.path.realpath(os.path.dirname(__file__)))
# List of vagrant providers supported by this tool
ALL_PROVIDERS = (
    "parallels",
    "virtualbox",
    "vmware_desktop",
)
# List of supported electronics platforms. Each must correspond
# to a sub-directory of this directory.
ALL_PLATFORMS = (
    "arduino",
    "zephyr",
)
# Extra scripts required to execute on provisioning
# in [platform]/base-box/base_box_provision.sh
EXTRA_SCRIPTS = [
    "apps/microtvm/reference-vm/base-box/base_box_setup_common.sh",
    "docker/install/ubuntu_install_core.sh",
    "docker/install/ubuntu_install_python.sh",
    "docker/utils/apt-install-and-clear.sh",
    "docker/install/ubuntu2204_install_llvm.sh",
    # Zephyr
    "docker/install/ubuntu_init_zephyr_project.sh",
    "docker/install/ubuntu_install_zephyr_sdk.sh",
    "docker/install/ubuntu_install_cmsis.sh",
    "docker/install/ubuntu_install_nrfjprog.sh",
]
# Name of the generated packer configuration file.
PACKER_FILE_NAME = "packer.json"
# List of identifying strings for microTVM boards for testing.
with open(THIS_DIR / ".." / "zephyr" / "template_project" / "boards.json") as f:
    zephyr_boards = json.load(f)
with open(THIS_DIR / ".." / "arduino" / "template_project" / "boards.json") as f:
    arduino_boards = json.load(f)
# Platform name -> iterable of supported board names.
ALL_MICROTVM_BOARDS = {
    "arduino": arduino_boards.keys(),
    "zephyr": zephyr_boards.keys(),
}
def parse_virtualbox_devices():
    """Return a list of dicts describing USB devices known to VirtualBox.

    Parses the blank-line-separated stanzas of ``VBoxManage list usbhost``;
    stanzas terminated by a blank line are kept only when they carry both
    VendorId and ProductId.
    """
    output = subprocess.check_output(["VBoxManage", "list", "usbhost"], encoding="utf-8")
    devices = []
    current_dev = {}
    for line in output.split("\n"):
        # A blank line terminates the current device stanza.
        if not line.strip():
            if current_dev:
                if "VendorId" in current_dev and "ProductId" in current_dev:
                    devices.append(current_dev)
                current_dev = {}
            continue
        # NOTE(review): a non-blank line without ":" raises ValueError here --
        # assumed not to occur in VBoxManage output.
        key, value = line.split(":", 1)
        value = value.lstrip(" ")
        current_dev[key] = value
    # NOTE(review): a trailing stanza (no final blank line) is appended
    # without the VendorId/ProductId filter above -- confirm intentional.
    if current_dev:
        devices.append(current_dev)
    return devices
# Matches consecutive machine-readable USBAttach lines, capturing the 4-digit
# hex vendor id and product id.
VIRTUALBOX_USB_DEVICE_RE = (
    "USBAttachVendorId[0-9]+=0x([0-9a-z]{4})\n" + "USBAttachProductId[0-9]+=0x([0-9a-z]{4})"
)


def parse_virtualbox_attached_usb_devices(vm_uuid):
    """Return (VendorId, ProductId) hex couples for USB devices attached to a VM.

    Parameters
    ----------
    vm_uuid : str
        UUID of the VirtualBox VM to inspect via ``VBoxManage showvminfo``.
    """
    output = subprocess.check_output(
        ["VBoxManage", "showvminfo", "--machinereadable", vm_uuid], encoding="utf-8"
    )
    # BUG FIX: Pattern.findall's second positional argument is `pos` (a start
    # offset), not a flags bitmask -- passing re.MULTILINE (== 8) silently
    # skipped the first 8 characters of the output. Flags belong in
    # re.compile (MULTILINE is a no-op for this anchor-free pattern anyway).
    r = re.compile(VIRTUALBOX_USB_DEVICE_RE, re.MULTILINE)
    attached_usb_devices = r.findall(output)
    # List of couples (VendorId, ProductId) for all attached USB devices
    return attached_usb_devices
# Extracts the 4-digit hex id from strings like "0x0483 (STMicroelectronics)".
VIRTUALBOX_VID_PID_RE = re.compile(r"0x([0-9A-Fa-f]{4}).*")
def attach_virtualbox(vm_uuid, vid_hex=None, pid_hex=None, serial=None):
    """Attach the host USB device matching vid/pid (and optional serial) to a VM.

    Installs a persistent "hold" usbfilter and hot-attaches the device.
    Raises ``Exception`` when no matching host device is found.
    """
    usb_devices = parse_virtualbox_devices()
    for dev in usb_devices:
        m = VIRTUALBOX_VID_PID_RE.match(dev["VendorId"])
        if not m:
            _LOG.warning("Malformed VendorId: %s", dev["VendorId"])
            continue
        dev_vid_hex = m.group(1).lower()
        m = VIRTUALBOX_VID_PID_RE.match(dev["ProductId"])
        if not m:
            _LOG.warning("Malformed ProductId: %s", dev["ProductId"])
            continue
        dev_pid_hex = m.group(1).lower()
        if (
            vid_hex == dev_vid_hex
            and pid_hex == dev_pid_hex
            and (serial is None or serial == dev["SerialNumber"])
        ):
            # Skip the attach when the device is already connected to the VM.
            # NOTE(review): `attached_devices` is unused and the call is
            # repeated in the loop header -- candidate for cleanup.
            attached_devices = parse_virtualbox_attached_usb_devices(vm_uuid)
            for vid, pid in parse_virtualbox_attached_usb_devices(vm_uuid):
                if vid_hex == vid and pid_hex == pid:
                    print(f"USB dev {vid_hex}:{pid_hex} already attached. Skipping attach.")
                    return
            rule_args = [
                "VBoxManage",
                "usbfilter",
                "add",
                "0",
                "--action",
                "hold",
                "--name",
                "test device",
                "--target",
                vm_uuid,
                "--vendorid",
                vid_hex,
                "--productid",
                pid_hex,
            ]
            if serial is not None:
                rule_args.extend(["--serialnumber", serial])
            subprocess.check_call(rule_args)
            subprocess.check_call(["VBoxManage", "controlvm", vm_uuid, "usbattach", dev["UUID"]])
            return
    raise Exception(
        f"Device with vid={vid_hex}, pid={pid_hex}, serial={serial!r} not found:\n{usb_devices!r}"
    )
def attach_parallels(uuid, vid_hex=None, pid_hex=None, serial=None):
    """Attach the host USB device matching vid/pid (and optional serial) to a Parallels VM.

    Disconnects the device from any VM currently using it before connecting
    it to ``uuid``. Raises ``Exception`` when no matching device is found.
    """
    usb_devices = json.loads(
        subprocess.check_output(["prlsrvctl", "usb", "list", "-j"], encoding="utf-8")
    )
    for dev in usb_devices:
        # "System name" is a "|"-separated field; positions 1, 2, 5 carry the
        # vendor id, product id, and serial respectively.
        _, dev_vid_hex, dev_pid_hex, _, _, dev_serial = dev["System name"].split("|")
        dev_vid_hex = dev_vid_hex.lower()
        dev_pid_hex = dev_pid_hex.lower()
        if (
            vid_hex == dev_vid_hex
            and pid_hex == dev_pid_hex
            and (serial is None or serial == dev_serial)
        ):
            subprocess.check_call(["prlsrvctl", "usb", "set", dev["Name"], uuid])
            # Steal the device from whichever VM currently holds it.
            if "Used-By-Vm-Name" in dev:
                subprocess.check_call(
                    ["prlctl", "set", dev["Used-By-Vm-Name"], "--device-disconnect", dev["Name"]]
                )
            subprocess.check_call(["prlctl", "set", uuid, "--device-connect", dev["Name"]])
            return
    raise Exception(
        f"Device with vid={vid_hex}, pid={pid_hex}, serial={serial!r} not found:\n{usb_devices!r}"
    )
def attach_vmware(uuid, vid_hex=None, pid_hex=None, serial=None):
    """Prompt the user to manually attach a USB device to a VMWare VM.

    VMWare has no automatic attach hook here, so this prints the device
    specification and blocks on ``input()`` until the user confirms.
    """
    print("NOTE: vmware doesn't seem to support automatic attaching of devices :(")
    # BUG FIX: the original string lacked the f-prefix, so it printed the
    # literal text "{uuid}" instead of the VM's UUID.
    print(f"The VMWare VM UUID is {uuid}")
    print("Please attach the following usb device using the VMWare GUI:")
    if vid_hex is not None:
        print(f" - VID: {vid_hex}")
    if pid_hex is not None:
        print(f" - PID: {pid_hex}")
    if serial is not None:
        print(f" - Serial: {serial}")
    if vid_hex is None and pid_hex is None and serial is None:
        print(" - (no specifications given for USB device)")
    print()
    print("Press [Enter] when the USB device is attached")
    input()
# Dispatch table: vagrant provider name -> USB-attach implementation.
ATTACH_USB_DEVICE = {
    "parallels": attach_parallels,
    "virtualbox": attach_virtualbox,
    "vmware_desktop": attach_vmware,
}
def generate_packer_config(file_path, providers):
    """Write the packer JSON used to build base boxes for ``providers``.

    Parameters
    ----------
    file_path : str or pathlib.Path
        Destination for the generated packer configuration file.
    providers : sequence of str
        Vagrant provider names; one builder entry is emitted per provider.
    """
    builders = []
    provisioners = []
    for provider_name in providers:
        builders.append(
            {
                "name": f"{provider_name}",
                "type": "vagrant",
                "box_name": f"microtvm-base-{provider_name}",
                "output_dir": f"output-packer-{provider_name}",
                "communicator": "ssh",
                "source_path": "generic/ubuntu1804",
                "provider": provider_name,
                "template": "Vagrantfile.packer-template",
            }
        )
    repo_root = subprocess.check_output(
        ["git", "rev-parse", "--show-toplevel"], encoding="utf-8"
    ).strip()
    scripts_to_copy = EXTRA_SCRIPTS
    for script in scripts_to_copy:
        script_path = os.path.join(repo_root, script)
        filename = os.path.basename(script_path)
        # BUG FIX: the destination must carry each script's own filename; the
        # previous literal placeholder sent every script to the same bogus
        # path (and left `filename` unused).
        provisioners.append({"type": "file", "source": script_path, "destination": f"~/{filename}"})
    provisioners.append(
        {
            "type": "shell",
            "script": "base_box_setup.sh",
        }
    )
    provisioners.append(
        {
            "type": "shell",
            "script": "base_box_provision.sh",
        }
    )
    with open(file_path, "w") as f:
        json.dump(
            {
                "builders": builders,
                "provisioners": provisioners,
            },
            f,
            sort_keys=True,
            indent=2,
        )
def build_command(args):
    """Build base box package(s) with packer for the requested providers."""
    base_box_dir = THIS_DIR / "base-box"
    generate_packer_config(
        os.path.join(base_box_dir, PACKER_FILE_NAME),
        args.provider or ALL_PROVIDERS,
    )
    env = copy.copy(os.environ)
    packer_args = ["packer", "build", "-force"]
    # Always capture a packer log next to the build.
    env["PACKER_LOG"] = "1"
    env["PACKER_LOG_PATH"] = "packer.log"
    if args.debug_packer:
        packer_args += ["-debug"]
    packer_args += [PACKER_FILE_NAME]
    # Refuse to clobber existing box packages unless --force was given.
    box_package_exists = False
    if not args.force:
        box_package_dirs = [(base_box_dir / f"output-packer-{p}") for p in args.provider]
        for box_package_dir in box_package_dirs:
            if box_package_dir.exists():
                print(f"A box package {box_package_dir} already exists. Refusing to overwrite it!")
                box_package_exists = True
    if box_package_exists:
        sys.exit("One or more box packages exist (see list above). To rebuild use '--force'")
    subprocess.check_call(packer_args, cwd=THIS_DIR / "base-box", env=env)
# Keys every board's test config must define, with their expected types.
REQUIRED_TEST_CONFIG_KEYS = {
    "vid_hex": str,
    "pid_hex": str,
}
# Match Vagrantfile assignments of the VM box and tvm_home, capturing the
# left-hand expression and the quoted value.
VM_BOX_RE = re.compile(r'(.*\.vm\.box) = "(.*)"')
VM_TVM_HOME_RE = re.compile(r'(.*tvm_home) = "(.*)"')
# Paths, relative to the platform box directory, which will not be copied to release-test dir.
SKIP_COPY_PATHS = [".vagrant", "base-box", "scripts"]
def do_build_release_test_vm(
    release_test_dir, user_box_dir: pathlib.Path, base_box_dir: pathlib.Path, provider_name
):
    """Create and boot a release-test VM from the freshly built base box.

    Copies ``user_box_dir`` (minus SKIP_COPY_PATHS) into ``release_test_dir``,
    rewrites the copied Vagrantfile to point at the packaged box, then runs
    ``vagrant up``. Returns True on success, False when the Vagrantfile could
    not be rewritten.
    """
    # Start from a clean slate: destroy and remove any previous test dir.
    if os.path.exists(release_test_dir):
        try:
            subprocess.check_call(["vagrant", "destroy", "-f"], cwd=release_test_dir)
        except subprocess.CalledProcessError:
            _LOG.warning("vagrant destroy failed--removing dirtree anyhow", exc_info=True)
        shutil.rmtree(release_test_dir)
    for dirpath, _, filenames in os.walk(user_box_dir):
        rel_path = os.path.relpath(dirpath, user_box_dir)
        # Skip excluded trees and anything nested beneath them.
        if any(
            rel_path == scp or rel_path.startswith(f"{scp}{os.path.sep}") for scp in SKIP_COPY_PATHS
        ):
            continue
        dest_dir = os.path.join(release_test_dir, rel_path)
        os.makedirs(dest_dir)
        for filename in filenames:
            shutil.copy2(os.path.join(dirpath, filename), os.path.join(dest_dir, filename))
    release_test_vagrantfile = os.path.join(release_test_dir, "Vagrantfile")
    with open(release_test_vagrantfile) as f:
        lines = list(f)
    found_box_line = False
    with open(release_test_vagrantfile, "w") as f:
        for line in lines:
            # Skip setting version
            if "config.vm.box_version" in line:
                continue
            m = VM_BOX_RE.match(line)
            tvm_home_m = VM_TVM_HOME_RE.match(line)
            if tvm_home_m:
                # Adjust tvm home for testing step
                f.write(f'{tvm_home_m.group(1)} = "../../../.."\n')
                continue
            if not m:
                f.write(line)
                continue
            # Point the box assignment at the locally packaged artifact.
            box_package = os.path.join(
                base_box_dir, f"output-packer-{provider_name}", "package.box"
            )
            box_relpath = os.path.relpath(box_package, release_test_dir)
            f.write(f'{m.group(1)} = "{box_relpath}"\n')
            found_box_line = True
    if not found_box_line:
        _LOG.error(
            "testing provider %s: couldn't find config.box.vm = line in Vagrantfile; unable to test",
            provider_name,
        )
        return False
    # Delete the old box registered with Vagrant, which may lead to a falsely-passing release test.
    remove_args = ["vagrant", "box", "remove", box_relpath]
    # Exit code 1 means the box was not registered, which is fine.
    return_code = subprocess.call(remove_args, cwd=release_test_dir)
    assert return_code in (0, 1), f'{" ".join(remove_args)} returned exit code {return_code}'
    subprocess.check_call(["vagrant", "up", f"--provider={provider_name}"], cwd=release_test_dir)
    return True
def do_run_release_test(release_test_dir, provider_name, test_config, test_device_serial):
    """Attach the test USB device (if any) and run the platform test script in the VM."""
    # The provider-specific machine id is written by `vagrant up`.
    with open(
        os.path.join(release_test_dir, ".vagrant", "machines", "default", provider_name, "id")
    ) as f:
        machine_uuid = f.read()
    # Check if target is not QEMU
    if test_config["vid_hex"] and test_config["pid_hex"]:
        ATTACH_USB_DEVICE[provider_name](
            machine_uuid,
            vid_hex=test_config["vid_hex"],
            pid_hex=test_config["pid_hex"],
            serial=test_device_serial,
        )
    tvm_home = os.path.realpath(THIS_DIR / ".." / ".." / "..")
    def _quote_cmd(cmd):
        # Shell-quote each argument so the command survives `bash -ec`.
        return " ".join(shlex.quote(a) for a in cmd)
    test_cmd = (
        _quote_cmd(["cd", tvm_home])
        + " && "
        + _quote_cmd(
            [
                f"apps/microtvm/reference-vm/base-box/base_box_test.sh",
                test_config["microtvm_board"],
            ]
        )
    )
    subprocess.check_call(["vagrant", "ssh", "-c", f"bash -ec '{test_cmd}'"], cwd=release_test_dir)
def test_command(args):
    """Build (unless skipped) and run the release test on each provider.

    On a test failure the VM is intentionally left running for investigation
    and the process exits; on success the VM is destroyed unless
    ``--skip-destroy`` was given.
    """
    user_box_dir = THIS_DIR
    base_box_dir = user_box_dir / "base-box"
    boards_file = THIS_DIR / ".." / args.platform / "template_project" / "boards.json"
    with open(boards_file) as f:
        test_config = json.load(f)
    # select microTVM test config
    microtvm_test_config = test_config[args.microtvm_board]
    # Validate the board's config shape before doing any expensive work.
    for key, expected_type in REQUIRED_TEST_CONFIG_KEYS.items():
        assert key in microtvm_test_config and isinstance(
            microtvm_test_config[key], expected_type
        ), f"Expected key {key} of type {expected_type} in {boards_file}: {test_config!r}"
    microtvm_test_config["vid_hex"] = microtvm_test_config["vid_hex"].lower()
    microtvm_test_config["pid_hex"] = microtvm_test_config["pid_hex"].lower()
    microtvm_test_config["microtvm_board"] = args.microtvm_board
    providers = args.provider
    release_test_dir = THIS_DIR / f"release-test"
    if args.skip_build or args.skip_destroy:
        assert (
            len(providers) == 1
        ), "--skip-build and/or --skip-destroy was given, but >1 provider specified"
    test_failed = False
    for provider_name in providers:
        try:
            if not args.skip_build:
                do_build_release_test_vm(
                    release_test_dir, user_box_dir, base_box_dir, provider_name
                )
            do_run_release_test(
                release_test_dir,
                provider_name,
                microtvm_test_config,
                args.test_device_serial,
            )
        except subprocess.CalledProcessError:
            test_failed = True
            sys.exit(
                f"\n\nERROR: Provider '{provider_name}' failed the release test. "
                "You can re-run it to reproduce the issue without building everything "
                "again by passing the --skip-build and specifying only the provider that failed. "
                "The VM is still running in case you want to connect it via SSH to "
                "investigate further the issue, thus it's necessary to destroy it manually "
                "to release the resources back to the host, like a USB device attached to the VM."
            )
        finally:
            # if we reached out here do_run_release_test() succeeded, hence we can
            # destroy the VM and release the resources back to the host if user haven't
            # requested to not destroy it.
            if not (args.skip_destroy or test_failed):
                subprocess.check_call(["vagrant", "destroy", "-f"], cwd=release_test_dir)
                shutil.rmtree(release_test_dir)
    print(f'\n\nThe release tests passed on all specified providers: {", ".join(providers)}.')
def release_command(args):
    """Publish built base box packages to Vagrant Cloud.

    Creates the release version (unless skipped) and uploads one package per
    provider. Exits with an error when no release version is supplied.
    """
    # BUG FIX: validate --release-version BEFORE any cloud call; the original
    # only checked it after args.release_version had already been interpolated
    # into the `vagrant cloud version create` invocation.
    if not args.release_version:
        sys.exit("--release-version must be specified")
    if args.release_full_name:
        vm_name = args.release_full_name
    else:
        vm_name = "tlcpack/microtvm"
    if not args.skip_creating_release_version:
        subprocess.check_call(
            [
                "vagrant",
                "cloud",
                "version",
                "create",
                vm_name,
                args.release_version,
            ]
        )
    for provider_name in args.provider:
        subprocess.check_call(
            [
                "vagrant",
                "cloud",
                "publish",
                "-f",
                vm_name,
                args.release_version,
                provider_name,
                str(THIS_DIR / "base-box" / f"output-packer-{provider_name}/package.box"),
            ]
        )
def parse_args():
    """Build and run the CLI argument parser.

    Subcommands: ``build`` (create a base box), ``test`` (run the release
    test per platform/board), ``release`` (publish to Vagrant Cloud). Each
    subparser stores its handler in ``args.func``.
    """
    parser = argparse.ArgumentParser(
        description="Automates building, testing, and releasing a base box"
    )
    subparsers = parser.add_subparsers(help="Action to perform.")
    subparsers.required = True
    subparsers.dest = "action"
    parser.add_argument(
        "--provider",
        choices=ALL_PROVIDERS,
        action="append",
        required=True,
        help="Name of the provider or providers to act on",
    )
    # "test" has special options for different platforms, and "build", "release" might
    # in the future, so we'll add the platform argument to each one individually.
    platform_help_str = "Platform to use (e.g. Arduino, Zephyr)"
    # Options for build subcommand
    parser_build = subparsers.add_parser("build", help="Build a base box.")
    parser_build.set_defaults(func=build_command)
    parser_build.add_argument(
        "--debug-packer",
        action="store_true",
        help=("Run packer in debug mode, and write log to the base-box directory."),
    )
    parser_build.add_argument(
        "--force",
        action="store_true",
        help=("Force rebuilding a base box from scratch if one already exists."),
    )
    # Options for test subcommand
    parser_test = subparsers.add_parser("test", help="Test a base box before release.")
    parser_test.set_defaults(func=test_command)
    parser_test.add_argument(
        "--skip-build",
        action="store_true",
        help=(
            "If given, assume a box has already been built in the release-test subdirectory, "
            "so use that box to execute the release test script. If the tests fail the VM used "
            "for testing will be left running for further investigation and will need to be "
            "destroyed manually. If all tests pass on all specified providers no VM is left running, "
            "unless --skip-destroy is given too."
        ),
    )
    parser_test.add_argument(
        "--skip-destroy",
        action="store_true",
        help=(
            "Skip destroying the test VM even if all tests pass. Can only be used if a single "
            "provider is specified. Default is to destroy the VM if all tests pass (and always "
            "skip destroying it if a test fails)."
        ),
    )
    parser_test.add_argument(
        "--test-device-serial",
        help=(
            "If given, attach the test device with this USB serial number. Corresponds to the "
            "iSerial field from `lsusb -v` output."
        ),
    )
    # Each platform gets its own sub-subparser so board choices stay
    # platform-specific.
    parser_test_platform_subparsers = parser_test.add_subparsers(help=platform_help_str)
    for platform in ALL_PLATFORMS:
        platform_specific_parser = parser_test_platform_subparsers.add_parser(platform)
        platform_specific_parser.set_defaults(platform=platform)
        platform_specific_parser.add_argument(
            "--microtvm-board",
            choices=ALL_MICROTVM_BOARDS[platform],
            required=True,
            help="MicroTVM board used for testing.",
        )
    # Options for release subcommand
    parser_release = subparsers.add_parser("release", help="Release base box to cloud.")
    parser_release.set_defaults(func=release_command)
    parser_release.add_argument(
        "--release-version",
        required=True,
        help="Version to release, in the form 'x.y.z'. Must be specified with release.",
    )
    parser_release.add_argument(
        "--skip-creating-release-version",
        action="store_true",
        help="Skip creating the version and just upload for this provider.",
    )
    parser_release.add_argument(
        "--release-full-name",
        required=False,
        type=str,
        default=None,
        help=(
            "If set, it will use this as the full release name and version for the box. "
            "If this set, it will ignore `--release-version`."
        ),
    )
    args = parser.parse_args()
    return args
def main():
    """CLI entry point: parse arguments and dispatch to the chosen subcommand."""
    parsed = parse_args()
    parsed.func(parsed)


if __name__ == "__main__":
    main()
| 21,466 | 33.020602 | 102 | py |
tvm | tvm-main/apps/microtvm/zephyr_cmsisnn/model/convert_labels.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import sys
import shutil
import pathlib
from tvm.micro import copy_crt_config_header
def create_labels_header(labels_file, output_path):
    """Generate a C source file containing the model's labels as a string array.

    Reads one label per line from ``labels_file`` and writes
    ``<output_path>/labels.c`` defining ``char* labels[]`` with one quoted,
    newline-stripped entry per label.

    Parameters
    ----------
    labels_file : str
        Path to a text file with one label per line.
    output_path : str
        Directory in which ``labels.c`` is created.
    """
    labels_path = pathlib.Path(labels_file).resolve()
    file_path = (pathlib.Path(output_path) / "labels.c").resolve()
    with open(labels_path) as f:
        labels = f.readlines()
    with open(file_path, "w") as header_file:
        # Plain literal: the previous f-string only existed to escape "{".
        header_file.write("char* labels[] = {")
        # Iterate labels directly; the previous enumerate() index was unused.
        for label in labels:
            header_file.write(f'"{label.rstrip()}",')
        header_file.write("};\n")
def prepare_crt_config():
    """Create ``<repo>/build/crt_config`` and copy the Zephyr crt_config.h into it.

    The directory is created next to this script's parent directory if it does
    not already exist.
    """
    # BUG FIX: ``Path.parent`` is a property, not a method -- the original
    # called ``.parent()``, which raised TypeError at runtime.
    crt_config_output_path = (
        pathlib.Path(__file__).parent.resolve().parent / "build" / "crt_config"
    )
    if not crt_config_output_path.exists():
        crt_config_output_path.mkdir()
    copy_crt_config_header("zephyr", crt_config_output_path)
# Script entry point: argv[1] is the labels text file, argv[2] the output
# directory for the generated C source.
if __name__ == "__main__":
    create_labels_header(sys.argv[1], sys.argv[2])
    prepare_crt_config()
| 1,874 | 31.327586 | 95 | py |
tvm | tvm-main/apps/microtvm/zephyr_cmsisnn/model/convert_input.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pathlib
import sys
import numpy as np
def create_file(name, prefix, tensor_name, tensor_data, output_path):
    """
    Generate a C source file containing the data from the numpy array provided.

    Writes ``<output_path>/<name>.c`` defining ``<tensor_name>_len`` and a
    float array ``<tensor_name>_storage`` (optionally ``const`` via ``prefix``).
    """
    # Build the path with pathlib's "/" operator instead of string concatenation.
    raw_path = (pathlib.Path(output_path) / name).with_suffix(".c").resolve()
    with open(raw_path, "w") as header_file:
        header_file.write(
            "#include <stddef.h>\n"
            "#include <stdint.h>\n"
            f"const size_t {tensor_name}_len = {tensor_data.size};\n"
            f"{prefix} float {tensor_name}_storage[] = "
        )
        header_file.write("{")
        # ndarray.flat yields elements in C order, matching the previous
        # np.ndindex + per-element indexing, without the index bookkeeping.
        for value in tensor_data.flat:
            header_file.write(f"{value}, ")
        header_file.write("};\n\n")
def create_files(input_file, output_dir):
    """
    Generate the C input/output data files required to run inferences.

    ``inputs.c`` holds the data loaded from ``input_file``; ``outputs.c``
    holds a zero-initialized 12-element float buffer for results.
    """
    # Make sure the destination directory exists before writing anything.
    os.makedirs(output_dir, exist_ok=True)
    # Input tensor is const: the model only reads it.
    create_file("inputs", "const", "input", np.loadtxt(input_file), output_dir)
    # Output buffer is writable and starts zeroed.
    create_file("outputs", "", "output", np.zeros([12], np.float32), output_dir)
# Script entry point: argv[1] is the input data file, argv[2] the output
# directory for the generated C sources.
if __name__ == "__main__":
    create_files(sys.argv[1], sys.argv[2])
| 2,266 | 32.338235 | 94 | py |
tvm | tvm-main/apps/microtvm/zephyr/template_project/microtvm_api_server.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import atexit
import collections
import collections.abc
import enum
import fcntl
import json
import logging
import os
import os.path
import pathlib
import queue
import re
import shlex
import shutil
import struct
import subprocess
import sys
import tarfile
import tempfile
import threading
from typing import Union
import usb
import psutil
import stat
import serial
import serial.tools.list_ports
import yaml
import server
# Module-level logger and filesystem layout for this project API server.
_LOG = logging.getLogger(__name__)
# BUG FIX: ``os.path`` has no ``getcwd`` attribute; fall back to
# ``os.getcwd()`` when __file__ has no directory component.
API_SERVER_DIR = pathlib.Path(os.path.dirname(__file__) or os.getcwd())
BUILD_DIR = API_SERVER_DIR / "build"
MODEL_LIBRARY_FORMAT_RELPATH = "model.tar"
# The template project ships without a model archive; its presence marks a
# generated project.
IS_TEMPLATE = not (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH).exists()
BOARDS = API_SERVER_DIR / "boards.json"
CMAKELIST_FILENAME = "CMakeLists.txt"
# Used to check Zephyr version installed on the host.
# We only check two levels of the version.
ZEPHYR_VERSION = 3.2
# NOTE(review): this also binds a module global named ``default`` -- looks
# accidental but harmless; left as-is for compatibility.
WEST_CMD = default = sys.executable + " -m west" if sys.executable else None
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
# Data structure to hold the information microtvm_api_server.py needs
# to communicate with each of these boards.
try:
    with open(BOARDS) as boards:
        BOARD_PROPERTIES = json.load(boards)
except FileNotFoundError:
    raise FileNotFoundError(f"Board file {{{BOARDS}}} does not exist.")
def check_call(cmd_args, *args, **kwargs):
    """Log and run a subprocess command.

    Thin wrapper around ``subprocess.check_call`` that first logs the quoted
    command line (and working directory, when given) at INFO level.
    """
    cwd_str = "" if "cwd" not in kwargs else f" (in cwd: {kwargs['cwd']})"
    _LOG.info("run%s: %s", cwd_str, " ".join(shlex.quote(a) for a in cmd_args))
    return subprocess.check_call(cmd_args, *args, **kwargs)
# Matches one "name:type=value" entry from a CMakeCache.txt file.
CACHE_ENTRY_RE = re.compile(r"(?P<name>[^:]+):(?P<type>[^=]+)=(?P<value>.*)")
# Maps all of CMake's accepted boolean spellings onto Python bools.
CMAKE_BOOL_MAP = dict(
    [(k, True) for k in ("1", "ON", "YES", "TRUE", "Y")]
    + [(k, False) for k in ("0", "OFF", "NO", "FALSE", "N", "IGNORE", "NOTFOUND", "")]
)
# Error shown when a project needs CMSIS-NN but no CMSIS path was provided.
CMSIS_PATH_ERROR = (
    "cmsis_path is not defined! Please pass it as an option or set the `CMSIS_PATH` env variable."
)
class CMakeCache(collections.abc.Mapping):
    """Read-only mapping over the entries of a CMakeCache.txt file.

    The cache file is parsed lazily on first access; BOOL-typed entries are
    converted to Python bools, everything else is kept as a string.
    """

    def __init__(self, path):
        self._path = path
        self._dict = None

    def _ensure_loaded(self):
        # Parse lazily so constructing this object (e.g. at import time,
        # before the build directory exists) never touches the filesystem.
        if self._dict is None:
            self._dict = self._read_cmake_cache()
        return self._dict

    def __iter__(self):
        # BUG FIX: previously iterated self._dict directly, which raised
        # TypeError when no prior __getitem__ had loaded the cache.
        return iter(self._ensure_loaded())

    def __getitem__(self, key):
        return self._ensure_loaded()[key]

    def __len__(self):
        # BUG FIX: same lazy-load guard as __iter__ (len(None) raised).
        return len(self._ensure_loaded())

    def _read_cmake_cache(self):
        """Read a CMakeCache.txt-like file and return a dictionary of values."""
        entries = collections.OrderedDict()
        with open(self._path, encoding="utf-8") as f:
            for line in f:
                m = CACHE_ENTRY_RE.match(line.rstrip("\n"))
                if not m:
                    continue
                if m.group("type") == "BOOL":
                    value = CMAKE_BOOL_MAP[m.group("value").upper()]
                else:
                    value = m.group("value")
                entries[m.group("name")] = value
        return entries
CMAKE_CACHE = CMakeCache(BUILD_DIR / "CMakeCache.txt")
class BoardError(Exception):
    """Error raised when an attached development board cannot be opened.

    Typical causes are missing /dev device nodes or insufficient permissions.
    """
class BoardAutodetectFailed(Exception):
    """Error raised when no attached hardware matches the requested board.

    Emitted when autodetection finds no device for the ``board=`` value given
    to ZephyrCompiler.
    """
def _get_flash_runner():
    """Return the Zephyr flash-runner name for the configured board.

    Prefers the value cached by CMake; falls back to the ``flash-runner``
    entry of the generated runners.yaml file.
    """
    runner = CMAKE_CACHE.get("ZEPHYR_BOARD_FLASH_RUNNER")
    if runner is None:
        with open(CMAKE_CACHE["ZEPHYR_RUNNERS_YAML"]) as runners_f:
            runner = yaml.load(runners_f, Loader=yaml.FullLoader)["flash-runner"]
    return runner
def _find_board_from_cmake_file(cmake_file: Union[str, pathlib.Path]) -> str:
"""Find Zephyr board from generated CMakeLists.txt"""
zephyr_board = None
with open(cmake_file) as cmake_f:
for line in cmake_f:
if line.startswith("set(BOARD"):
zephyr_board = line.strip("\n").strip("\r").strip(")").split(" ")[1]
break
if not zephyr_board:
raise RuntimeError(f"No Zephyr board set in the {cmake_file}.")
return zephyr_board
def _find_platform_from_cmake_file(cmake_file: Union[str, pathlib.Path]) -> str:
emu_platform = None
with open(cmake_file) as cmake_f:
for line in cmake_f:
set_platform = re.match("set\(EMU_PLATFORM (.*)\)", line)
if set_platform:
emu_platform = set_platform.group(1)
break
return emu_platform
def _get_device_args(serial_number: str = None):
    """Return extra flash-runner CLI args selecting the attached device.

    Dispatches on the flash runner configured by Zephyr; raises BoardError
    for runners without a known device-selection scheme.
    """
    flash_runner = _get_flash_runner()
    if flash_runner == "nrfjprog":
        return _get_nrf_device_args(serial_number)
    if flash_runner == "openocd":
        return _get_openocd_device_args(serial_number)
    raise BoardError(
        f"Don't know how to find serial terminal for board {_find_board_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)} with flash "
        f"runner {flash_runner}"
    )
def _get_board_mem_size_bytes(zephyr_base: str, board: str):
    """Return the board's RAM size in bytes, or None if it cannot be read.

    Reads the ``ram`` field (KiB) from the board's YAML description under the
    Zephyr tree; any failure is logged and treated as "size unknown".
    """
    board_file_path = pathlib.Path(zephyr_base) / "boards" / "arm" / board / (board + ".yaml")
    try:
        with open(board_file_path) as f:
            board_data = yaml.load(f, Loader=yaml.FullLoader)
            return int(board_data["ram"]) * 1024
    # BUG FIX: was a bare ``except:``, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception still covers missing/malformed files.
    except Exception:
        _LOG.warning("Board memory information is not available.")
    return None
# Default TVM workspace heap used when a board has no specific recommendation.
DEFAULT_WORKSPACE_SIZE_BYTES = 216 * 1024


def _get_recommended_heap_size_bytes(board: str):
    """Return the recommended TVM heap size in bytes for ``board``.

    Uses the per-board override from boards.json when present, falling back
    to the project-wide default workspace size.
    """
    # Single dict.get replaces the previous "in"-check + subscript pair.
    return BOARD_PROPERTIES[board].get(
        "recommended_heap_size_bytes", DEFAULT_WORKSPACE_SIZE_BYTES
    )
def generic_find_serial_port(serial_number: str = None):
    """Find a USB serial port based on its serial number or its VID:PID.

    This method finds a USB serial port device path based on the port's serial number (if given) or
    based on the board's idVendor and idProduct ids.

    Parameters
    ----------
    serial_number : str
        The serial number associated to the USB serial port which the board is attached to. This is
        the same number as shown by 'lsusb -v' in the iSerial field.

    Returns
    -------
    Path to the USB serial port device, for example /dev/ttyACM1.
    """
    if serial_number:
        regex = serial_number
    else:
        prop = BOARD_PROPERTIES[_find_board_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)]
        device_id = ":".join([prop["vid_hex"], prop["pid_hex"]])
        regex = device_id

    serial_ports = list(serial.tools.list_ports.grep(regex))

    if len(serial_ports) == 0:
        # BUG FIX: the old message always referenced ``prop``, which is
        # unbound (UnboundLocalError) when a serial number was supplied.
        if serial_number:
            raise Exception(f"No serial port found for serial number {serial_number}!")
        raise Exception(f"No serial port found for board {prop['board']}!")

    if len(serial_ports) != 1:
        ports_lst = ""
        for port in serial_ports:
            ports_lst += f"Serial port: {port.device}, serial number: {port.serial_number}\n"
        # BUG FIX: the message was missing its f-prefix, so the collected
        # port list was never interpolated.
        raise Exception(f"Expected 1 serial port, found multiple ports:\n {ports_lst}")

    return serial_ports[0].device
def _get_openocd_device_args(serial_number: str = None):
    """Return openocd CLI args selecting the board's USB serial device."""
    return ["--serial", generic_find_serial_port(serial_number)]
def _get_nrf_device_args(serial_number: str = None) -> list:
    """Return nrfjprog CLI args (``--snr ...``) selecting the attached board.

    Queries ``nrfjprog --ids`` for connected boards and validates the
    requested serial number against them. Returns an empty list when no
    board is attached and none was requested.
    """
    # iSerial has string type which could mistmatch with
    # the output of `nrfjprog --ids`. Example: 001050007848 vs 1050007848
    # BUG FIX: guard the lstrip -- serial_number defaults to None (and None
    # is explicitly handled below), but calling .lstrip() on None raised
    # AttributeError before any device lookup ran.
    if serial_number is not None:
        serial_number = serial_number.lstrip("0")

    nrfjprog_args = ["nrfjprog", "--ids"]
    nrfjprog_ids = subprocess.check_output(nrfjprog_args, encoding="utf-8")
    if not nrfjprog_ids.strip("\n"):
        raise BoardAutodetectFailed(f'No attached boards recognized by {" ".join(nrfjprog_args)}')

    boards = nrfjprog_ids.split("\n")[:-1]
    if len(boards) > 1:
        if serial_number is None:
            raise BoardError(
                "Multiple boards connected; specify one with nrfjprog_snr=: " f'{", ".join(boards)}'
            )
        if serial_number not in boards:
            raise BoardError(f"serial number ({serial_number}) not found in {boards}")
        return ["--snr", serial_number]

    if not boards:
        return []

    return ["--snr", boards[0]]
# Discover available project types from the template's src/ subdirectories.
# Only meaningful in the template project; generated projects leave this empty.
PROJECT_TYPES = []
if IS_TEMPLATE:
    for d in (API_SERVER_DIR / "src").iterdir():
        if d.is_dir():
            PROJECT_TYPES.append(d.name)
# Full option schema advertised by this project API server: the generic
# options shared by all platforms plus the Zephyr-specific ones below.
PROJECT_OPTIONS = server.default_project_options(
    project_type={"choices": tuple(PROJECT_TYPES)},
    board={"choices": list(BOARD_PROPERTIES)},
    verbose={"optional": ["generate_project"]},
) + [
    server.ProjectOption(
        "gdbserver_port",
        optional=["open_transport"],
        type="int",
        default=None,
        help=("If given, port number to use when running the local gdbserver."),
    ),
    server.ProjectOption(
        "serial_number",
        optional=["open_transport", "flash"],
        type="str",
        default=None,
        help=("Board serial number."),
    ),
    # west_cmd / zephyr_base flip between required and optional depending on
    # whether a usable default was detected at import time.
    server.ProjectOption(
        "west_cmd",
        required=(
            ["generate_project", "build", "flash", "open_transport"] if not WEST_CMD else None
        ),
        optional=(["generate_project", "build", "flash", "open_transport"] if WEST_CMD else None),
        type="str",
        default=WEST_CMD,
        help=(
            "Path to the west tool. If given, supersedes both the zephyr_base "
            "option and ZEPHYR_BASE environment variable."
        ),
    ),
    server.ProjectOption(
        "zephyr_base",
        required=(["generate_project", "open_transport"] if not ZEPHYR_BASE else None),
        optional=(["generate_project", "open_transport"] if ZEPHYR_BASE else ["build"]),
        type="str",
        default=ZEPHYR_BASE,
        help="Path to the zephyr base directory.",
    ),
    server.ProjectOption(
        "config_main_stack_size",
        optional=["generate_project"],
        type="int",
        default=None,
        help="Sets CONFIG_MAIN_STACK_SIZE for Zephyr board.",
    ),
    server.ProjectOption(
        "arm_fvp_path",
        optional=["generate_project", "open_transport"],
        type="str",
        default=None,
        help="Path to the FVP binary to invoke.",
    ),
    server.ProjectOption(
        "use_fvp",
        optional=["generate_project"],
        type="bool",
        default=False,
        help="Run on the FVP emulator instead of hardware.",
    ),
    server.ProjectOption(
        "workspace_size_bytes",
        optional=["generate_project"],
        type="int",
        default=None,
        help="Sets the value for TVM_WORKSPACE_SIZE_BYTES passed to K_HEAP_DEFINE() to service TVM memory allocation requests.",
    ),
]
class Handler(server.ProjectAPIHandler):
def __init__(self):
super(Handler, self).__init__()
self._proc = None
    def server_info_query(self, tvm_version):
        """Describe this project API server to TVM.

        Reports the platform name, whether this instance is the template or a
        generated project (templates have no model library yet), and the
        supported project options.
        """
        return server.ServerInfo(
            platform_name="zephyr",
            is_template=IS_TEMPLATE,
            model_library_format_path=""
            if IS_TEMPLATE
            else (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH),
            project_options=PROJECT_OPTIONS,
        )
# These files and directories will be recursively copied into generated projects from the CRT.
CRT_COPY_ITEMS = ("include", "CMakeLists.txt", "src")
# Maps extra line added to prj.conf to a tuple or list of zephyr_board for which it is needed.
EXTRA_PRJ_CONF_DIRECTIVES = {
"CONFIG_TIMER_RANDOM_GENERATOR=y": (
"qemu_x86",
"qemu_riscv32",
"qemu_cortex_r5",
"qemu_riscv64",
),
"CONFIG_ENTROPY_GENERATOR=y": (
"mps2_an521",
"nrf5340dk_nrf5340_cpuapp",
"nucleo_f746zg",
"nucleo_l4r5zi",
"stm32f746g_disco",
),
}
    def _create_prj_conf(
        self,
        project_dir: pathlib.Path,
        board: str,
        project_type: str,
        config_main_stack_size: int,
        config_led: bool,
        use_fvp: bool,
    ):
        """Write the Zephyr ``prj.conf`` Kconfig fragment for this project.

        Emits UART/console settings, optional LED and RPC-server options,
        newlib/FPU support, the main stack size, RNG configuration, and any
        board-specific directives from EXTRA_PRJ_CONF_DIRECTIVES.
        """
        with open(project_dir / "prj.conf", "w") as f:
            f.write(
                "# For UART used from main().\n"
                "CONFIG_RING_BUFFER=y\n"
                "CONFIG_UART_CONSOLE=n\n"
                "CONFIG_UART_INTERRUPT_DRIVEN=y\n"
                "\n"
            )
            # LEDs only exist on physical hardware, not under QEMU/FVP.
            if (
                config_led
                and not self._is_qemu(board, use_fvp)
                and not self._is_fvp(board, use_fvp)
            ):
                f.write("# For debugging.\n" "CONFIG_LED=y\n" "\n")
            f.write("# For TVMPlatformAbort().\n" "CONFIG_REBOOT=y\n" "\n")
            if project_type == "host_driven":
                f.write(
                    "CONFIG_TIMING_FUNCTIONS=y\n"
                    "# For RPC server C++ bindings.\n"
                    "CONFIG_CPLUSPLUS=y\n"
                    "CONFIG_LIB_CPLUSPLUS=y\n"
                    "\n"
                )
            f.write("# For math routines\n" "CONFIG_NEWLIB_LIBC=y\n" "\n")
            if self._has_fpu(board):
                f.write("# For models with floating point.\n" "CONFIG_FPU=y\n" "\n")
            # Set main stack size, if needed.
            if config_main_stack_size is not None:
                f.write(f"CONFIG_MAIN_STACK_SIZE={config_main_stack_size}\n")
            f.write("# For random number generation.\n" "CONFIG_TEST_RANDOM_GENERATOR=y\n")
            f.write("\n# Extra prj.conf directives\n")
            for line, board_list in self.EXTRA_PRJ_CONF_DIRECTIVES.items():
                if board in board_list:
                    f.write(f"{line}\n")
            # TODO(mehrdadh): due to https://github.com/apache/tvm/issues/12721
            if board not in ["qemu_riscv64"]:
                f.write("# For setting -O2 in compiler.\n" "CONFIG_SPEED_OPTIMIZATIONS=y\n")
            f.write("\n")
API_SERVER_CRT_LIBS_TOKEN = "<API_SERVER_CRT_LIBS>"
CMAKE_ARGS_TOKEN = "<CMAKE_ARGS>"
QEMU_PIPE_TOKEN = "<QEMU_PIPE>"
CRT_LIBS_BY_PROJECT_TYPE = {
"host_driven": "microtvm_rpc_server microtvm_rpc_common aot_executor_module aot_executor common",
"aot_standalone_demo": "memory microtvm_rpc_common common",
"mlperftiny": "memory common",
}
def _get_platform_version(self, zephyr_base: str) -> float:
with open(pathlib.Path(zephyr_base) / "VERSION", "r") as f:
lines = f.readlines()
for line in lines:
line = line.replace(" ", "").replace("\n", "").replace("\r", "")
if "VERSION_MAJOR" in line:
version_major = line.split("=")[1]
if "VERSION_MINOR" in line:
version_minor = line.split("=")[1]
return float(f"{version_major}.{version_minor}")
def _cmsis_required(self, project_path: Union[str, pathlib.Path]) -> bool:
"""Check if CMSIS dependency is required."""
project_path = pathlib.Path(project_path)
for path in (project_path / "codegen" / "host" / "src").iterdir():
if path.is_file():
with open(path, "r") as lib_f:
lib_content = lib_f.read()
if any(
header in lib_content
for header in [
"<arm_nnsupportfunctions.h>",
"arm_nn_types.h",
"arm_nnfunctions.h",
]
):
return True
return False
    def _generate_cmake_args(
        self,
        mlf_extracted_path: pathlib.Path,
        board: str,
        use_fvp: bool,
        west_cmd: str,
        zephyr_base: str,
        verbose: bool,
        cmsis_path: pathlib.Path,
    ) -> str:
        """Return the ``set(...)`` lines substituted into CMakeLists.txt.

        Covers verbosity, Zephyr/west locations, the emulation platform (QEMU
        or ARM FVP), the target board, and -- when the generated model needs
        CMSIS-NN -- the CMSIS checkout path.
        """
        cmake_args = "\n# cmake args\n"
        if verbose:
            cmake_args += "set(CMAKE_VERBOSE_MAKEFILE TRUE)\n"
        if zephyr_base:
            cmake_args += f"set(ZEPHYR_BASE {zephyr_base})\n"
        if west_cmd:
            cmake_args += f"set(WEST {west_cmd})\n"
        if self._is_qemu(board, use_fvp):
            # Some boards support more than one emulator, so ensure QEMU is set.
            cmake_args += f"set(EMU_PLATFORM qemu)\n"
        if self._is_fvp(board, use_fvp):
            cmake_args += "set(EMU_PLATFORM armfvp)\n"
            cmake_args += "set(ARMFVP_FLAGS -I)\n"
        cmake_args += f"set(BOARD {board})\n"
        if self._cmsis_required(mlf_extracted_path):
            assert cmsis_path, CMSIS_PATH_ERROR
            cmake_args += f"set(CMSIS_PATH {str(cmsis_path)})\n"
        return cmake_args
def _copy_src_and_header_files(self, src_dir: pathlib.Path, dst_dir: pathlib.Path):
"""Copy content of src_dir from template project to dst_dir in separate
source and header sub-directories.
"""
for file in os.listdir(src_dir):
file = src_dir / file
if file.is_file():
if file.suffix in [".cc", ".c"]:
shutil.copy2(file, dst_dir / "src")
elif file.suffix in [".h"]:
shutil.copy2(file, dst_dir / "include" / "tvm")
    def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
        """Generate a standalone Zephyr project for the given model.

        Copies this server, the CRT, the project-type sources, and the Model
        Library Format archive into ``project_dir``, then materializes
        CMakeLists.txt and prj.conf from the chosen options. Warns (or errors
        with ``warning_as_error``) on unsupported Zephyr versions.
        """
        zephyr_board = options["board"]
        project_type = options["project_type"]
        zephyr_base = options["zephyr_base"]
        west_cmd = options["west_cmd"]

        warning_as_error = options.get("warning_as_error")
        use_fvp = options.get("use_fvp")
        verbose = options.get("verbose")

        recommended_heap_size = _get_recommended_heap_size_bytes(zephyr_board)
        workspace_size_bytes = options.get("workspace_size_bytes") or recommended_heap_size
        board_mem_size = _get_board_mem_size_bytes(zephyr_base, zephyr_board)

        compile_definitions = options.get("compile_definitions")
        config_main_stack_size = options.get("config_main_stack_size")
        extra_files_tar = options.get("extra_files_tar")
        cmsis_path = options.get("cmsis_path")

        # Check Zephyr version
        version = self._get_platform_version(zephyr_base)
        if version != ZEPHYR_VERSION:
            message = f"Zephyr version found is not supported: found {version}, expected {ZEPHYR_VERSION}."
            if warning_as_error is not None and warning_as_error:
                raise server.ServerError(message=message)
            _LOG.warning(message)

        project_dir = pathlib.Path(project_dir)
        # Make project directory.
        project_dir.mkdir()

        # Copy ourselves and other python scripts to the generated project. TVM may perform further build steps on the generated project
        # by launching the copy.
        current_dir = pathlib.Path(__file__).parent.absolute()
        for file in os.listdir(current_dir):
            if file.endswith(".py"):
                shutil.copy2(current_dir / file, project_dir / file)

        # Copy launch script
        shutil.copy2(
            current_dir / "launch_microtvm_api_server.sh",
            project_dir / "launch_microtvm_api_server.sh",
        )

        # Copy boards.json file to generated project.
        shutil.copy2(BOARDS, project_dir / BOARDS.name)

        # Copy overlay files
        board_overlay_path = API_SERVER_DIR / "app-overlay" / f"{zephyr_board}.overlay"
        if board_overlay_path.exists():
            shutil.copy2(board_overlay_path, project_dir / f"{zephyr_board}.overlay")

        # Place Model Library Format tarball in the special location, which this script uses to decide
        # whether it's being invoked in a template or generated project.
        project_model_library_format_tar_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH
        shutil.copy2(model_library_format_path, project_model_library_format_tar_path)

        # Extract Model Library Format tarball.into <project_dir>/model.
        extract_path = os.path.splitext(project_model_library_format_tar_path)[0]
        with tarfile.TarFile(project_model_library_format_tar_path) as tf:
            os.makedirs(extract_path)
            tf.extractall(path=extract_path)

        # Emulated targets need their launcher shims copied alongside.
        if self._is_qemu(zephyr_board, use_fvp):
            shutil.copytree(API_SERVER_DIR / "qemu-hack", project_dir / "qemu-hack")
        elif self._is_fvp(zephyr_board, use_fvp):
            shutil.copytree(API_SERVER_DIR / "fvp-hack", project_dir / "fvp-hack")

        # Populate CRT.
        crt_path = project_dir / "crt"
        crt_path.mkdir()
        for item in self.CRT_COPY_ITEMS:
            src_path = os.path.join(standalone_crt_dir, item)
            dst_path = crt_path / item
            if os.path.isdir(src_path):
                shutil.copytree(src_path, dst_path)
            else:
                shutil.copy2(src_path, dst_path)

        # Populate CMakeLists.
        with open(project_dir / CMAKELIST_FILENAME, "w") as cmake_f:
            with open(API_SERVER_DIR / f"{CMAKELIST_FILENAME}.template", "r") as cmake_template_f:
                for line in cmake_template_f:
                    if self.API_SERVER_CRT_LIBS_TOKEN in line:
                        crt_libs = self.CRT_LIBS_BY_PROJECT_TYPE[project_type]
                        line = line.replace("<API_SERVER_CRT_LIBS>", crt_libs)
                    if self.CMAKE_ARGS_TOKEN in line:
                        line = self._generate_cmake_args(
                            extract_path,
                            zephyr_board,
                            use_fvp,
                            west_cmd,
                            zephyr_base,
                            verbose,
                            cmsis_path,
                        )
                    if self.QEMU_PIPE_TOKEN in line:
                        self.qemu_pipe_dir = pathlib.Path(tempfile.mkdtemp())
                        line = line.replace(self.QEMU_PIPE_TOKEN, str(self.qemu_pipe_dir / "fifo"))
                    cmake_f.write(line)

            if board_mem_size is not None:
                assert (
                    workspace_size_bytes < board_mem_size
                ), f"Workspace size {workspace_size_bytes} is larger than memory size {board_mem_size} on this board."
            cmake_f.write(
                f"target_compile_definitions(app PUBLIC -DTVM_WORKSPACE_SIZE_BYTES={workspace_size_bytes})\n"
            )

            if compile_definitions:
                flags = compile_definitions
                for item in flags:
                    # mlperftiny API defines go on their own target; everything else on app.
                    if "MAX_DB_INPUT_SIZE" in item or "TH_MODEL_VERSION" in item:
                        compile_target = "tinymlperf_api"
                    else:
                        compile_target = "app"
                    cmake_f.write(
                        f"target_compile_definitions({compile_target} PUBLIC {item})\n"
                    )

            if self._is_fvp(zephyr_board, use_fvp):
                cmake_f.write(f"target_compile_definitions(app PUBLIC -DFVP=1)\n")

        # NOTE(review): ``verbose`` is passed where _create_prj_conf declares
        # ``config_led`` -- confirm this is intentional.
        self._create_prj_conf(
            project_dir, zephyr_board, project_type, config_main_stack_size, verbose, use_fvp
        )

        # Populate crt-config.h
        crt_config_dir = project_dir / "crt_config"
        crt_config_dir.mkdir()
        shutil.copy2(
            API_SERVER_DIR / "crt_config" / "crt_config.h", crt_config_dir / "crt_config.h"
        )

        # Populate `src` and `include`
        src_dir = project_dir / "src"
        src_dir.mkdir()
        include_dir = project_dir / "include" / "tvm"
        include_dir.mkdir(parents=True)
        src_project_type_dir = API_SERVER_DIR / "src" / project_type
        self._copy_src_and_header_files(src_project_type_dir, project_dir)

        if self._is_fvp(zephyr_board, use_fvp):
            self._copy_src_and_header_files(src_project_type_dir / "fvp", project_dir)

        if project_type == "mlperftiny":
            shutil.copytree(src_project_type_dir / "api", src_dir / "api")

        # Populate extra_files
        if extra_files_tar:
            with tarfile.open(extra_files_tar, mode="r:*") as tf:
                tf.extractall(project_dir)
    def build(self, options):
        """Build the generated project with ``west build`` in a fresh build dir.

        For FVP targets, points ARMFVP_BIN_PATH at the bundled fvp-hack
        launcher and makes the FVP binary executable first.
        """
        if BUILD_DIR.exists():
            shutil.rmtree(BUILD_DIR)
        BUILD_DIR.mkdir()

        zephyr_board = _find_board_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)
        emu_platform = _find_platform_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)
        env = os.environ
        if self._is_fvp(zephyr_board, emu_platform == "armfvp"):
            env["ARMFVP_BIN_PATH"] = str((API_SERVER_DIR / "fvp-hack").resolve())
            # Note: We need to explicitly modify the file permissions and make it an executable to pass CI tests.
            # [To Do]: Move permission change to Build.groovy.j2
            st = os.stat(env["ARMFVP_BIN_PATH"] + "/FVP_Corstone_SSE-300_Ethos-U55")
            os.chmod(
                env["ARMFVP_BIN_PATH"] + "/FVP_Corstone_SSE-300_Ethos-U55",
                st.st_mode | stat.S_IEXEC,
            )
        check_call(options["west_cmd"].split(" ") + ["build"], cwd=API_SERVER_DIR, env=env)
# A list of all zephyr_board values which are known to launch using QEMU. Many platforms which
# launch through QEMU by default include "qemu" in their name. However, not all do. This list
# includes those tested platforms which do not include qemu.
_KNOWN_QEMU_ZEPHYR_BOARDS = ["mps2_an521", "mps3_an547"]
# A list of all zephyr_board values which are known to launch using ARM FVP (this script configures
# Zephyr to use that launch method).
_KNOWN_FVP_ZEPHYR_BOARDS = ["mps3_an547"]
@classmethod
def _is_fvp(cls, board, use_fvp):
if use_fvp:
assert (
board in cls._KNOWN_FVP_ZEPHYR_BOARDS
), "FVP can't be used to emulate this board on Zephyr"
return True
return False
@classmethod
def _is_qemu(cls, board, use_fvp=False):
return "qemu" in board or (
board in cls._KNOWN_QEMU_ZEPHYR_BOARDS and not cls._is_fvp(board, use_fvp)
)
@classmethod
def _has_fpu(cls, zephyr_board):
fpu_boards = [name for name, board in BOARD_PROPERTIES.items() if board["fpu"]]
return zephyr_board in fpu_boards
    def flash(self, options):
        """Flash the built firmware onto the attached board.

        Emulated targets need no flashing (they are launched from
        open_transport). Runner-specific extra args select the right device
        when several are attached.
        """
        serial_number = options.get("serial_number")
        west_cmd_list = options["west_cmd"].split(" ")

        if _find_platform_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME):
            return  # NOTE: qemu requires no flash step--it is launched from open_transport.

        flash_runner = _get_flash_runner()
        # The nRF5340DK requires an additional `nrfjprog --recover` before each flash cycle.
        # This is because readback protection is enabled by default when this device is flashed.
        # Otherwise, flashing may fail with an error such as the following:
        #  ERROR: The operation attempted is unavailable due to readback protection in
        #  ERROR: your device. Please use --recover to unlock the device.
        zephyr_board = _find_board_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)
        if zephyr_board.startswith("nrf5340dk") and flash_runner == "nrfjprog":
            recover_args = ["nrfjprog", "--recover"]
            recover_args.extend(_get_nrf_device_args(serial_number))
            check_call(recover_args, cwd=API_SERVER_DIR / "build")

        flash_extra_args = []
        if flash_runner == "openocd" and serial_number:
            flash_extra_args += ["--cmd-pre-init", f"""hla_serial {serial_number}"""]

        if flash_runner == "nrfjprog":
            flash_extra_args += _get_nrf_device_args(serial_number)

        check_call(
            west_cmd_list + ["flash", "-r", flash_runner] + flash_extra_args,
            cwd=API_SERVER_DIR / "build",
        )
    def open_transport(self, options):
        """Open a transport to the device (FVP, QEMU, or physical serial).

        Selects the transport type from the generated CMakeLists.txt, opens
        it, and registers an atexit hook so it is closed at shutdown.
        """
        west_cmd = options["west_cmd"]
        zephyr_board = _find_board_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)
        emu_platform = _find_platform_from_cmake_file(API_SERVER_DIR / CMAKELIST_FILENAME)
        if self._is_fvp(zephyr_board, emu_platform == "armfvp"):
            arm_fvp_path = options["arm_fvp_path"]
            verbose = options.get("verbose")
            transport = ZephyrFvpTransport(west_cmd, arm_fvp_path, verbose)
        elif self._is_qemu(zephyr_board):
            gdbserver_port = options.get("gdbserver_port")
            transport = ZephyrQemuTransport(west_cmd, gdbserver_port)
        else:
            zephyr_base = options["zephyr_base"]
            serial_number = options.get("serial_number")
            transport = ZephyrSerialTransport(zephyr_base, serial_number)

        to_return = transport.open()
        self._transport = transport
        # Ensure the emulator/serial port is released even on abrupt exit.
        atexit.register(lambda: self.close_transport())
        return to_return
    def close_transport(self):
        """Close the open transport, if any; a no-op when none is open."""
        if self._transport is not None:
            self._transport.close()
            self._transport = None
    def read_transport(self, n, timeout_sec):
        """Read up to ``n`` bytes from the transport; raise if none is open."""
        if self._transport is None:
            raise server.TransportClosedError()
        return self._transport.read(n, timeout_sec)
    def write_transport(self, data, timeout_sec):
        """Write ``data`` to the transport; raise if none is open."""
        if self._transport is None:
            raise server.TransportClosedError()
        return self._transport.write(data, timeout_sec)
def _set_nonblock(fd):
flag = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
new_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
assert (new_flag & os.O_NONBLOCK) != 0, "Cannot set file descriptor {fd} to non-blocking"
class ZephyrSerialTransport:
NRF5340_VENDOR_ID = 0x1366
# NRF5340_DK v1.0.0 uses VCOM2
# NRF5340_DK v2.0.0 uses VCOM1
NRF5340_DK_BOARD_VCOM_BY_PRODUCT_ID = {0x1055: "VCOM2", 0x1051: "VCOM1"}
    @classmethod
    def _lookup_baud_rate(cls, zephyr_base: str):
        """Return the console UART baud rate declared in the build's devicetree."""
        # TODO(mehrdadh): remove this hack once dtlib.py is a standalone project
        # https://github.com/zephyrproject-rtos/zephyr/blob/v2.7-branch/scripts/dts/README.txt
        sys.path.insert(
            0,
            os.path.join(zephyr_base, "scripts", "dts", "python-devicetree", "src", "devicetree"),
        )
        try:
            import dtlib  # pylint: disable=import-outside-toplevel
        finally:
            # Undo the sys.path hack whether or not the import succeeded.
            sys.path.pop(0)

        dt_inst = dtlib.DT(BUILD_DIR / "zephyr" / "zephyr.dts")
        # Follow /chosen -> zephyr,console -> current-speed in the devicetree.
        uart_baud = (
            dt_inst.get_node("/chosen")
            .props["zephyr,console"]
            .to_path()
            .props["current-speed"]
            .to_num()
        )
        _LOG.debug("zephyr transport: found UART baudrate from devicetree: %d", uart_baud)

        return uart_baud
@classmethod
def _find_nrf_serial_port(cls, serial_number: str = None):
com_ports = subprocess.check_output(
["nrfjprog", "--com"] + _get_device_args(serial_number), encoding="utf-8"
)
ports_by_vcom = {}
for line in com_ports.split("\n")[:-1]:
parts = line.split()
ports_by_vcom[parts[2]] = parts[1]
nrf_board = usb.core.find(idVendor=cls.NRF5340_VENDOR_ID)
if nrf_board == None:
raise Exception("_find_nrf_serial_port: unable to find NRF5340DK")
if nrf_board.idProduct in cls.NRF5340_DK_BOARD_VCOM_BY_PRODUCT_ID:
vcom_port = cls.NRF5340_DK_BOARD_VCOM_BY_PRODUCT_ID[nrf_board.idProduct]
else:
raise Exception("_find_nrf_serial_port: unable to find known NRF5340DK product ID")
return ports_by_vcom[vcom_port]
    @classmethod
    def _find_openocd_serial_port(cls, serial_number: str = None):
        """Return the serial device path for openocd-flashed boards (VID:PID or serial)."""
        return generic_find_serial_port(serial_number)
    @classmethod
    def _find_jlink_serial_port(cls, serial_number: str = None):
        """Return the serial device path for J-Link-flashed boards (VID:PID or serial)."""
        return generic_find_serial_port(serial_number)
    @classmethod
    def _find_stm32cubeprogrammer_serial_port(cls, serial_number: str = None):
        """Return the serial device path for STM32CubeProgrammer-flashed boards."""
        return generic_find_serial_port(serial_number)
@classmethod
def _find_serial_port(cls, serial_number: str = None):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return cls._find_nrf_serial_port(serial_number)
if flash_runner == "openocd":
return cls._find_openocd_serial_port(serial_number)
if flash_runner == "jlink":
return cls._find_jlink_serial_port(serial_number)
if flash_runner == "stm32cubeprogrammer":
return cls._find_stm32cubeprogrammer_serial_port(serial_number)
raise RuntimeError(f"Don't know how to deduce serial port for flash runner {flash_runner}")
    def __init__(self, zephyr_base: str, serial_number: str = None):
        # Zephyr tree location, needed to parse the devicetree for baud rate.
        self._zephyr_base = zephyr_base
        # Optional USB serial number used to disambiguate multiple boards.
        self._serial_number = serial_number
        # Open pyserial handle; None while the transport is closed.
        self._port = None
    def open(self):
        """Open the board's console UART and return the session timeouts."""
        port_path = self._find_serial_port(self._serial_number)
        self._port = serial.Serial(port_path, baudrate=self._lookup_baud_rate(self._zephyr_base))
        return server.TransportTimeouts(
            session_start_retry_timeout_sec=2.0,
            session_start_timeout_sec=5.0,
            session_established_timeout_sec=5.0,
        )
    def close(self):
        """Close the serial port and drop the handle."""
        self._port.close()
        self._port = None
    def read(self, n, timeout_sec):
        """Read up to ``n`` bytes; raise IoTimeoutError when nothing arrives in time."""
        self._port.timeout = timeout_sec
        to_return = self._port.read(n)
        if not to_return:
            raise server.IoTimeoutError()

        return to_return
def write(self, data, timeout_sec):
self._port.write_timeout = timeout_sec
bytes_written = 0
while bytes_written < len(data):
n = self._port.write(data)
data = data[n:]
bytes_written += n
# Outcomes observed while waiting for the QEMU subprocess to come up.
class ZephyrQemuMakeResult(enum.Enum):
    QEMU_STARTED = "qemu_started"
    MAKE_FAILED = "make_failed"
    EOF = "eof"
class ZephyrQemuTransport:
"""The user-facing Zephyr QEMU transport class."""
    def __init__(self, west_cmd: str, gdbserver_port: int = None):
        # Optional TCP port for attaching gdb to the emulated target.
        self._gdbserver_port = gdbserver_port
        # Handle on the `west build -t run` subprocess once launched.
        self.proc = None
        # Directory holding the QEMU I/O FIFOs; set by open().
        self.pipe_dir = None
        self.read_fd = None
        self.write_fd = None
        self._queue = queue.Queue()
        self._west_cmd = west_cmd
def open(self):
with open(BUILD_DIR / "CMakeCache.txt", "r") as cmake_cache_f:
for line in cmake_cache_f:
if "QEMU_PIPE:" in line:
self.pipe = pathlib.Path(line[line.find("=") + 1 :])
break
self.pipe_dir = self.pipe.parents[0]
self.write_pipe = self.pipe_dir / "fifo.in"
self.read_pipe = self.pipe_dir / "fifo.out"
os.mkfifo(self.write_pipe)
os.mkfifo(self.read_pipe)
env = None
if self._gdbserver_port:
env = os.environ.copy()
env["TVM_QEMU_GDBSERVER_PORT"] = self._gdbserver_port
self.proc = subprocess.Popen(
self._west_cmd.split(" ") + ["build", "-t", "run"],
cwd=BUILD_DIR,
env=env,
stdout=subprocess.PIPE,
)
self._wait_for_qemu()
# NOTE: although each pipe is unidirectional, open both as RDWR to work around a select
# limitation on linux. Without this, non-blocking I/O can't use timeouts because named
# FIFO are always considered ready to read when no one has opened them for writing.
self.read_fd = os.open(self.read_pipe, os.O_RDWR | os.O_NONBLOCK)
self.write_fd = os.open(self.write_pipe, os.O_RDWR | os.O_NONBLOCK)
_set_nonblock(self.read_fd)
_set_nonblock(self.write_fd)
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=10.0,
session_established_timeout_sec=10.0,
)
def close(self):
did_write = False
if self.write_fd is not None:
try:
server.write_with_timeout(
self.write_fd, b"\x01x", 1.0
) # Use a short timeout since we will kill the process
did_write = True
except server.IoTimeoutError:
pass
os.close(self.write_fd)
self.write_fd = None
if self.proc:
if not did_write:
self.proc.terminate()
try:
self.proc.wait(5.0)
except subprocess.TimeoutExpired:
self.proc.kill()
if self.read_fd:
os.close(self.read_fd)
self.read_fd = None
if self.pipe_dir is not None:
shutil.rmtree(self.pipe_dir)
self.pipe_dir = None
def read(self, n, timeout_sec):
return server.read_with_timeout(self.read_fd, n, timeout_sec)
def write(self, data, timeout_sec):
to_write = bytearray()
escape_pos = []
for i, b in enumerate(data):
if b == 0x01:
to_write.append(b)
escape_pos.append(i)
to_write.append(b)
while to_write:
num_written = server.write_with_timeout(self.write_fd, to_write, timeout_sec)
to_write = to_write[num_written:]
def _qemu_check_stdout(self):
for line in self.proc.stdout:
line = str(line)
_LOG.info("%s", line)
if "[QEMU] CPU" in line:
self._queue.put(ZephyrQemuMakeResult.QEMU_STARTED)
else:
line = re.sub("[^a-zA-Z0-9 \n]", "", line)
pattern = r"recipe for target (\w*) failed"
if re.search(pattern, line, re.IGNORECASE):
self._queue.put(ZephyrQemuMakeResult.MAKE_FAILED)
self._queue.put(ZephyrQemuMakeResult.EOF)
def _wait_for_qemu(self):
threading.Thread(target=self._qemu_check_stdout, daemon=True).start()
while True:
try:
item = self._queue.get(timeout=120)
except Exception:
raise TimeoutError("QEMU setup timeout.")
if item == ZephyrQemuMakeResult.QEMU_STARTED:
break
if item in [ZephyrQemuMakeResult.MAKE_FAILED, ZephyrQemuMakeResult.EOF]:
raise RuntimeError("QEMU setup failed.")
raise ValueError(f"{item} not expected.")
class ZephyrFvpMakeResult(enum.Enum):
    """Events published by the thread watching the FVP build/run stdout."""
    FVP_STARTED = "fvp_started"
    MICROTVM_API_SERVER_INIT = "fvp_initialized"
    MAKE_FAILED = "make_failed"
    EOF = "eof"
class BlockingStream:
    """Reimplementation of Stream class from Iris with blocking semantics."""

    def __init__(self):
        self.q = queue.Queue()
        self.unread = None

    def read(self, n=-1, timeout_sec=None):
        """Return up to *n* bytes, blocking only while no data is buffered."""
        assert (
            n != -1
        ), "expect firmware to open stdin using raw mode, and therefore expect sized read requests"
        # Start from any bytes left over by a previous over-long queue item.
        buf = self.unread or b""
        self.unread = None
        while len(buf) < n:
            try:
                # Block only when we have nothing at all to hand back; once
                # some data is in hand, drain the queue without waiting.
                buf += self.q.get(block=not len(buf), timeout=timeout_sec)
            except queue.Empty:
                break
        if len(buf) > n:
            self.unread = buf[n:]
            buf = buf[:n]
        return buf

    readline = read

    def write(self, data):
        """Queue *data* for a subsequent read()."""
        self.q.put(data)
class ZephyrFvpTransport:
    """A transport class that communicates with the ARM FVP via Iris server."""
    def __init__(self, arm_fvp_path: str, verbose: bool = False):
        # Path to the FVP binary; also used to locate the bundled Iris library.
        self._arm_fvp_path = arm_fvp_path
        self._verbose = verbose
        self.proc = None
        self._queue = queue.Queue()
        self._import_iris()
    def _import_iris(self):
        """Import the Iris client library shipped alongside the FVP binary."""
        assert self._arm_fvp_path, "arm_fvp_path is not defined."
        # Location as seen in the FVP_Corstone_SSE-300_11.15_24 tar.
        iris_lib_path = (
            pathlib.Path(self._arm_fvp_path).parent.parent.parent / "Iris" / "Python" / "iris"
        )
        sys.path.insert(0, str(iris_lib_path.parent))
        try:
            import iris.NetworkModelInitializer
        finally:
            # Restore sys.path even if the import fails.
            sys.path.pop(0)
        self._iris_lib = iris
        def _convertStringToU64Array(strValue):
            """Pack a bytes value into a little-endian uint64 tuple."""
            numBytes = len(strValue)
            if numBytes == 0:
                return []
            numU64 = (numBytes + 7) // 8
            # Extend the string ending with '\0', so that the string length is multiple of 8.
            # E.g. 'hello' is extended to: 'hello'+\0\0\0
            strExt = strValue.ljust(8 * numU64, b"\0")
            # Convert the string to a list of uint64_t in little endian
            return struct.unpack("<{}Q".format(numU64), strExt)
        # NOTE(review): overrides Iris' own helper with a bytes-aware version —
        # presumably to make semihost I/O work with bytes input; confirm
        # against the Iris release in use.
        iris.iris.convertStringToU64Array = _convertStringToU64Array
    def open(self):
        """Build & run the FVP, connect via Iris, and wait for semihost init."""
        args = ["ninja"]
        if self._verbose:
            args.append("-v")
        args.append("run")
        env = dict(os.environ)
        env["ARMFVP_BIN_PATH"] = str(API_SERVER_DIR / "fvp-hack")
        self.proc = subprocess.Popen(
            args,
            cwd=BUILD_DIR,
            env=env,
            stdout=subprocess.PIPE,
        )
        threading.Thread(target=self._fvp_check_stdout, daemon=True).start()
        self.iris_port = self._wait_for_fvp()
        _LOG.info("IRIS started on port %d", self.iris_port)
        NetworkModelInitializer = self._iris_lib.NetworkModelInitializer.NetworkModelInitializer
        self._model_init = NetworkModelInitializer(
            host="localhost", port=self.iris_port, timeout_in_ms=1000
        )
        self._model = self._model_init.start()
        self._target = self._model.get_target("component.FVP_MPS3_Corstone_SSE_300.cpu0")
        self._target.handle_semihost_io()
        # Replace the target's semihost streams with blocking ones.
        self._target._stdout = BlockingStream()
        self._target._stdin = BlockingStream()
        self._model.run(blocking=False, timeout=100)
        self._wait_for_semihost_init()
        _LOG.info("IRIS semihosting initialized.")
        return server.TransportTimeouts(
            session_start_retry_timeout_sec=2.0,
            session_start_timeout_sec=10.0,
            session_established_timeout_sec=10.0,
        )
    def _fvp_check_stdout(self):
        """Thread target: watch build/run stdout and publish events to _queue."""
        START_MSG = "Iris server started listening to port"
        INIT_MSG = "microTVM Zephyr runtime - running"
        for line in self.proc.stdout:
            line = str(line, "utf-8")
            _LOG.info("%s", line)
            start_msg = re.match(START_MSG + r" ([0-9]+)\n", line)
            init_msg = re.match(INIT_MSG, line)
            if start_msg:
                self._queue.put((ZephyrFvpMakeResult.FVP_STARTED, int(start_msg.group(1))))
            elif init_msg:
                self._queue.put((ZephyrFvpMakeResult.MICROTVM_API_SERVER_INIT, None))
                break
            else:
                line = re.sub("[^a-zA-Z0-9 \n]", "", line)
                pattern = r"recipe for target (\w*) failed"
                if re.search(pattern, line, re.IGNORECASE):
                    self._queue.put((ZephyrFvpMakeResult.MAKE_FAILED, None))
        self._queue.put((ZephyrFvpMakeResult.EOF, None))
    def _wait_for_fvp(self):
        """waiting for the START_MSG to appear on the stdout"""
        while True:
            try:
                item = self._queue.get(timeout=120)
            except Exception:
                raise TimeoutError("FVP setup timeout.")
            if item[0] == ZephyrFvpMakeResult.FVP_STARTED:
                return item[1]
            if item[0] in [ZephyrFvpMakeResult.MAKE_FAILED, ZephyrFvpMakeResult.EOF]:
                raise RuntimeError("FVP setup failed.")
            raise ValueError(f"{item} not expected.")
    def _wait_for_semihost_init(self):
        """waiting for the INIT_MSG to appear on the stdout"""
        while True:
            try:
                item = self._queue.get(timeout=240)
            except Exception:
                raise TimeoutError("semihost init timeout.")
            if item[0] == ZephyrFvpMakeResult.MICROTVM_API_SERVER_INIT:
                return
            raise ValueError(f"{item} not expected.")
    def close(self):
        """Shut down the Iris model and terminate the FVP process tree."""
        self._model._shutdown_model()
        self._model.client.disconnect(force=True)
        parent = psutil.Process(self.proc.pid)
        if parent:
            for child in parent.children(recursive=True):
                child.terminate()
            parent.terminate()
    def read(self, n, timeout_sec):
        """Read up to *n* bytes from the target's semihosted stdout."""
        return self._target.stdout.read(n, timeout_sec)
    def write(self, data, timeout_sec):
        """Write *data* to the target's semihosted stdin."""
        self._target.stdin.write(data)
# Entry point: serve the microTVM project API using this file's Handler.
if __name__ == "__main__":
    server.main(Handler())
| 45,534 | 34.741758 | 138 | py |
tvm | tvm-main/apps/bundle_deploy/build_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Creates a simple TVM modules."""
import argparse
import os
import pathlib
from tvm import relay
import tvm
from tvm import runtime as tvm_runtime
import logging
from tvm.relay.backend import Runtime
from tvm.contrib import cc as _cc
# (runtime, output-filename-template) pairs: build once against the C runtime
# (CRT) and once against the C++ runtime, both as system libraries.
RUNTIMES = [
    (Runtime("crt", {"system-lib": True}), "{name}_c.{ext}"),
    (Runtime("cpp", {"system-lib": True}), "{name}_cpp.{ext}"),
]
def build_module(opts):
    """Build mobilenet0.25 (MXNet model zoo) for each runtime flavor.

    For every entry in RUNTIMES this writes three artifacts into
    ``opts.out_dir``: the compiled library, the graph JSON, and the
    serialized parameters.
    """
    from mxnet.gluon.model_zoo.vision import get_model

    input_shape = (1, 3, 224, 224)
    net = get_model("mobilenet0.25", pretrained=True)
    mod, params = relay.frontend.from_mxnet(net, {"data": input_shape})

    # Append a softmax so the deployed module emits probabilities.
    main_fn = mod["main"]
    func = relay.Function(
        main_fn.params, relay.nn.softmax(main_fn.body), None, main_fn.type_params, main_fn.attrs
    )

    build_dir = os.path.abspath(opts.out_dir)
    if not os.path.isdir(build_dir):
        os.makedirs(build_dir)

    for runtime, file_format_str in RUNTIMES:
        with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
            # NOTE: rebinds `params`, so later iterations reuse the lowered params.
            graph, lib, params = relay.build(func, "llvm", runtime=runtime, params=params)

        ext = "tar" if str(runtime) == "crt" else "o"
        lib_file_name = os.path.join(build_dir, file_format_str.format(name="model", ext=ext))
        if str(runtime) == "crt":
            lib.export_library(lib_file_name)
        else:
            # The C++-runtime flavor must be save()d as a bare object file:
            # export_library would wrap it in another shared object whose
            # init_array is not populated correctly when the two are combined.
            # TODO(areusch): Obliterate runtime.cc and replace with libtvm_runtime.so.
            lib.save(lib_file_name)

        with open(
            os.path.join(build_dir, file_format_str.format(name="graph", ext="json")), "w"
        ) as f_graph_json:
            f_graph_json.write(graph)
        with open(
            os.path.join(build_dir, file_format_str.format(name="params", ext="bin")), "wb"
        ) as f_params:
            f_params.write(tvm_runtime.save_param_dict(params))
def build_test_module(opts):
    """Build a tiny broadcast-add Relay module plus test vectors.

    For each runtime flavor this writes the compiled library, graph JSON,
    lowered params, a random input tensor, and its expected output into
    ``opts.out_dir``.
    """
    import numpy as np

    lhs = relay.var("x", shape=(10, 5))
    rhs = relay.var("y", shape=(1, 5))
    func = relay.Function([lhs, rhs], relay.add(lhs, rhs))
    x_data = np.random.rand(10, 5).astype("float32")
    y_data = np.random.rand(1, 5).astype("float32")
    params = {"y": y_data}

    build_dir = os.path.abspath(opts.out_dir)
    if not os.path.isdir(build_dir):
        os.makedirs(build_dir)

    for runtime, file_format_str in RUNTIMES:
        with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
            graph, lib, lowered_params = relay.build(
                tvm.IRModule.from_expr(func),
                "llvm",
                runtime=runtime,
                params=params,
            )

        ext = "tar" if str(runtime) == "crt" else "o"
        lib_file_name = os.path.join(build_dir, file_format_str.format(name="test_model", ext=ext))
        if str(runtime) == "crt":
            lib.export_library(lib_file_name)
        else:
            # The C++-runtime flavor must be save()d as a bare object file:
            # export_library would wrap it in another shared object whose
            # init_array is not populated correctly when the two are combined.
            # TODO(areusch): Obliterate runtime.cc and replace with libtvm_runtime.so.
            lib.save(lib_file_name)

        with open(
            os.path.join(build_dir, file_format_str.format(name="test_graph", ext="json")), "w"
        ) as f_graph_json:
            f_graph_json.write(graph)
        with open(
            os.path.join(build_dir, file_format_str.format(name="test_params", ext="bin")), "wb"
        ) as f_params:
            f_params.write(tvm_runtime.save_param_dict(lowered_params))
        with open(
            os.path.join(build_dir, file_format_str.format(name="test_data", ext="bin")), "wb"
        ) as fp:
            fp.write(x_data.astype(np.float32).tobytes())
        # Reference output for the add: broadcasting y across x's rows.
        x_output = x_data + y_data
        with open(
            os.path.join(build_dir, file_format_str.format(name="test_output", ext="bin")), "wb"
        ) as fp:
            fp.write(x_output.astype(np.float32).tobytes())
def build_inputs(opts):
    """Download the test cat image and write it as preprocessed float32 bytes."""
    from tvm.contrib import download
    from PIL import Image
    import numpy as np

    build_dir = os.path.abspath(opts.out_dir)

    # Download test image
    image_url = "https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg"
    image_fn = os.path.join(build_dir, "cat.png")
    download.download(image_url, image_fn)
    image = Image.open(image_fn).resize((224, 224))

    # Normalize with ImageNet statistics, then HWC -> NCHW with a batch axis.
    x = np.array(image) - np.array([123.0, 117.0, 104.0])
    x /= np.array([58.395, 57.12, 57.375])
    x = x.transpose((2, 0, 1))[np.newaxis, :]

    print("x", x.shape)
    with open(os.path.join(build_dir, "cat.bin"), "wb") as fp:
        fp.write(x.astype(np.float32).tobytes())
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser()
    # -o/--out-dir: where artifacts are written; -t/--test builds the tiny
    # add-module (with test vectors) instead of mobilenet.
    parser.add_argument("-o", "--out-dir", default=".")
    parser.add_argument("-t", "--test", action="store_true")
    opts = parser.parse_args()
    if opts.test:
        build_test_module(opts)
    else:
        build_module(opts)
        build_inputs(opts)
| 6,491 | 37.642857 | 101 | py |
tvm | tvm-main/apps/ios_rpc/init_proj.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import re
# Placeholder values baked into the checked-in Xcode project file; they are
# substituted with the user-supplied values below.
default_team_id = "3FR42MXLK9"
default_tvm_build_dir = "path-to-tvm-ios-build-folder"

parser = argparse.ArgumentParser(
    description="Update tvmrpc.xcodeproj\
developer information"
)
parser.add_argument(
    "--team_id",
    type=str,
    required=True,
    help="Apple Developer Team ID.\n\
           Can be found here:\n\
           \n\
           https://developer.apple.com/account/#/membership\n\
           (example: {})".format(
        default_team_id
    ),
)
parser.add_argument(
    "--tvm_build_dir",
    type=str,
    required=True,
    help="Path to directory with libtvm_runtime.dylib",
)
args = parser.parse_args()
team_id = args.team_id
tvm_build_dir = args.tvm_build_dir

# Read-modify-write the Xcode project file.  Context managers guarantee the
# file handles are closed even if an I/O error occurs (the previous code used
# bare open()/close() pairs, leaking the handle on error).
proj_path = "tvmrpc.xcodeproj/project.pbxproj"
with open(proj_path) as fi:
    proj_config = fi.read()
proj_config = proj_config.replace(default_team_id, team_id)
proj_config = proj_config.replace(default_tvm_build_dir, tvm_build_dir)
with open(proj_path, "w") as fo:
    fo.write(proj_config)
| 1,845 | 29.766667 | 71 | py |
tvm | tvm-main/apps/ios_rpc/tests/ios_rpc_test.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testcode for iOS RPC.
To use it, start a rpc proxy with "python -m tvm.exec.rpc_proxy".
And configure the proxy host field as commented.
"""
import argparse
import os
import re
import sys
import numpy as np
import tvm
from tvm import rpc, te
from tvm.contrib import utils, xcode
# Change target configuration, this is setting for iphone6s
arch = "arm64"
sdk = "iphoneos"
target = "llvm -mtriple=%s-apple-darwin" % arch
# Supported RPC connection modes; "tracker" requests a device from an RPC
# tracker, the other two connect directly (or via proxy).
MODES = {"proxy": rpc.connect, "tracker": rpc.connect_tracker, "standalone": rpc.connect}
# override metal compiler to compile to iphone
@tvm.register_func("tvm_callback_metal_compile")
def compile_metal(src, target):
    """Compile Metal shader *src* for the configured iOS SDK via xcode."""
    return xcode.compile_metal(src, sdk=sdk)
def test_rpc_module(host, port, key, mode):
    """Build a vector-add kernel for Metal and CPU, run both on a remote iOS
    device over RPC, and check results and timing.
    """
    # graph
    n = tvm.runtime.convert(1024)
    A = te.placeholder((n,), name="A")
    B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
    temp = utils.tempdir()
    s = te.create_schedule(B.op)
    xo, xi = s[B].split(B.op.axis[0], factor=64)
    s[B].bind(xi, te.thread_axis("threadIdx.x"))
    s[B].bind(xo, te.thread_axis("blockIdx.x"))
    # Build the dynamic lib.
    # If we don't want to do metal and only use cpu, just set target to be target
    f = tvm.build(s, [A, B], tvm.target.Target("metal", host=target), name="myadd")
    path_dso1 = temp.relpath("dev_lib.dylib")
    f.export_library(path_dso1, xcode.create_dylib, arch=arch, sdk=sdk)
    # Second schedule: plain CPU-parallel version of the same kernel.
    s = te.create_schedule(B.op)
    xo, xi = s[B].split(B.op.axis[0], factor=64)
    s[B].parallel(xi)
    s[B].pragma(xo, "parallel_launch_point")
    s[B].pragma(xi, "parallel_barrier_when_finish")
    f = tvm.build(s, [A, B], target, name="myadd_cpu")
    path_dso2 = temp.relpath("cpu_lib.dylib")
    f.export_library(path_dso2, xcode.create_dylib, arch=arch, sdk=sdk)
    # connect to the proxy
    if mode == "tracker":
        remote = MODES[mode](host, port).request(key)
    else:
        remote = MODES[mode](host, port, key=key)
    remote.upload(path_dso1)
    dev = remote.metal(0)
    f1 = remote.load_module("dev_lib.dylib")
    a_np = np.random.uniform(size=1024).astype(A.dtype)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev)
    time_f = f1.time_evaluator(f1.entry_name, dev, number=10)
    cost = time_f(a, b).mean
    print("Metal: %g secs/op" % cost)
    np.testing.assert_equal(b.numpy(), a.numpy() + 1)
    # CPU
    dev = remote.cpu(0)
    remote.upload(path_dso2)
    f2 = remote.load_module("cpu_lib.dylib")
    a_np = np.random.uniform(size=1024).astype(A.dtype)
    a = tvm.nd.array(a_np, dev)
    b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), dev)
    time_f = f2.time_evaluator(f2.entry_name, dev, number=10)
    cost = time_f(a, b).mean
    print("CPU: %g secs/op" % cost)
    np.testing.assert_equal(b.numpy(), a.numpy() + 1)
if __name__ == "__main__":
    # CLI entry point: choose the RPC endpoint, device key, and connect mode.
    parser = argparse.ArgumentParser(description="Demo app demonstrates how ios_rpc works.")
    parser.add_argument("--host", required=True, type=str, help="Adress of rpc server")
    parser.add_argument("--port", type=int, default=9090, help="rpc port (default: 9090)")
    parser.add_argument("--key", type=str, default="iphone", help="device key (default: iphone)")
    parser.add_argument(
        "--mode",
        type=str,
        default="tracker",
        help="type of RPC connection (default: tracker), possible values: {}".format(
            ", ".join(MODES.keys())
        ),
    )
    args = parser.parse_args()
    assert args.mode in MODES.keys()
    test_rpc_module(args.host, args.port, args.key, args.mode)
| 4,352 | 36.205128 | 97 | py |
tvm | tvm-main/apps/ios_rpc/tests/ios_rpc_mobilenet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import os
import re
import sys
import coremltools
import numpy as np
import tvm
from mxnet import gluon
from PIL import Image
from tvm import relay, rpc
from tvm.contrib import coreml_runtime, graph_executor, utils, xcode
from tvm.contrib.download import download_testdata
from tvm.contrib.target import coreml as _coreml
from tvm.relay import transform
from tvm.relay.expr_functor import ExprMutator
from tvm.relay.op.annotation import compiler_begin, compiler_end
from tvm.relay.quantize.quantize import prerequisite_optimize
# Change target configuration, this is setting for iphone6s
# arch = "x86_64"
# sdk = "iphonesimulator"
arch = "arm64"
sdk = "iphoneos"
# LLVM host target triple for the chosen device architecture.
target_host = "llvm -mtriple=%s-apple-darwin" % arch
# Supported RPC connection modes; "tracker" requests a device from a tracker.
MODES = {"proxy": rpc.connect, "tracker": rpc.connect_tracker, "standalone": rpc.connect}
# override metal compiler to compile to iphone
@tvm.register_func("tvm_callback_metal_compile")
def compile_metal(src, target):
    """Compile Metal shader *src* for the configured iOS SDK via xcode."""
    return xcode.compile_metal(src, sdk=sdk)
def prepare_input():
    """Download the test cat image and the ImageNet label map.

    Returns
    -------
    (numpy.ndarray, dict)
        A (1, 3, 224, 224) float32 NCHW tensor normalized with ImageNet
        statistics, and a mapping from class id to human-readable label.
    """
    img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    img_name = "cat.png"
    synset_url = "".join(
        [
            "https://gist.githubusercontent.com/zhreshold/",
            "4d0b62f3d01426887599d4f7ede23ee5/raw/",
            "596b27d23537e5a1b5751d2b0481ef172f58b539/",
            "imagenet1000_clsid_to_human.txt",
        ]
    )
    synset_name = "imagenet1000_clsid_to_human.txt"
    # Use img_name here (the previous code duplicated the literal and left
    # the variable unused).
    img_path = download_testdata(img_url, img_name, module="data")
    synset_path = download_testdata(synset_url, synset_name, module="data")
    with open(synset_path) as f:
        # SECURITY: eval() executes arbitrary code from the downloaded file.
        # The gist is pinned to a fixed revision, but ast.literal_eval would
        # be a safer parser for this dict literal.
        synset = eval(f.read())
    image = Image.open(img_path).resize((224, 224))
    # Normalize with ImageNet mean/stddev, then HWC -> NCHW with a batch axis.
    image = np.array(image) - np.array([123.0, 117.0, 104.0])
    image /= np.array([58.395, 57.12, 57.375])
    image = image.transpose((2, 0, 1))
    image = image[np.newaxis, :]
    return image.astype("float32"), synset
def get_model(model_name, data_shape):
    """Load a pretrained Gluon model, convert it to Relay, and append softmax.

    Returns the Relay function (with a softmax output) and its parameters.
    """
    gluon_model = gluon.model_zoo.vision.get_model(model_name, pretrained=True)
    mod, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape})
    # we want a probability so add a softmax operator
    main_fn = mod["main"]
    softmax_fn = relay.Function(
        main_fn.params, relay.nn.softmax(main_fn.body), None, main_fn.type_params, main_fn.attrs
    )
    return softmax_fn, params
def test_mobilenet(host, port, key, mode):
    """Compile mobilenetv2 for CPU, Metal, and Core ML and run each variant on
    a remote iOS device via RPC, printing the top-1 prediction and timings.
    """
    temp = utils.tempdir()
    image, synset = prepare_input()
    model, params = get_model("mobilenetv2_1.0", image.shape)
    def run(mod, target):
        """Build for *target*, ship the dylib to the device, and execute."""
        with relay.build_config(opt_level=3):
            lib = relay.build(
                mod, target=tvm.target.Target(target, host=target_host), params=params
            )
        path_dso = temp.relpath("deploy.dylib")
        lib.export_library(path_dso, xcode.create_dylib, arch=arch, sdk=sdk)
        # connect to the proxy
        if mode == "tracker":
            remote = MODES[mode](host, port).request(key)
        else:
            remote = MODES[mode](host, port, key=key)
        remote.upload(path_dso)
        if target == "metal":
            dev = remote.metal(0)
        else:
            dev = remote.cpu(0)
        lib = remote.load_module("deploy.dylib")
        m = graph_executor.GraphModule(lib["default"](dev))
        m.set_input("data", tvm.nd.array(image, dev))
        m.run()
        tvm_output = m.get_output(0)
        top1 = np.argmax(tvm_output.numpy()[0])
        print("TVM prediction top-1:", top1, synset[top1])
        # evaluate
        ftimer = m.module.time_evaluator("run", dev, number=3, repeat=10)
        prof_res = np.array(ftimer().results) * 1000
        print("%-19s (%s)" % ("%.2f ms" % np.mean(prof_res), "%.2f ms" % np.std(prof_res)))
    def annotate(func, compiler):
        """
        An annotator for Core ML.
        """
        # Bind free variables to the constant values.
        bind_dict = {}
        for arg in func.params:
            name = arg.name_hint
            if name in params:
                bind_dict[arg] = relay.const(params[name])
        func = relay.bind(func, bind_dict)
        # Annotate the entire graph for Core ML
        mod = tvm.IRModule()
        mod["main"] = func
        seq = tvm.transform.Sequential(
            [
                transform.SimplifyInference(),
                transform.FoldConstant(),
                transform.FoldScaleAxis(),
                transform.AnnotateTarget(compiler),
                transform.MergeCompilerRegions(),
                transform.PartitionGraph(),
            ]
        )
        with relay.build_config(opt_level=3):
            mod = seq(mod)
        return mod
    # CPU
    run(model, target_host)
    # Metal
    run(model, "metal")
    # CoreML
    run(annotate(model, "coremlcompiler"), target_host)
if __name__ == "__main__":
    # CLI entry point: choose the RPC endpoint, device key, and connect mode.
    parser = argparse.ArgumentParser(description="Demo app demonstrates how ios_rpc works.")
    parser.add_argument("--host", required=True, type=str, help="Adress of rpc server")
    parser.add_argument("--port", type=int, default=9090, help="rpc port (default: 9090)")
    parser.add_argument("--key", type=str, default="iphone", help="device key (default: iphone)")
    parser.add_argument(
        "--mode",
        type=str,
        default="tracker",
        help="type of RPC connection (default: tracker), possible values: {}".format(
            ", ".join(MODES.keys())
        ),
    )
    args = parser.parse_args()
    assert args.mode in MODES.keys()
    test_mobilenet(args.host, args.port, args.key, args.mode)
| 6,368 | 33.241935 | 97 | py |
tvm | tvm-main/apps/wasm-standalone/wasm-graph/tools/build_graph_lib.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Builds a simple resnet50 graph for testing."""
import argparse
import os
import subprocess
import sys
import onnx
import tvm
from tvm import relay, runtime
from tvm.contrib.download import download_testdata
from tvm.contrib import graph_executor
from PIL import Image
import numpy as np
import tvm.relay as relay
# This example uses resnet50-v2-7 model
# Pretrained ONNX ResNet50-v2 from the ONNX model zoo.
model_url = (
    "https://github.com/onnx/models/raw/main/"
    "vision/classification/resnet/model/"
    "resnet50-v2-7.onnx"
)
def build_graph_lib(opt_level):
    """Compiles the pre-trained model with TVM.

    Downloads resnet50-v2-7, compiles it for wasm32 with SIMD, archives the
    object file with llvm-ar, and writes graph.json/graph.params next to it.

    Parameters
    ----------
    opt_level : int
        Relay optimization level (0-3) passed to PassContext.
    """
    out_dir = os.path.join(sys.path[0], "../lib")
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Follow the tutorial to download and compile the model
    model_path = download_testdata(model_url, "resnet50-v2-7.onnx", module="onnx")
    onnx_model = onnx.load(model_path)

    img_url = "https://s3.amazonaws.com/model-server/inputs/kitten.jpg"
    img_path = download_testdata(img_url, "imagenet_cat.png", module="data")

    # Resize it to 224x224
    resized_image = Image.open(img_path).resize((224, 224))
    img_data = np.asarray(resized_image).astype("float32")

    # Our input image is in HWC layout while ONNX expects CHW input, so convert the array
    img_data = np.transpose(img_data, (2, 0, 1))

    # Normalize according to the ImageNet input specification
    imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
    imagenet_stddev = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
    norm_img_data = (img_data / 255 - imagenet_mean) / imagenet_stddev

    # Add the batch dimension, as we are expecting 4-dimensional input: NCHW.
    img_data = np.expand_dims(norm_img_data, axis=0)

    input_name = "data"
    shape_dict = {input_name: img_data.shape}
    mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
    target = "llvm -mtriple=wasm32-unknown-unknown -mattr=+simd128"

    with tvm.transform.PassContext(opt_level=opt_level):
        factory = relay.build(
            mod,
            target=target,
            params=params,
            runtime=tvm.relay.backend.Runtime("cpp", {"system-lib": True}),
        )

    # Save the model artifacts to obj_file
    obj_file = os.path.join(out_dir, "graph.o")
    factory.get_lib().save(obj_file)

    # Run llvm-ar to archive obj_file into lib_file.  check=True makes a
    # failing archiver abort the build (the previous code ignored its exit
    # status, leaving a missing/stale .a behind silently).
    lib_file = os.path.join(out_dir, "libgraph_wasm32.a")
    cmds = [os.environ.get("LLVM_AR", "llvm-ar-10"), "rcs", lib_file, obj_file]
    subprocess.run(cmds, check=True)

    # Save the json and params
    with open(os.path.join(out_dir, "graph.json"), "w") as f_graph:
        f_graph.write(factory.get_graph_json())
    with open(os.path.join(out_dir, "graph.params"), "wb") as f_params:
        f_params.write(runtime.save_param_dict(factory.get_params()))
if __name__ == "__main__":
    # CLI entry point: pick the Relay optimization level and build the lib.
    parser = argparse.ArgumentParser(description="ONNX model build example")
    parser.add_argument(
        "-O",
        "--opt-level",
        type=int,
        default=0,
        help="level of optimization. 0 is non-optimized and 3 is the highest level",
    )
    args = parser.parse_args()
    build_graph_lib(args.opt_level)
| 3,979 | 34.221239 | 89 | py |
tvm | tvm-main/apps/tf_tvmdsoop/tests/test_tfop_module.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test script for tf op module"""
import tempfile
import os
import logging
import tensorflow as tf
import numpy as np
import tvm
from tvm import te
from tvm.contrib import tf_op
def test_use_tvmdso_op():
    """main test function"""
    def export_cpu_add_lib():
        """create cpu add op lib"""
        n = te.var("n")
        ph_a = te.placeholder((n,), name="ph_a")
        ph_b = te.placeholder((n,), name="ph_b")
        ph_c = te.compute(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name="ph_c")
        sched = te.create_schedule(ph_c.op)
        fadd_dylib = tvm.build(sched, [ph_a, ph_b, ph_c], "c", name="vector_add")
        lib_path = tempfile.mktemp("tvm_add_dll.so")
        fadd_dylib.export_library(lib_path)
        return lib_path
    def export_gpu_add_lib():
        """create gpu add op lib"""
        n = te.var("n")
        ph_a = te.placeholder((n,), name="ph_a")
        ph_b = te.placeholder((n,), name="ph_b")
        ph_c = te.compute(ph_a.shape, lambda i: ph_a[i] + ph_b[i], name="ph_c")
        sched = te.create_schedule(ph_c.op)
        # Split the add loop and bind the pieces onto CUDA blocks/threads.
        b_axis, t_axis = sched[ph_c].split(ph_c.op.axis[0], factor=64)
        sched[ph_c].bind(b_axis, te.thread_axis("blockIdx.x"))
        sched[ph_c].bind(t_axis, te.thread_axis("threadIdx.x"))
        fadd_dylib = tvm.build(sched, [ph_a, ph_b, ph_c], "cuda", name="vector_add")
        lib_path = tempfile.mktemp("tvm_add_cuda_dll.so")
        fadd_dylib.export_library(lib_path)
        return lib_path
    def test_add(session, lib_path, tf_device):
        """test add lib with TensorFlow wrapper"""
        module = tf_op.OpModule(lib_path)
        left = tf.placeholder("float32", shape=[4])
        right = tf.placeholder("float32", shape=[4])
        feed_dict = {left: [1.0, 2.0, 3.0, 4.0], right: [5.0, 6.0, 7.0, 8.0]}
        expect = np.asarray([6.0, 8.0, 10.0, 12.0])
        # Exercise the three supported ways of declaring the output shape.
        add1 = module.func("vector_add", output_shape=[4], output_dtype="float")
        add2 = module.func("vector_add", output_shape=tf.shape(left), output_dtype="float")
        add3 = module.func("vector_add", output_shape=[tf.shape(left)[0]], output_dtype="float")
        with tf.device(tf_device):
            output1 = session.run(add1(left, right), feed_dict)
            np.testing.assert_equal(output1, expect)
            output2 = session.run(add2(left, right), feed_dict)
            np.testing.assert_equal(output2, expect)
            output3 = session.run(add3(left, right), feed_dict)
            np.testing.assert_equal(output3, expect)
    def cpu_test(session):
        """test function for cpu"""
        cpu_lib = None
        try:
            cpu_lib = export_cpu_add_lib()
            test_add(session, cpu_lib, "/cpu:0")
        finally:
            # Always remove the temporary library, even if the test fails.
            if cpu_lib is not None:
                os.remove(cpu_lib)
    def gpu_test(session):
        """test function for gpu"""
        gpu_lib = None
        try:
            gpu_lib = export_gpu_add_lib()
            test_add(session, gpu_lib, "/gpu:0")
        finally:
            if gpu_lib is not None:
                os.remove(gpu_lib)
    with tf.Session() as session:
        if tvm.runtime.enabled("cpu"):
            logging.info("Test TensorFlow op on cpu kernel")
            cpu_test(session)
        if tvm.runtime.enabled("gpu"):
            logging.info("Test TensorFlow op on gpu kernel")
            gpu_test(session)
if __name__ == "__main__":
test_use_tvmdso_op()
| 4,230 | 35.791304 | 96 | py |
tvm | tvm-main/apps/cpp_clml/scripts/clml_codegen.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sys
import numpy as np
import tvm
from tvm import relay
from tvm.driver import tvmc
from tvm.relay.op.contrib import clml
from tvm.contrib import utils
from string import Template
def main():
    """Partition a model for CLML, build it for OpenCL, and emit artifacts.

    Usage: python clml_codegen.py <model_path>

    Side effects: writes ``clml_params.npz`` in the working directory and
    ``../clml_models.cc`` (then clang-formats it in place).
    """
    import subprocess  # local: only needed for the final formatting step

    print("CLML Codegen")
    if len(sys.argv) != 2:
        print("Usage: python clml_codegen.py <model_path>")
        return

    tvmc_model = tvmc.load(sys.argv[1])
    mod = tvmc_model.mod
    params = tvmc_model.params
    with tvm.transform.PassContext(opt_level=3):
        mod = tvmc.transform.convert_graph_layout(mod, "NCHW")

    with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]):
        clml_mod = clml.partition_for_clml(mod, params)
        libm = relay.build(
            clml_mod,
            target="opencl",
            target_host="llvm -mtriple=aarch64-linux-gnu",
            params=params,
        )

    # Extract CLML related params
    (clml_params_save, gen_src) = clml.CLMLGenSrc(libm).get_artifacts()
    np.savez("clml_params.npz", **clml_params_save)
    # FIX: use a context manager so the file is closed and flushed even on
    # error, and in particular before clang-format runs over it.
    with open("../clml_models.cc", "w") as f_src:
        f_src.write("\n".join(gen_src))
    # FIX: os.popen() does not wait for the child unless its stream is read,
    # so the script could exit mid-format; subprocess.run() blocks.
    subprocess.run(["clang-format-10", "-i", "../clml_models.cc"], check=False)


if __name__ == "__main__":
    main()
| 2,074 | 30.923077 | 81 | py |
tvm | tvm-main/apps/sgx/read_results.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import struct
import sys
import numpy as np
def float_bytes(l):
    """Yield consecutive 4-byte slices of *l*, one per float32 value.

    A trailing slice shorter than 4 bytes is yielded as-is (struct.unpack
    in the caller will reject it).
    """
    step = 4
    start = 0
    while start < len(l):
        yield l[start : start + step]
        start += step
# Read raw native-endian float32 values from stdin and print them as a
# NumPy array.  NOTE(review): assumes stdin length is a multiple of 4 bytes;
# struct.unpack raises on a short trailing chunk.
floats = [struct.unpack("f", f)[0] for f in float_bytes(sys.stdin.buffer.read())]
print(np.array(floats))
| 1,022 | 32 | 81 | py |
tvm | tvm-main/apps/sgx/src/build_model.py | #!/usr/bin/python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Creates a simple TVM modules."""
import os
from os import path as osp
import sys
from tvm import relay, runtime
from tvm.relay import testing
import tvm
from tvm import te
def main():
    """Build a ResNet-18 Relay module as a system-lib and dump its
    artifacts (model.o, graph.json, params.bin) into the directory given
    as argv[1] (created if missing)."""
    # FIX: removed a dead mlp.get_workload() call whose result (and dshape)
    # were immediately overwritten by the resnet workload below.
    dshape = (1, 3, 224, 224)
    net, params = relay.testing.resnet.get_workload(
        layers=18, batch_size=dshape[0], image_shape=dshape[1:]
    )
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(
            net,
            "llvm",
            params=params,
            runtime=tvm.relay.backend.Runtime("cpp", {"system-lib": True}),
        )

    build_dir = osp.abspath(sys.argv[1])
    # exist_ok=True makes the previous isdir() pre-check redundant.
    os.makedirs(build_dir, exist_ok=True)

    lib.save(osp.join(build_dir, "model.o"))
    with open(osp.join(build_dir, "graph.json"), "w") as f_graph_json:
        f_graph_json.write(graph)
    with open(osp.join(build_dir, "params.bin"), "wb") as f_params:
        f_params.write(runtime.save_param_dict(params))


if __name__ == "__main__":
    main()
| 1,950 | 30.467742 | 87 | py |
tvm | tvm-main/apps/cpp_rtvm/scripts/download_models.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Staging directory for downloaded/exported model files.
tmp_dir = "./model_data/"
# Paths of successfully exported models, printed at the end.
dload_models = []

# Keras : Resnet50
try:
    from tensorflow.keras.applications.resnet50 import ResNet50

    model_file_name = "{}/{}".format(tmp_dir + "keras-resnet50", "resnet50.h5")
    model = ResNet50(include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000)
    model.save(model_file_name)
    dload_models.append(model_file_name)
except ImportError:
    # FIX: `LOG` was never defined in this script, so the fallback path
    # itself crashed with NameError; print the warning instead.
    print("Keras is not installed, skipping Keras models")

print("Models:", dload_models)
| 1,339 | 35.216216 | 99 | py |
tvm | tvm-main/apps/android_camera/models/prepare_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import pathlib
from pathlib import Path
from typing import Union
import os
from os import environ
import json
import tvm
import tvm.relay as relay
from tvm.contrib import utils, ndk, graph_executor as runtime
from tvm.contrib.download import download_testdata, download
# Build target: LLVM CPU code for 64-bit ARM Android devices.
target = "llvm -mtriple=arm64-linux-android"
# No separate host target; tvm.target.Target(target, target_host) below.
target_host = None
def del_dir(target: Union[Path, str], only_if_empty: bool = False):
    """Recursively delete the directory *target*.

    Entries are chmod'ed 0o666 and removed deepest-first. When
    *only_if_empty* is True, encountering any regular file raises
    RuntimeError instead of deleting it (so only empty directory trees
    are removed).

    Raises NotADirectoryError if *target* is not an existing directory.
    """
    target = Path(target).expanduser()
    # FIX: `assert` is stripped under `python -O`; validate explicitly.
    if not target.is_dir():
        raise NotADirectoryError(f"{target} is not a directory")
    # reverse-sorted glob visits children before their parent directories.
    for p in sorted(target.glob("**/*"), reverse=True):
        if not p.exists():
            continue
        p.chmod(0o666)
        if p.is_dir():
            p.rmdir()
        else:
            if only_if_empty:
                raise RuntimeError(f"{p.parent} is not empty!")
            p.unlink()
    target.rmdir()
def get_model(model_name, batch_size=1):
    """Load a pretrained model and convert it to a Relay module.

    Supported names: "resnet18_v1" (MXNet Gluon zoo) and "mobilenet_v2"
    (Keras, weights downloaded on demand). Returns ``(module, params)``.

    Raises ValueError for any other name (previously the function fell
    through and implicitly returned None, crashing later in the caller).
    """
    if model_name == "resnet18_v1":
        import mxnet as mx
        from mxnet import gluon
        from mxnet.gluon.model_zoo import vision

        gluon_model = vision.get_model(model_name, pretrained=True)
        img_size = 224
        data_shape = (batch_size, 3, img_size, img_size)
        net, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape})
        return (net, params)
    elif model_name == "mobilenet_v2":
        import keras
        from keras.applications.mobilenet_v2 import MobileNetV2

        keras.backend.clear_session()  # Destroys the current TF graph and creates a new one.
        weights_url = "".join(
            [
                "https://github.com/JonathanCMitchell/",
                "mobilenet_v2_keras/releases/download/v1.1/",
                "mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
            ]
        )
        weights_file = "mobilenet_v2_weights.h5"
        weights_path = download_testdata(weights_url, weights_file, module="keras")
        keras_mobilenet_v2 = MobileNetV2(
            alpha=0.5, include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
        )
        keras_mobilenet_v2.load_weights(weights_path)
        img_size = 224
        data_shape = (batch_size, 3, img_size, img_size)
        mod, params = relay.frontend.from_keras(keras_mobilenet_v2, {"input_1": data_shape})
        return (mod, params)
    raise ValueError("unsupported model: %r" % model_name)
def main(model_str, output_path):
    """Compile *model_str* for the Android target and dump artifacts.

    Wipes *output_path* if it exists, then writes deploy_lib_cpu.so,
    deploy_graph.json, deploy_param.params and image_net_labels.json
    into it.
    """
    import ast  # local: used to parse the downloaded label map safely

    if output_path.exists():
        del_dir(output_path)
    output_path.mkdir()
    output_path_str = os.fspath(output_path)

    print(model_str)
    print("getting model...")
    net, params = get_model(model_str)
    # NOTE(review): this directory is created but never written to below;
    # kept for backward compatibility.  makedirs(exist_ok=True) replaces
    # the try/except FileExistsError dance.
    os.makedirs(model_str, exist_ok=True)
    print("building...")
    with tvm.transform.PassContext(opt_level=3):
        graph, lib, params = relay.build(net, tvm.target.Target(target, target_host), params=params)
    print("dumping lib...")
    lib.export_library(output_path_str + "/" + "deploy_lib_cpu.so", ndk.create_shared)
    print("dumping graph...")
    with open(output_path_str + "/" + "deploy_graph.json", "w") as f:
        f.write(graph)
    print("dumping params...")
    with open(output_path_str + "/" + "deploy_param.params", "wb") as f:
        f.write(tvm.runtime.save_param_dict(params))
    print("dumping labels...")
    synset_url = "".join(
        [
            "https://gist.githubusercontent.com/zhreshold/",
            "4d0b62f3d01426887599d4f7ede23ee5/raw/",
            "596b27d23537e5a1b5751d2b0481ef172f58b539/",
            "imagenet1000_clsid_to_human.txt",
        ]
    )
    synset_path = output_path_str + "/image_net_labels"
    download(synset_url, synset_path)
    with open(synset_path) as fi:
        # SECURITY FIX: the label file is remote content; eval() would run
        # arbitrary code from it.  ast.literal_eval parses literals only.
        synset = ast.literal_eval(fi.read())
    with open(output_path_str + "/image_net_labels.json", "w") as fo:
        json.dump(synset, fo, indent=4)
    os.remove(synset_path)


if __name__ == "__main__":
    # Building for Android requires the NDK toolchain compiler.
    if environ.get("TVM_NDK_CC") is None:
        raise RuntimeError("Require environment variable TVM_NDK_CC")
    models_path = Path().absolute().parent.joinpath("app/src/main/assets/models/")
    if not models_path.exists():
        models_path.mkdir(parents=True)
    models = {
        "mobilenet_v2": models_path.joinpath("mobilenet_v2"),
        "resnet18_v1": models_path.joinpath("resnet18_v1"),
    }
    for model, output_path in models.items():
        main(model, output_path)
| 5,118 | 35.564286 | 100 | py |
tvm | tvm-main/apps/topi_recipe/reduce/test_reduce_map.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
import tvm
from tvm import te, topi
from tvm.contrib import nvcc
# Tag used to name kernel dumps under perf/ (reassigned per test case below).
TASK = "reduce_map"
# When True, the postproc hook substitutes a hand-edited perf/<TASK>_manual.cu.
USE_MANUAL_CODE = False
def write_code(code, fname):
    """Write the source string *code* to *fname*, replacing any contents."""
    with open(fname, "w") as sink:
        sink.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code, target):
    """TVM hook: dump generated CUDA source under perf/ and, when
    USE_MANUAL_CODE is set, return the hand-edited perf/<TASK>_manual.cu
    instead of the generated code."""
    if not os.path.exists("perf"):
        os.mkdir("perf")
    write_code(code, "perf/%s_generated.cu" % TASK)
    if USE_MANUAL_CODE:
        # FIX: close the manual-kernel file instead of leaking the handle.
        with open("perf/%s_manual.cu" % TASK) as manual_file:
            code = manual_file.read()
    return code
def test_reduce_map(in_shape, axis, keepdims, type="sum", test_id=0):
    """Compile a CUDA reduction kernel with topi and check it against numpy.

    in_shape: input tensor shape; axis / keepdims: forwarded both to topi
    and to the numpy reference; type: one of "sum" / "max" / "min"
    (NOTE(review): the parameter name shadows the builtin ``type`` — kept
    because callers pass it by keyword); test_id: suffix tagging the
    dumped kernel file.
    """
    global TASK
    # Build the logic and compile the function
    A = te.placeholder(shape=in_shape, name="A")
    if type == "sum":
        TASK = "sum_map_id%d" % test_id
        B = topi.sum(A, axis=axis, keepdims=keepdims)
    elif type == "max":
        TASK = "max_map_id%d" % test_id
        B = topi.max(A, axis=axis, keepdims=keepdims)
    elif type == "min":
        TASK = "min_map_id%d" % test_id
        B = topi.min(A, axis=axis, keepdims=keepdims)
    else:
        raise NotImplementedError
    s = topi.cuda.schedule_reduce(B)
    with tvm.transform.PassContext(
        config={
            "tir.UnrollLoop": {
                "auto_max_step": 16,
            }
        }
    ):
        # NOTE(review): the built function is named "sum" for every
        # reduction type — confirm whether that is intentional.
        fcuda = tvm.build(s, [A, B], "cuda", name="sum")
    # Test
    in_npy = np.random.normal(size=in_shape).astype(np.float32)
    if type == "sum":
        out_npy = in_npy.sum(axis=axis, keepdims=keepdims)
    elif type == "max":
        out_npy = in_npy.max(axis=axis, keepdims=keepdims)
    elif type == "min":
        out_npy = in_npy.min(axis=axis, keepdims=keepdims)
    else:
        raise NotImplementedError
    data_tvm = tvm.nd.array(in_npy, device=tvm.cuda())
    out_tvm = tvm.nd.empty(shape=out_npy.shape, device=tvm.cuda())
    # Run twice; presumably so the second call exercises a warmed-up kernel.
    for _ in range(2):
        fcuda(data_tvm, out_tvm)
    tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, rtol=4e-4, atol=4e-4)


if __name__ == "__main__":
    # Multi-axis, flattened, full, and strided-axis reductions.
    test_reduce_map(
        in_shape=(128, 24, 128, 24), axis=(1, 2, 3), keepdims=True, type="sum", test_id=0
    )
    test_reduce_map(in_shape=(128, 24 * 128 * 24), axis=(1,), keepdims=False, type="max", test_id=1)
    test_reduce_map(in_shape=(32, 128, 24), axis=None, keepdims=True, type="sum", test_id=2)
    test_reduce_map(in_shape=(128, 24, 128, 24), axis=(0, 2), keepdims=False, type="min", test_id=3)
| 3,179 | 32.829787 | 100 | py |
tvm | tvm-main/apps/topi_recipe/broadcast/test_broadcast_map.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
import tvm
from tvm import te, topi
from tvm.contrib import nvcc
# Tag used to name kernel dumps under perf/ (reassigned per test case below).
TASK = "reduce_map"
# When True, the postproc hook substitutes a hand-edited perf/<TASK>_manual.cu.
USE_MANUAL_CODE = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code, target):
    """TVM hook: compile generated CUDA source to PTX via nvcc."""
    ptx = nvcc.compile_cuda(code, target_format="ptx")
    return ptx
def write_code(code, fname):
    """Persist *code* to the file *fname*, overwriting existing contents."""
    with open(fname, "w") as out_file:
        out_file.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code, target):
    """TVM hook: dump generated CUDA source under perf/ and, when
    USE_MANUAL_CODE is set, return the hand-edited perf/<TASK>_manual.cu
    instead of the generated code."""
    if not os.path.exists("perf"):
        os.mkdir("perf")
    write_code(code, "perf/%s_generated.cu" % TASK)
    if USE_MANUAL_CODE:
        # FIX: close the manual-kernel file instead of leaking the handle.
        with open("perf/%s_manual.cu" % TASK) as manual_file:
            code = manual_file.read()
    return code
def test_broadcast_to(in_shape, out_shape):
    """Compile topi.broadcast_to for CUDA and check it against
    np.broadcast_to on random float input."""
    global TASK
    TASK = (
        "bcast_to_i"
        + "_".join([str(ele) for ele in in_shape])
        + "o"
        + "_".join([str(ele) for ele in out_shape])
    )
    # Build the logic and compile the function
    A = te.placeholder(shape=in_shape, name="A")
    B = topi.broadcast_to(A, out_shape)
    s = topi.cuda.schedule_broadcast(B)
    fcuda = tvm.build(s, [A, B], "cuda", name="broadcast_to")
    data_npy = np.random.uniform(size=in_shape).astype(A.dtype)
    out_npy = np.broadcast_to(data_npy, out_shape)
    data_nd = tvm.nd.array(data_npy, tvm.cuda())
    out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), tvm.cuda())
    # Run twice; presumably so the second call exercises a warmed-up kernel.
    for _ in range(2):
        fcuda(data_nd, out_nd)
    tvm.testing.assert_allclose(out_nd.numpy(), out_npy)
def test_broadcast_binary_op(lhs_shape, rhs_shape, typ="add"):
    """Compile a broadcasting binary op for CUDA and check it against numpy.

    typ selects one of add / sub / div / mul / maximum / minimum; any
    other value raises NotImplementedError before a kernel is built, so
    the numpy reference chain below can never fall through with out_npy
    unset.
    """
    global TASK
    TASK = (
        "bcast_binary_"
        + typ
        + "_lhs"
        + "_".join([str(ele) for ele in lhs_shape])
        + "rhs"
        + "_".join([str(ele) for ele in rhs_shape])
    )
    A = te.placeholder(shape=lhs_shape, name="A")
    B = te.placeholder(shape=rhs_shape, name="B")
    if typ == "add":
        C = topi.broadcast_add(A, B)
    elif typ == "sub":
        C = topi.broadcast_sub(A, B)
    elif typ == "div":
        C = topi.broadcast_div(A, B)
    elif typ == "mul":
        C = topi.broadcast_mul(A, B)
    elif typ == "maximum":
        C = topi.broadcast_maximum(A, B)
    elif typ == "minimum":
        C = topi.broadcast_minimum(A, B)
    else:
        raise NotImplementedError
    s = topi.cuda.schedule_broadcast(C)
    fcuda = tvm.build(s, [A, B, C], "cuda", name="broadcast_binary" + "_" + typ)
    lhs_npy = np.random.uniform(size=lhs_shape).astype(A.dtype)
    rhs_npy = np.random.uniform(size=rhs_shape).astype(A.dtype)
    if typ == "add":
        out_npy = lhs_npy + rhs_npy
    elif typ == "sub":
        out_npy = lhs_npy - rhs_npy
    elif typ == "div":
        # Shift the divisor away from zero; the kernel below uses the
        # same shifted rhs, keeping device and reference consistent.
        rhs_npy = np.abs(rhs_npy) + 0.001
        out_npy = lhs_npy / rhs_npy
    elif typ == "mul":
        out_npy = lhs_npy * rhs_npy
    elif typ == "maximum":
        out_npy = np.maximum(lhs_npy, rhs_npy)
    elif typ == "minimum":
        out_npy = np.minimum(lhs_npy, rhs_npy)
    lhs_nd = tvm.nd.array(lhs_npy, tvm.cuda())
    rhs_nd = tvm.nd.array(rhs_npy, tvm.cuda())
    out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), tvm.cuda())
    # Run twice; presumably so the second call exercises a warmed-up kernel.
    for _ in range(2):
        fcuda(lhs_nd, rhs_nd, out_nd)
    tvm.testing.assert_allclose(out_nd.numpy(), out_npy)


if __name__ == "__main__":
    test_broadcast_to((1,), (10,))
    test_broadcast_to((1, 1, 5, 4), (3, 4, 4, 4, 5, 4))
    test_broadcast_to((1, 128, 1, 32), (64, 128, 64, 32))
    test_broadcast_binary_op((5, 2, 3), (2, 1), typ="add")
    test_broadcast_binary_op((5, 64, 128), (2, 5, 64, 1), typ="mul")
    test_broadcast_binary_op((2, 3, 1, 32), (64, 32), typ="div")
    test_broadcast_binary_op((1, 32), (64, 32), typ="sub")
    test_broadcast_binary_op((32,), (64, 32), typ="maximum")
    test_broadcast_binary_op((1, 2, 2, 1, 32), (64, 32), typ="minimum")
| 4,594 | 33.037037 | 80 | py |
tvm | tvm-main/apps/topi_recipe/rnn/lstm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""LSTM Example, still work in progress.."""
import os
import numpy as np
import tvm
from tvm import te
from tvm.contrib import nvcc
# Quick knobs
TASK = "lstm"  # tag for kernel dumps under perf/
USE_MANUAL_CODE = False  # postproc hook swaps in perf/<TASK>_manual.cu when True
PERSIST_KERNEL = True  # persistent-kernel schedule; lstm() rejects False
DETECT_GLOBAL_BARRIER = PERSIST_KERNEL  # forwarded to tir.detect_global_barrier
SKIP_CHECK = False  # NOTE(review): not consulted in lstm(); mirrors matexp.py
UNROLL_WLOAD = True  # unroll the weight-load (Wh2hL) loops in the schedule
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code, target):
    """Use nvcc compiler for better perf."""
    # Compile straight to PTX; TVM loads the result as the device module.
    ptx = nvcc.compile_cuda(code, target_format="ptx")
    return ptx
def write_code(code, fname):
    """Dump the source string *code* into *fname* (truncates first)."""
    with open(fname, "w") as dump:
        dump.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code, target):
    """TVM hook: dump generated CUDA source under perf/ and, when
    USE_MANUAL_CODE is set, return the hand-edited perf/<TASK>_manual.cu
    instead of the generated code."""
    if not os.path.exists("perf"):
        os.mkdir("perf")
    write_code(code, "perf/%s_generated.cu" % TASK)
    if USE_MANUAL_CODE:
        # FIX: close the manual-kernel file instead of leaking the handle.
        with open("perf/%s_manual.cu" % TASK) as manual_file:
            code = manual_file.read()
    return code
def lstm():
    """Build, schedule, and benchmark a persistent-kernel LSTM scan on CUDA.

    The input-to-hidden product (Xi2h) is assumed precomputed by a GEMM;
    only the hidden-to-hidden transition is computed inside the scan.
    Raises ValueError unless PERSIST_KERNEL is set — the only schedule
    implemented here.
    """
    if not PERSIST_KERNEL:
        raise ValueError("Non persist LSTM not yet supported")
    num_thread_y = 8
    num_thread_x = 16 * 3 // 2
    num_sm = 24
    n_num_step = 128
    num_step = te.var("num_step")
    num_hidden = 1152 // 2
    batch_size = 1
    # Global transition matrix
    # Input hidden channel can be pre-caculated by a gemm
    Xi2h = te.placeholder((num_step, batch_size, 4, num_hidden), name="Xi2h")
    # Only handle hidden transition, saves space.
    Wh2h = te.placeholder((4, num_hidden, num_hidden), name="Wh2h")
    # h: output hidden state, c: cell state.
    s_state_h = te.placeholder((num_step, batch_size, num_hidden))
    s_state_c = te.placeholder((num_step, batch_size, num_hidden))
    s_init_c = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_c")
    s_init_h = te.compute((1, batch_size, num_hidden), lambda *i: 0.0, name="init_h")
    # LSTM transition: hidden-to-hidden matmul for all 4 gates at once.
    k = te.reduce_axis((0, num_hidden), name="ki2h")
    s_h2h = te.compute(
        (num_step, batch_size, 4, num_hidden),
        lambda t, i, x, j: te.sum(s_state_h[t - 1, i, k] * Wh2h[x, j, k], axis=k),
        name="s_h2h",
    )
    # Gate rules (index 0..3 along the gate axis of Xi2h/gates).
    gates = te.compute(Xi2h.shape, lambda *i: Xi2h(*i) + s_h2h(*i), name="gates")
    gshape = (num_step, batch_size, num_hidden)
    in_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, i, 0, j]), name="in_gate")
    in_transform = te.compute(
        gshape, lambda t, i, j: te.tanh(gates[t, i, 1, j]), name="in_transform"
    )
    forget_gate = te.compute(
        gshape, lambda t, i, j: te.sigmoid(gates[t, i, 2, j]), name="forget_gate"
    )
    out_gate = te.compute(gshape, lambda t, i, j: te.sigmoid(gates[t, i, 3, j]), name="out_gate")
    next_c = te.compute(
        gshape,
        lambda t, i, j: forget_gate[t, i, j] * s_state_c[t - 1, i, j]
        + in_gate[t, i, j] * in_transform[t, i, j],
        name="next_c",
    )
    next_h = te.compute(
        gshape, lambda t, i, j: out_gate[t, i, j] * te.tanh(next_c[t, i, j]), name="next_h"
    )
    update_c = te.compute(gshape, lambda *i: next_c(*i), name="update_c")
    update_h = te.compute(gshape, lambda *i: next_h(*i), name="update_h")
    # Wire init/update/state tensors into a scan over the time dimension.
    scan_h, scan_c = tvm.te.scan(
        [s_init_h, s_init_c],
        [update_h, update_c],
        [s_state_h, s_state_c],
        inputs=[Xi2h],
        name="lstm_scan",
    )
    # schedule
    s = te.create_schedule(scan_h.op)
    # Inline gate computations
    s[gates].compute_inline()
    s[in_gate].compute_inline()
    s[in_transform].compute_inline()
    s[forget_gate].compute_inline()
    s[out_gate].compute_inline()
    block_x = te.thread_axis((0, num_sm), "blockIdx.x")
    thread_x = te.thread_axis((0, num_thread_x), "threadIdx.x")
    thread_y = te.thread_axis((0, num_thread_y), "threadIdx.y")
    # Stage state reads through shared memory; weights through local cache.
    s_state_h_S = s.cache_read(s_state_h, "shared", [s_h2h])
    s_state_c_S = s.cache_read(s_state_c, "shared", [next_c])
    Wh2hL = s.cache_read(Wh2h, "local", [s_h2h])
    # Split the reduction across threadIdx.y and rfactor for cross-thread sum.
    ko, ki = s[s_h2h].split(s[s_h2h].op.reduce_axis[0], nparts=num_thread_y)
    s_h2h_rf = s.rfactor(s_h2h, ko)
    s[s_h2h].bind(s[s_h2h].op.reduce_axis[0], thread_y)
    s[s_h2h_rf].compute_at(s[s_h2h], s[s_h2h].op.reduce_axis[0])
    if PERSIST_KERNEL:
        s[scan_h.op].env_threads([block_x, thread_y, thread_x])
        s[Wh2hL].compute_at(s[scan_h.op], thread_x)
    else:
        s[Wh2hL].compute_at(s[s_h2h], s[s_h2h].op.axis[3])
    if UNROLL_WLOAD:
        s[Wh2hL].unroll(Wh2hL.op.axis[0])
        s[Wh2hL].unroll(Wh2hL.op.axis[2])
    s[s_state_h_S].compute_at(s[s_h2h_rf], s[s_h2h_rf].op.axis[3])
    s[s_state_c_S].compute_at(s[scan_h.op], s[scan_h].op.scan_axis)
    for ss in [s_state_h_S]:
        xo, xi = s[ss].split(ss.op.axis[2], factor=num_thread_x * num_thread_y)
        ty, xi = s[ss].split(xi, nparts=num_thread_y)
        tx, xi = s[ss].split(xi, nparts=num_thread_x)
        s[ss].bind(ty, thread_y)
        s[ss].bind(tx, thread_x)
    for init in [s_init_c, s_init_h]:
        bx, xi = s[init].split(init.op.axis[2], nparts=num_sm)
        tx, xi = s[init].split(xi, nparts=num_thread_x)
        s[init].bind(bx, block_x)
        s[init].bind(tx, thread_x)
    # Only the thread_y==0 lane stores the final values.
    s[next_c].set_store_predicate(thread_y.equal(0))
    s[next_h].set_store_predicate(thread_y.equal(0))
    for update in [update_c, update_h]:
        bx, xi = s[update].split(s[update].op.axis[2], nparts=num_sm)
        tx, xi = s[update].split(xi, nparts=num_thread_x)
        s[update].bind(bx, block_x)
        s[update].bind(tx, thread_x)
        s[update].set_store_predicate(thread_y.equal(0))

    # verify we can lower correctly
    def check_device(target):
        # Builds the scan, runs it once, then reports the mean runtime over
        # repeated evaluations.  No numerical check is performed here.
        num_step = n_num_step
        flstm = tvm.build(s, [Xi2h, Wh2h, scan_h, scan_c], target)
        dev = tvm.cuda(0) if target == "cuda" else tvm.cl(0)
        # launch the kernel.
        scan_h_np = np.zeros((num_step, batch_size, num_hidden)).astype("float32")
        scan_c_np = np.zeros((num_step, batch_size, num_hidden)).astype("float32")
        Xi2h_np = np.random.normal(size=(num_step, batch_size, 4, num_hidden)).astype("float32")
        Wh2h_np = np.random.normal(size=(4, num_hidden, num_hidden)).astype("float32")
        scan_h_a = tvm.nd.array(scan_h_np, dev)
        scan_c_a = tvm.nd.array(scan_c_np, dev)
        Xi2h_a = tvm.nd.array(Xi2h_np, dev)
        Wh2h_a = tvm.nd.array(Wh2h_np, dev)
        flstm(Xi2h_a, Wh2h_a, scan_h_a, scan_c_a)
        dev.sync()
        # measure time cost of second step.
        evaluator = flstm.time_evaluator(flstm.entry_name, dev, 1, repeat=1000)
        eval_result = evaluator(Xi2h_a, Wh2h_a, scan_h_a, scan_c_a)
        print("Time cost=%g" % eval_result.mean)

    # set unroll_explicit for more readable code.
    with tvm.transform.PassContext(
        config={
            "tir.UnrollLoop": {
                "auto_max_step": 128,
            },
            "tir.detect_global_barrier": DETECT_GLOBAL_BARRIER,
        }
    ):
        check_device("cuda")


if __name__ == "__main__":
    lstm()
| 7,707 | 36.417476 | 97 | py |
tvm | tvm-main/apps/topi_recipe/rnn/matexp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Matrix exponential example.
This is an example for matrix exponential,
which calculates the following recursion formula
```math
X[t] = dot(X[t-1], W)
```
"""
import argparse
import os
import time
import numpy as np
import tvm
from tvm import te
from tvm.contrib import nvcc
# Quick knobs
TASK = "matexp"  # tag for kernel dumps under perf/
USE_MANUAL_CODE = False  # postproc hook swaps in perf/<TASK>_manual.cu when True
PERSIST_KERNEL = True  # use persistent-kernel scheduling in rnn_matexp()
DETECT_GLOBAL_BARRIER = PERSIST_KERNEL  # forwarded to tir.detect_global_barrier
SKIP_CHECK = False  # when True, skip the numpy correctness check in check_device
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code, target):
    """Use nvcc compiler for better perf."""
    # Compile straight to PTX; TVM loads the result as the device module.
    ptx = nvcc.compile_cuda(code, target_format="ptx")
    return ptx
def write_code(code, fname):
    """Save the generated source *code* to *fname*, overwriting it."""
    with open(fname, "w") as handle:
        handle.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code, target):
    """TVM hook: dump generated CUDA source under perf/ and, when
    USE_MANUAL_CODE is set, return the hand-edited perf/<TASK>_manual.cu
    instead of the generated code."""
    if not os.path.exists("perf"):
        os.mkdir("perf")
    write_code(code, "perf/%s_generated.cu" % TASK)
    if USE_MANUAL_CODE:
        # FIX: close the manual-kernel file instead of leaking the handle.
        with open("perf/%s_manual.cu" % TASK) as manual_file:
            code = manual_file.read()
    return code
def rnn_matexp():
    """Build, schedule, time, and verify the matrix-exponential recursion
    X[t] = dot(X[t-1], Whh) as a TE scan on CUDA.

    check_device() runs the kernel twice (first call is compilation/warm-up),
    prints the wall-clock of the second run, and — unless SKIP_CHECK — checks
    the result against a float64 numpy recursion.
    """
    n_num_step = 128
    n_num_hidden = 1152
    n_batch_size = 4
    detect_global_barrier = DETECT_GLOBAL_BARRIER
    num_step = te.var("num_step")
    num_hidden = tvm.runtime.convert(n_num_hidden)
    batch_size = tvm.runtime.convert(n_batch_size)
    num_thread_y = 8
    num_thread_x = 16 * 3
    num_sm = 24
    Whh = te.placeholder((num_hidden, num_hidden), name="Whh")
    s_init = te.compute((1, batch_size, num_hidden), lambda _, i, j: 1.0, name="init")
    s_state = te.placeholder((num_step, batch_size, num_hidden))
    kh = te.reduce_axis((0, num_hidden), name="kh")
    # One scan step: matmul of the previous state with Whh.
    s_update = te.compute(
        (num_step, batch_size, num_hidden),
        lambda t, i, j: te.sum(s_state[t - 1, i, kh] * Whh[kh, j], axis=kh),
        name="update",
    )
    s_scan = tvm.te.scan(s_init, s_update, s_state)
    # schedule
    s = te.create_schedule(s_scan.op)
    CL = s_update
    # Stage the state through shared then local memory; weights in local.
    SS = s.cache_read(s_state, "shared", [CL])
    SL = s.cache_read(SS, "local", [CL])
    WhhL = s.cache_read(Whh, "local", [CL])
    # Split the reduction over threadIdx.y and rfactor for cross-thread sum.
    ko, ki = s[CL].split(s[CL].op.reduce_axis[0], nparts=num_thread_y)
    CLF = s.rfactor(CL, ko)
    block_x = te.thread_axis((0, num_sm), "blockIdx.x")
    thread_x = te.thread_axis((0, num_thread_x), "threadIdx.x")
    thread_y = te.thread_axis((0, num_thread_y), "threadIdx.y")
    if PERSIST_KERNEL:
        s[s_scan.op].env_threads([block_x, thread_y, thread_x])
    bx, xi = s[s_init].split(s_init.op.axis[2], nparts=num_sm)
    tx, xi = s[s_init].split(xi, nparts=num_thread_x)
    s[s_init].bind(bx, block_x)
    s[s_init].bind(tx, thread_x)
    bx, xi = s[s_update].split(s[CL].op.axis[2], nparts=num_sm)
    tx, xi = s[s_update].split(xi, nparts=num_thread_x)
    s[s_update].bind(bx, block_x)
    s[s_update].bind(tx, thread_x)
    s[CL].bind(s[CL].op.reduce_axis[0], thread_y)
    s[CLF].compute_at(s[CL], s[CL].op.reduce_axis[0])
    # Duplicate store predicate.
    s[CL].set_store_predicate(thread_y.equal(0))
    if PERSIST_KERNEL:
        s[WhhL].compute_at(s[s_scan], thread_x)
        s[WhhL].unroll(WhhL.op.axis[0])
    else:
        s[WhhL].compute_at(s[CLF], CLF.op.axis[3])
    kr, ki = s[CLF].split(CLF.op.reduce_axis[0], nparts=1)
    ko, ki = s[CLF].split(ki, factor=4)
    s[SS].compute_at(s[CLF], kr)
    s[SL].compute_at(s[CLF], ko)
    xo, xi = s[SS].split(SS.op.axis[2], factor=num_thread_x * num_thread_y * 3)
    ty, xi = s[SS].split(xi, nparts=num_thread_y)
    tx, xi = s[SS].split(xi, nparts=num_thread_x)
    s[SS].bind(ty, thread_y)
    s[SS].bind(tx, thread_x)

    def check_device(target):
        # Build under a config matching the persistent-kernel requirements.
        with tvm.transform.PassContext(
            config={
                "tir.UnrollLoop": {
                    "auto_max_step": 128,
                },
                "tir.detect_global_barrier": detect_global_barrier,
            }
        ):
            f = tvm.build(s, [s_scan, Whh], target)
        dev = tvm.cuda(0) if target == "cuda" else tvm.cl(0)
        # launch the kernel.
        res_np = np.zeros((n_num_step, n_batch_size, n_num_hidden)).astype("float32")
        Whh_np = np.zeros((n_num_hidden, n_num_hidden)).astype("float32")
        # Uniform weights in the left half, zeros in the right half, so the
        # recursion stays bounded for the correctness check.
        Whh_np[:] = 2.0 / n_num_hidden
        Whh_np[:, n_num_hidden // 2 :] = 0
        res_a = tvm.nd.array(res_np, dev)
        Whh_a = tvm.nd.array(Whh_np, dev)
        # Skip first pass as it is compilation
        f(res_a, Whh_a)
        dev.sync()
        # measure time cost of second step.
        tstart = time.time()
        f(res_a, Whh_a)
        dev.sync()
        tgap = time.time() - tstart
        print("Time cost=%g" % tgap)
        # correctness
        if not SKIP_CHECK:
            res_cuda = res_a.numpy()
            res_cmp = np.ones_like(res_np).astype("float64")
            Whh_np = Whh_np.astype("float64")
            for t in range(1, n_num_step):
                res_cmp[t][:] = np.dot(res_cmp[t - 1], Whh_np)
            for i in range(n_num_step):
                for j in range(n_num_hidden):
                    if abs(res_cmp[i, 0, j] - res_cuda[i, 0, j]) > 1e-5:
                        print("%d, %d: %g vs %g" % (i, j, res_cmp[i, 0, j], res_cuda[i, 0, j]))
            tvm.testing.assert_allclose(res_cuda, res_cmp, rtol=1e-3)

    check_device("cuda")


if __name__ == "__main__":
    rnn_matexp()
| 6,054 | 32.638889 | 95 | py |
tvm | tvm-main/apps/topi_recipe/conv/test_conv_int8_arm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable-msg=too-many-arguments, too-many-locals, assignment-from-no-return
""" Conv Int8 functional and performance testing"""
import sys
import logging
import numpy as np
import tvm
from tvm import te
from tvm import topi
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOGGER = logging.getLogger("test_conv_int8_intel")
LOGGER.disabled = False
# All the WORKLOADS from Resnet except first layer
# Workload is ['height', 'width', 'in_filter', 'out_filter',
# 'hkernel', 'wkernel', 'hpad', 'wpad', 'hstride', 'wstride'])
WORKLOADS = [
    (56, 56, 64, 64, 3, 3, 1, 1, 1, 1),
    (56, 56, 64, 64, 1, 1, 0, 0, 1, 1),
    (56, 56, 64, 128, 3, 3, 1, 1, 2, 2),
    (56, 56, 64, 128, 1, 1, 0, 0, 2, 2),
    (28, 28, 128, 128, 3, 3, 1, 1, 1, 1),
    (28, 28, 128, 256, 3, 3, 1, 1, 2, 2),
    (28, 28, 128, 256, 1, 1, 0, 0, 2, 2),
    (14, 14, 256, 256, 3, 3, 1, 1, 1, 1),
    (14, 14, 256, 512, 3, 3, 1, 1, 2, 2),
    (14, 14, 256, 512, 1, 1, 0, 0, 2, 2),
    (7, 7, 512, 512, 3, 3, 1, 1, 1, 1),
    (56, 56, 64, 256, 1, 1, 0, 0, 1, 1),
    (56, 56, 256, 64, 1, 1, 0, 0, 1, 1),
    (56, 56, 256, 128, 1, 1, 0, 0, 2, 2),
    (28, 28, 128, 512, 1, 1, 0, 0, 1, 1),
    (56, 56, 256, 512, 1, 1, 0, 0, 2, 2),
    (28, 28, 512, 128, 1, 1, 0, 0, 1, 1),
    (28, 28, 512, 256, 1, 1, 0, 0, 2, 2),
    (14, 14, 256, 1024, 1, 1, 0, 0, 1, 1),
    (28, 28, 512, 1024, 1, 1, 0, 0, 2, 2),
    (14, 14, 1024, 256, 1, 1, 0, 0, 1, 1),
    (14, 14, 1024, 512, 1, 1, 0, 0, 2, 2),
    (7, 7, 512, 2048, 1, 1, 0, 0, 1, 1),
    (14, 14, 1024, 2048, 1, 1, 0, 0, 2, 2),
    (7, 7, 2048, 512, 1, 1, 0, 0, 1, 1),
]
# AArch64 target with the v8.2a dot-product extension enabled, which the
# int8 NCHWc schedules rely on.
TARGET_NAME = "llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+v8.2a,+dotprod"
# Vector width (the "c" in NCHWc blocked layouts) used by get_shape below.
NUM_VEC_LANES = 16
# Device handle all benchmarks in this script run on.
DEV = tvm.device(TARGET_NAME, 0)
def get_shape(
    im_height,
    im_width,
    in_filter,
    out_filter,
    k_h,
    k_w,
    hpad,
    wpad,
    hstride,
    wstride,
    out_dtype,
    num_vec_lanes=None,
):
    """Compute blocked-layout shapes for one convolution workload.

    Parameters mirror a ``WORKLOADS`` entry plus the requested output dtype.
    ``num_vec_lanes`` (the "c" block size) defaults to the module-level
    ``NUM_VEC_LANES`` so existing callers are unaffected.

    Returns
    -------
    tuple
        ``(data_shape, kernel_shape, o_shape)`` in NCHWc layout; the int32 /
        uint32 kernel additionally packs 4 reduction lanes innermost for the
        dot-product instructions.

    Raises
    ------
    ValueError
        For an unsupported ``out_dtype`` (the original code fell through and
        crashed later with ``UnboundLocalError``).
    """
    if num_vec_lanes is None:
        num_vec_lanes = NUM_VEC_LANES
    data_shape = (1, in_filter // num_vec_lanes, im_height, im_width, num_vec_lanes)
    if out_dtype in ("int32", "uint32"):
        # int8 dot-product kernels pack 4 input channels per lane innermost.
        kernel_shape = (
            out_filter // num_vec_lanes,
            in_filter // num_vec_lanes,
            k_h,
            k_w,
            num_vec_lanes // 4,
            num_vec_lanes,
            4,
        )
    elif out_dtype == "float32":
        kernel_shape = (
            out_filter // num_vec_lanes,
            in_filter // num_vec_lanes,
            k_h,
            k_w,
            num_vec_lanes,
            num_vec_lanes,
        )
    else:
        raise ValueError("unsupported out_dtype: %s" % out_dtype)
    out_height = (im_height + 2 * hpad - k_h) // hstride + 1
    out_width = (im_width + 2 * wpad - k_w) // wstride + 1
    o_shape = (1, out_filter // num_vec_lanes, out_height, out_width, num_vec_lanes)
    return (data_shape, kernel_shape, o_shape)
def run_inference(
    data_dtype,
    kernel_dtype,
    out_dtype,
    im_height,
    im_width,
    in_filter,
    out_filter,
    k_h,
    k_w,
    hpad,
    wpad,
    hstride,
    wstride,
):
    """Build and run one conv2d workload two ways and cross-check the results.

    Compiles the workload from the naive default schedule and from the
    target-optimized NCHWc schedule, runs both on ``DEV``, asserts the two
    outputs agree, and returns the mean runtime of the optimized kernel
    (seconds, averaged over 1000 runs by ``time_evaluator``).

    Parameters
    ----------
    data_dtype, kernel_dtype, out_dtype : str
        Numpy-style dtype names, e.g. "float32" or "uint8"/"uint32".
    im_height ... wstride : int
        Workload geometry in the same order as a ``WORKLOADS`` entry.
    """
    (data_shape, kernel_shape, o_shape) = get_shape(
        im_height,
        im_width,
        in_filter,
        out_filter,
        k_h,
        k_w,
        hpad,
        wpad,
        hstride,
        wstride,
        out_dtype,
    )
    # Create TVM placeholders
    data = te.placeholder(data_shape, name="data", dtype=data_dtype)
    kernel = te.placeholder(kernel_shape, name="kernel", dtype=kernel_dtype)
    # Create the numpy arrays to be used for executing conv models
    if data_dtype == "float32":
        data_array = tvm.nd.array(np.random.rand(*data_shape).astype(dtype=data_dtype), DEV)
        kernel_array = tvm.nd.array(np.random.rand(*kernel_shape).astype(dtype=kernel_dtype), DEV)
    else:
        # NOTE(review): unlike the float branch, these arrays are created on the
        # default device (no DEV argument) — likely fine since TARGET_NAME is a
        # CPU target, but confirm before reusing on a non-default device.
        data_array = tvm.nd.array(np.random.randint(100, size=data_shape).astype(data_dtype))
        kernel_array = tvm.nd.array(np.random.randint(100, size=kernel_shape).astype(kernel_dtype))
    # c_orig will be used for declaration output
    # c_sch will be used for scheduled computation output
    c_orig = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
    c_sch = tvm.nd.array(np.zeros(o_shape, dtype=out_dtype), DEV)
    with tvm.target.Target(TARGET_NAME):
        if out_dtype == "float32":
            conv = topi.nn.conv2d_NCHWc(
                data,
                kernel,
                stride=hstride,
                padding=hpad,
                dilation=(1, 1),
                layout="NCHWc",
                out_layout="NCHWc",
                out_dtype=out_dtype,
            )
        else:
            conv = topi.nn.conv2d_NCHWc_int8(
                data,
                kernel,
                strides=hstride,
                padding=hpad,
                dilation=(1, 1),
                layout="NCHWc",
                out_layout="NCHWc",
                out_dtype=out_dtype,
            )
        out = topi.nn.relu(conv)
        # Reference result: naive schedule straight from the compute declaration.
        sch = te.create_schedule(out.op)
        func = tvm.build(sch, [data, kernel, out], target=TARGET_NAME, name="out")
        func(data_array, kernel_array, c_orig)
        # NOTE(review): the arg list here omits `out`, unlike tvm.build above —
        # presumably acceptable for debug lowering; verify if output is needed.
        LOGGER.debug(tvm.lower(sch, [data, kernel], simple_mode=True))
        # Generate and run the optimized schedule
        if out_dtype == "float32":
            sconv = topi.generic.nn.schedule_conv2d_NCHWc(outs=[out])
        else:
            sconv = topi.generic.nn.schedule_conv2d_NCHWc_int8(outs=[out])
        func = tvm.build(sconv, [data, kernel, out], target=TARGET_NAME, name="conv")
        func(data_array, kernel_array, c_sch)
        # Functional check
        if data_dtype == "uint8":
            np.testing.assert_equal(c_orig.numpy(), c_sch.numpy())
        else:
            assert np.allclose(c_orig.numpy(), c_sch.numpy())
        evaluator = func.time_evaluator(func.entry_name, DEV, number=1000)
        LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True))
        return evaluator(data_array, kernel_array, c_sch).mean
if __name__ == "__main__":
    LOGGER.info("Workload, Kernel_size, FP32_time, INT8_time, Speedup")
    SPEEDUP_ARRAY = []
    for idx, workload in enumerate(WORKLOADS):
        # Kernel height/width live at fixed positions of each workload tuple.
        kernel_h, kernel_w = workload[4], workload[5]
        # Benchmark each workload with both signed and unsigned int8 kernels,
        # comparing against a fresh float32 baseline each time.
        for prefix in ("uint", "int"):
            base_time = run_inference("float32", "float32", "float32", *workload)
            quant_time = run_inference(prefix + "8", prefix + "8", prefix + "32", *workload)
            speedup = base_time / quant_time
            LOGGER.info(
                "[%s] Workload#%s, %sx%s, %s, %s, %s"
                % (prefix, idx, kernel_h, kernel_w, base_time, quant_time, speedup)
            )
            SPEEDUP_ARRAY.append(speedup)
    LOGGER.info("Average speedup --> %s" % str(sum(SPEEDUP_ARRAY) / float(len(SPEEDUP_ARRAY))))
| 7,792 | 33.790179 | 99 | py |
tvm | tvm-main/apps/topi_recipe/conv/depthwise_conv2d_test.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
import tvm
from scipy import signal
from tvm import te, topi
from tvm.contrib import nvcc
from tvm.topi.cuda.depthwise_conv2d import (
schedule_depthwise_conv2d_nchw,
schedule_depthwise_conv2d_nhwc,
)
from tvm.topi.utils import get_const_tuple
# Tag used to name generated kernel files under perf/.
TASK = "depthwise_conv2d"
# When True, the postproc hook substitutes hand-edited CUDA from
# perf/<TASK>_manual.cu in place of the generated code.
USE_MANUAL_CODE = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code, target):
    """Compile generated CUDA source straight to PTX via nvcc."""
    return nvcc.compile_cuda(code, target_format="ptx")
def write_code(code, fname):
    """Write *code* to *fname*, creating or truncating the file.

    The encoding is pinned to UTF-8; the original relied on the
    platform-dependent default encoding.
    """
    with open(fname, "w", encoding="utf-8") as out_file:
        out_file.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code, target):
    """Dump generated CUDA to perf/ and optionally swap in hand-tuned code.

    Always saves *code* to ``perf/<TASK>_generated.cu``; when
    ``USE_MANUAL_CODE`` is set, returns the contents of
    ``perf/<TASK>_manual.cu`` instead of the generated source.
    """
    # makedirs(exist_ok=True) avoids the racy exists()/mkdir() pair.
    os.makedirs("perf", exist_ok=True)
    write_code(code, "perf/%s_generated.cu" % TASK)
    if USE_MANUAL_CODE:
        # Context manager closes the handle (the original leaked it).
        with open("perf/%s_manual.cu" % TASK) as manual_file:
            code = manual_file.read()
    return code
def test_depthwise_conv2d_nchw():
    """Benchmark and verify depthwise conv2d (NCHW layout) on GPU backends.

    Builds three progressively fused kernels (depthwise conv, + scale/shift,
    + relu), times each over 1000 runs on every enabled device, and checks
    all three outputs against a scipy-based reference.
    You may test different settings by editing the constants below.
    """
    batch = 1
    in_channel = 256
    in_height = 96
    in_width = 96
    filter_channel = in_channel
    channel_multiplier = 1
    filter_height = 3
    filter_width = 3
    stride_h = 1
    stride_w = 1
    padding = "SAME"  # or 'VALID'
    # Placeholder
    Input = te.placeholder((batch, in_channel, in_height, in_width), name="Input")
    Filter = te.placeholder(
        (filter_channel, channel_multiplier, filter_height, filter_width), name="Filter"
    )
    Stride = [stride_h, stride_w]
    Scale = te.placeholder((in_channel * channel_multiplier,), name="Scale")
    Shift = te.placeholder((in_channel * channel_multiplier,), name="Shift")
    # Declare
    DepthwiseConv2d = topi.nn.depthwise_conv2d_nchw(Input, Filter, Stride, padding)
    ScaleShift = topi.nn.scale_shift_nchw(DepthwiseConv2d, Scale, Shift)
    Relu = topi.nn.relu(ScaleShift)
    # Schedule: one schedule per fusion level.
    s1 = schedule_depthwise_conv2d_nchw(DepthwiseConv2d)
    s2 = schedule_depthwise_conv2d_nchw(ScaleShift)
    s3 = schedule_depthwise_conv2d_nchw(Relu)
    # Random host-side inputs shared by all devices below.
    input_np = np.random.uniform(size=get_const_tuple(Input.shape)).astype(Input.dtype)
    filter_np = np.random.uniform(size=get_const_tuple(Filter.shape)).astype(Filter.dtype)
    scale_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Scale.dtype)
    shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype)
    def check_device(device):
        # Build, time, and verify all three kernels on one device.
        if not tvm.runtime.enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        dev = tvm.device(device, 0)
        # Build the kernel
        f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
        f2 = tvm.build(s2, [Input, Filter, Scale, Shift, ScaleShift], device)
        f3 = tvm.build(s3, [Input, Filter, Scale, Shift, Relu], device)
        # Prepare data
        input_tvm = tvm.nd.array(input_np, dev)
        filter_tvm = tvm.nd.array(filter_np, dev)
        scale_tvm = tvm.nd.array(scale_np, dev)
        shift_tvm = tvm.nd.array(shift_np, dev)
        depthwise_conv2d_tvm = tvm.nd.array(
            np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape), dtype=DepthwiseConv2d.dtype), dev
        )
        scale_shift_tvm = tvm.nd.array(
            np.zeros(shape=get_const_tuple(ScaleShift.shape), dtype=ScaleShift.dtype), dev
        )
        relu_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), dev)
        # Measure time cost of kernel 1 (depthwise_conv2d)
        timer_1 = f1.time_evaluator(f1.entry_name, dev, number=1000)
        tcost_1 = timer_1(input_tvm, filter_tvm, depthwise_conv2d_tvm).mean
        # Measure time cost of kernel 2 (depthwise_conv2d + scale_shift)
        timer_2 = f2.time_evaluator(f2.entry_name, dev, number=1000)
        tcost_2 = timer_2(input_tvm, filter_tvm, scale_tvm, shift_tvm, scale_shift_tvm).mean
        # Measure time cost of kernel 3 (depthwise_conv2d + scale_shift + relu)
        timer_3 = f3.time_evaluator(f3.entry_name, dev, number=1000)
        tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean
        print("Input shape = " + str(get_const_tuple(Input.shape)))
        print("Filter shape = " + str(get_const_tuple(Filter.shape)))
        print("Stride = (%d, %d)" % (stride_h, stride_w))
        print("padding = %s\n" % padding)
        print("Output shape = " + str(get_const_tuple(DepthwiseConv2d.shape)))
        print("average time cost of 1000 runs (depthwise_conv2d) = %g us" % (tcost_1 * 1e6))
        print(
            "average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us"
            % (tcost_2 * 1e6)
        )
        print(
            "average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us"
            % (tcost_3 * 1e6)
        )
        # correctness: compare against scipy-backed reference, channel by channel.
        depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nchw(
            input_np, filter_np, stride=[stride_h, stride_w], padding=padding
        )
        scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
        for c in range(in_channel * channel_multiplier):
            scale_shift_scipy[:, c, :, :] = (
                depthwise_conv2d_scipy[:, c, :, :] * scale_np[c] + shift_np[c]
            )
        relu_scipy = np.maximum(scale_shift_scipy, 0)
        tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5)
        tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5)
        tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5)
        print("success")
    for device in ["cuda", "opencl", "rocm"]:
        with tvm.transform.PassContext(
            config={"tir.UnrollLoop": {"auto_max_step": 128, "explicit_unroll": device != "rocm"}}
        ):
            check_device(device)
def test_depthwise_conv2d_nhwc():
    """Benchmark and verify depthwise conv2d (NHWC layout) on GPU backends.

    NHWC counterpart of test_depthwise_conv2d_nchw: builds three fused
    kernels (depthwise conv, + scale/shift, + relu), times each over 1000
    runs, and checks the outputs against a scipy-based reference.
    You may test different settings by editing the constants below.
    """
    batch = 1
    in_channel = 256
    in_height = 96
    in_width = 96
    filter_channel = in_channel
    channel_multiplier = 1
    filter_height = 3
    filter_width = 3
    stride_h = 1
    stride_w = 1
    padding = "SAME"  # or 'VALID'
    # Placeholder
    Input = te.placeholder((batch, in_height, in_width, in_channel), name="Input")
    Filter = te.placeholder(
        (filter_height, filter_width, filter_channel, channel_multiplier), name="Filter"
    )
    Stride = [stride_h, stride_w]
    Scale = te.placeholder((in_channel * channel_multiplier,), name="Scale")
    Shift = te.placeholder((in_channel * channel_multiplier,), name="Shift")
    # Declare
    DepthwiseConv2d = topi.nn.depthwise_conv2d_nhwc(Input, Filter, Stride, padding)
    ScaleShift = topi.nn.scale_shift_nhwc(DepthwiseConv2d, Scale, Shift)
    Relu = topi.nn.relu(ScaleShift)
    # Schedule: one schedule per fusion level.
    s1 = schedule_depthwise_conv2d_nhwc(DepthwiseConv2d)
    s2 = schedule_depthwise_conv2d_nhwc(ScaleShift)
    s3 = schedule_depthwise_conv2d_nhwc(Relu)
    # Random host-side inputs shared by all devices below.
    input_np = np.random.uniform(size=get_const_tuple(Input.shape)).astype(Input.dtype)
    filter_np = np.random.uniform(size=get_const_tuple(Filter.shape)).astype(Filter.dtype)
    scale_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Scale.dtype)
    shift_np = np.random.uniform(size=(in_channel * channel_multiplier)).astype(Shift.dtype)
    def check_device(device):
        # Build, time, and verify all three kernels on one device.
        if not tvm.runtime.enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        dev = tvm.device(device, 0)
        # Build the kernel
        f1 = tvm.build(s1, [Input, Filter, DepthwiseConv2d], device)
        f2 = tvm.build(s2, [Input, Filter, Scale, Shift, ScaleShift], device)
        f3 = tvm.build(s3, [Input, Filter, Scale, Shift, Relu], device)
        # Prepare data
        input_tvm = tvm.nd.array(input_np, dev)
        filter_tvm = tvm.nd.array(filter_np, dev)
        scale_tvm = tvm.nd.array(scale_np, dev)
        shift_tvm = tvm.nd.array(shift_np, dev)
        depthwise_conv2d_tvm = tvm.nd.array(
            np.zeros(shape=get_const_tuple(DepthwiseConv2d.shape), dtype=DepthwiseConv2d.dtype), dev
        )
        scale_shift_tvm = tvm.nd.array(
            np.zeros(shape=get_const_tuple(ScaleShift.shape), dtype=ScaleShift.dtype), dev
        )
        relu_tvm = tvm.nd.array(np.zeros(shape=get_const_tuple(Relu.shape), dtype=Relu.dtype), dev)
        # Measure time cost of kernel 1 (depthwise_conv2d)
        timer_1 = f1.time_evaluator(f1.entry_name, dev, number=1000)
        tcost_1 = timer_1(input_tvm, filter_tvm, depthwise_conv2d_tvm).mean
        # Measure time cost of kernel 2 (depthwise_conv2d + scale_shift)
        timer_2 = f2.time_evaluator(f2.entry_name, dev, number=1000)
        tcost_2 = timer_2(input_tvm, filter_tvm, scale_tvm, shift_tvm, scale_shift_tvm).mean
        # Measure time cost of kernel 3 (depthwise_conv2d + scale_shift + relu)
        timer_3 = f3.time_evaluator(f3.entry_name, dev, number=1000)
        tcost_3 = timer_3(input_tvm, filter_tvm, scale_tvm, shift_tvm, relu_tvm).mean
        print("Input shape = " + str(get_const_tuple(Input.shape)))
        print("Filter shape = " + str(get_const_tuple(Filter.shape)))
        print("Stride = (%d, %d)" % (stride_h, stride_w))
        print("padding = %s\n" % padding)
        print("Output shape = " + str(get_const_tuple(DepthwiseConv2d.shape)))
        print("average time cost of 1000 runs (depthwise_conv2d) = %g us" % (tcost_1 * 1e6))
        print(
            "average time cost of 1000 runs (depthwise_conv2d + scale_shift) = %g us"
            % (tcost_2 * 1e6)
        )
        print(
            "average time cost of 1000 runs (depthwise_conv2d + scale_shift + relu) = %g us"
            % (tcost_3 * 1e6)
        )
        # correctness: compare against scipy-backed reference, channel by channel.
        depthwise_conv2d_scipy = tvm.topi.testing.depthwise_conv2d_python_nhwc(
            input_np, filter_np, stride=[stride_h, stride_w], padding=padding
        )
        scale_shift_scipy = np.zeros(shape=get_const_tuple(ScaleShift.shape))
        for c in range(in_channel * channel_multiplier):
            scale_shift_scipy[:, :, :, c] = (
                depthwise_conv2d_scipy[:, :, :, c] * scale_np[c] + shift_np[c]
            )
        relu_scipy = np.maximum(scale_shift_scipy, 0)
        tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5)
        tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5)
        tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5)
        print("success")
    for device in ["cuda", "opencl", "rocm"]:
        with tvm.transform.PassContext(
            config={"tir.UnrollLoop": {"auto_max_step": 128, "explicit_unroll": device != "cuda"}}
        ):
            check_device(device)
if __name__ == "__main__":
    # Exercise both data layouts back to back.
    for benchmark in (test_depthwise_conv2d_nchw, test_depthwise_conv2d_nhwc):
        benchmark()
| 11,648 | 42.466418 | 100 | py |
tvm | tvm-main/apps/topi_recipe/conv/test_conv2d_hwcn_map.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example code to do convolution."""
import os
import numpy as np
import tvm
from tvm import te, topi
from tvm.contrib import nvcc
from tvm.topi.utils import get_const_tuple
# Tag used to name generated kernel files under perf/.
TASK = "conv2d_hwcn_map"
# When True, the postproc hook substitutes hand-edited CUDA from
# perf/<TASK>_manual.cu in place of the generated code.
USE_MANUAL_CODE = False
@tvm.register_func("tvm_callback_cuda_compile", override=True)
def tvm_callback_cuda_compile(code, target):
    """Compile generated CUDA source straight to PTX via nvcc."""
    return nvcc.compile_cuda(code, target_format="ptx")
def write_code(code, fname):
    """Write *code* to *fname*, creating or truncating the file.

    The encoding is pinned to UTF-8; the original relied on the
    platform-dependent default encoding.
    """
    with open(fname, "w", encoding="utf-8") as out_file:
        out_file.write(code)
@tvm.register_func
def tvm_callback_cuda_postproc(code, target):
    """Dump generated CUDA to perf/ and optionally swap in hand-tuned code.

    Always saves *code* to ``perf/<TASK>_generated.cu``; when
    ``USE_MANUAL_CODE`` is set, returns the contents of
    ``perf/<TASK>_manual.cu`` instead of the generated source.
    """
    # makedirs(exist_ok=True) avoids the racy exists()/mkdir() pair.
    os.makedirs("perf", exist_ok=True)
    write_code(code, "perf/%s_generated.cu" % TASK)
    if USE_MANUAL_CODE:
        # Context manager closes the handle (the original leaked it).
        with open("perf/%s_manual.cu" % TASK) as manual_file:
            code = manual_file.read()
    return code
def test_conv2d_hwcn_map():
    """Build, run and verify conv2d (HWCN layout) followed by relu on GPUs.

    Compiles two schedules (conv only, conv + relu) for each enabled device
    and asserts both results against a reference implementation.

    Fix: the PassContext config keys were misspelled "tir.UrollLoop" /
    "auto_unroll_max_step"; PassContext rejects unknown config keys, so the
    original raised before building.  Corrected to "tir.UnrollLoop" /
    "auto_max_step" (matching depthwise_conv2d_test.py in this recipe).
    """
    batch = 64
    in_channel = 128
    in_height = 16
    in_width = 16
    num_filter = 128
    kernel = 3
    stride = 2
    padding = "SAME"
    # Compute declarations in HWCN layout.
    A = te.placeholder((in_height, in_width, in_channel, batch), name="A")
    W = te.placeholder((kernel, kernel, in_channel, num_filter), name="W")
    B = topi.nn.conv2d_hwcn(A, W, stride, padding)
    C = topi.nn.relu(B)
    s1 = topi.cuda.schedule_conv2d_hwcn([B])
    s2 = topi.cuda.schedule_conv2d_hwcn([C])
    # Host-side inputs and reference outputs shared by all devices.
    a_np = np.random.uniform(size=get_const_tuple(A.shape)).astype(A.dtype)
    w_np = np.random.uniform(size=get_const_tuple(W.shape)).astype(W.dtype)
    b_np = tvm.topi.testing.conv2d_hwcn_python(a_np, w_np, stride, padding)
    c_np = np.maximum(b_np, 0)
    def check_device(device):
        # Build and verify both schedules on one device.
        if not tvm.runtime.enabled(device):
            print("Skip because %s is not enabled" % device)
            return
        dev = tvm.device(device, 0)
        a = tvm.nd.array(a_np, dev)
        w = tvm.nd.array(w_np, dev)
        b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
        c = tvm.nd.array(np.zeros(get_const_tuple(C.shape), dtype=C.dtype), dev)
        with tvm.transform.PassContext(
            config={
                "tir.UnrollLoop": {"auto_max_step": 128, "explicit_unroll": device == "rocm"}
            }
        ):
            func1 = tvm.build(s1, [A, W, B], device)
            func1(a, w, b)
            tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
            func2 = tvm.build(s2, [A, W, C], device)
            func2(a, w, c)
            tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)
    for device in ["cuda", "opencl", "rocm"]:
        check_device(device)
if __name__ == "__main__":
    # Entry point: run the conv2d HWCN benchmark/correctness test.
    test_conv2d_hwcn_map()
| 3,341 | 32.089109 | 99 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.