repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
db434/nn-restrict | models/resnet.py | import torch.nn as nn
import math
import structured.fully_connected as fc
# Model constructors defined in this module, grouped by the dataset they
# are designed for. CIFAR-10 entries use the shallower ResNetCifar variant.
models = {"CIFAR-10": ["resnet20", "resnet32", "resnet44", "resnet56",
                       "resnet110"],
          "ImageNet": ["resnet18", "resnet34", "resnet50", "resnet101",
                       "resnet152"]}
def conv3x3(in_planes, out_planes, conv2d, args, stride=1):
    """Build a bias-free 3x3 convolution whose padding preserves the
    spatial dimensions (subject to stride)."""
    return conv2d(in_planes, out_planes, kernel_size=3, padding=1,
                  stride=stride, bias=False, args=args)
class BasicBlock(nn.Module):
    """Residual block with two 3x3 convolutions.

    Batch-norm layers are deliberately absent: normalisation is performed
    inside the modified convolution implementations (see db434's note).
    """

    expansion = 1  # Output channels == `planes` for this block type.

    def __init__(self, inplanes, planes, conv2d, args, stride=1,
                 downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, conv2d, args, stride)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes, conv2d, args)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.relu(self.conv1(x))
        out = self.conv2(out)
        # Shortcut connection; reshape it when the main path changed
        # resolution or channel count.
        shortcut = x if self.downsample is None else self.downsample(x)
        out += shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """Bottleneck residual block: 1x1 reduce, 3x3, then 1x1 expand.

    Batch-norm layers are deliberately absent: normalisation is performed
    inside the modified convolution implementations (see db434's note).
    """

    expansion = 4  # Ratio of the block's output channels to `planes`.

    def __init__(self, inplanes, planes, conv2d, args, stride=1,
                 downsample=None):
        """
        :param inplanes: Number of input channels.
        :param planes: Number of internal (bottleneck) channels.
        :param conv2d: Convolution class used for all three layers.
        :param args: Extra arguments forwarded to each convolution.
        :param stride: Stride applied by the middle 3x3 convolution.
        :param downsample: Optional module matching the shortcut to the
            block's output shape (needed when stride != 1 or the channel
            count changes).
        """
        super(Bottleneck, self).__init__()
        self.conv1 = conv2d(inplanes, planes, kernel_size=1, bias=False,
                            args=args)
        self.conv2 = conv2d(planes, planes, kernel_size=3, stride=stride,
                            padding=1, bias=False, args=args)
        # Use the class's expansion attribute instead of a hard-coded 4 so
        # subclasses which override `expansion` remain consistent with
        # ResNet._make_layer, which sizes shortcuts from block.expansion.
        self.conv3 = conv2d(planes, planes * self.expansion, kernel_size=1,
                            bias=False, args=args)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Apply the three convolutions, add the (possibly downsampled)
        shortcut, and return the activated result."""
        residual = x

        out = self.conv1(x)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.relu(out)

        out = self.conv3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
class ResNet(nn.Module):
    """ResNet for ImageNet-scale inputs: a strided 7x7 stem, four residual
    stages and a 1x1 convolution acting as the classifier.

    Batch-norm layers were removed because normalisation is included
    within the modified convolution implementations.
    """

    def __init__(self, block, layers, input_channels=3, num_classes=1000,
                 conv2d=fc.Conv2d, args=None):
        """
        :param block: Residual block class (BasicBlock or Bottleneck).
        :param layers: Number of blocks in each of the four stages.
        :param input_channels: Channels in the input images.
        :param num_classes: Size of the output layer.
        :param conv2d: Convolution class for all layers except the stem.
        :param args: Extra arguments for the convolutions; must provide
            `width_multiplier`.
        """
        # db434: removed batch-norm layers because this is included within the
        # modified convolution implementations.
        # NOTE(review): assumes width_multiplier keeps every channel count
        # integral -- TODO confirm for fractional multipliers.
        w = args.width_multiplier
        # Running channel count; mutated by each _make_layer call, so the
        # four stages below must be built in order.
        self.inplanes = 64 * w
        super(ResNet, self).__init__()
        # The stem always uses the fully-connected convolution, regardless
        # of the `conv2d` chosen for the other layers.
        self.conv1 = fc.Conv2d(input_channels, 64*w, kernel_size=7, stride=2,
                               padding=3, bias=False, args=args)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64*w, layers[0], conv2d, args)
        self.layer2 = self._make_layer(block, 128*w, layers[1], conv2d, args,
                                       stride=2)
        self.layer3 = self._make_layer(block, 256*w, layers[2], conv2d, args,
                                       stride=2)
        # presumably sized for 224x224 inputs, which reach 7x7 here -- TODO
        # confirm for other input resolutions.
        self.layer4 = self._make_layer(block, 512*w, layers[3], conv2d, args,
                                       stride=2)
        self.avgpool = nn.AvgPool2d(7)
        # 1x1 convolution standing in for the usual fully-connected layer.
        self.fc = conv2d(512 * w * block.expansion, num_classes,
                         kernel_size=1, args=args)

        # He-style initialisation for plain nn.Conv2d modules. The
        # BatchNorm2d branch is kept even though no such layers are created
        # directly here; custom conv2d classes may still contain them.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, conv2d, args, stride=1):
        """Build one stage of `blocks` residual blocks.

        A 1x1 strided convolution reshapes the shortcut whenever the stride
        or channel count changes. Updates self.inplanes as a side effect.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv2d(self.inplanes, planes * block.expansion,
                       kernel_size=1, stride=stride, bias=False, args=args)
            )
        # Only the first block of a stage downsamples.
        layers = [block(self.inplanes, planes, conv2d, args, stride,
                        downsample)]
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, conv2d, args))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (batch, num_classes) logits for image batch `x`."""
        x = self.conv1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        # Flatten into a 1x1 "image" so the classifier can be a convolution.
        x = x.view(x.size(0), -1, 1, 1)
        x = self.fc(x)

        return x.view(x.size(0), -1)
class ResNetCifar(nn.Module):
    """Specialisation of ResNet for the CIFAR-10 dataset. Can still be applied
    to ImageNet, but won't downsample very much.

    The first layer is different, and there are only three blocks of layers
    instead of four.
    """

    def __init__(self, block, layers, input_channels=3, num_classes=10,
                 conv2d=fc.Conv2d, args=None):
        """
        :param block: Residual block class (BasicBlock in practice).
        :param layers: Number of blocks in each of the three stages.
        :param input_channels: Channels in the input images.
        :param num_classes: Size of the output layer.
        :param conv2d: Convolution class for all layers except the stem.
        :param args: Extra arguments for the convolutions; must provide
            `width_multiplier`.
        """
        # NOTE(review): assumes width_multiplier keeps every channel count
        # integral -- TODO confirm for fractional multipliers.
        w = args.width_multiplier
        # Running channel count; mutated by each _make_layer call, so the
        # three stages below must be built in order.
        self.inplanes = 16 * w
        super(ResNetCifar, self).__init__()
        # The stem always uses the fully-connected convolution, regardless
        # of the `conv2d` chosen for the other layers.
        self.conv1 = fc.Conv2d(input_channels, 16*w, kernel_size=3,
                               padding=1, bias=False, args=args)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = self._make_layer(block, 16*w, layers[0], conv2d, args)
        self.layer2 = self._make_layer(block, 32*w, layers[1], conv2d, args,
                                       stride=2)
        self.layer3 = self._make_layer(block, 64*w, layers[2], conv2d, args,
                                       stride=2)
        # presumably sized for 32x32 inputs, which reach 8x8 here -- TODO
        # confirm for other input resolutions.
        self.avgpool = nn.AvgPool2d(8)
        # 1x1 convolution standing in for the usual fully-connected layer.
        self.fc = conv2d(64*w*block.expansion, num_classes,
                         kernel_size=1, args=args)

        # He-style initialisation for plain nn.Conv2d modules. The
        # BatchNorm2d branch is kept even though no such layers are created
        # directly here; custom conv2d classes may still contain them.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, conv2d, args, stride=1):
        """Build one stage of `blocks` residual blocks.

        A 1x1 strided convolution reshapes the shortcut whenever the stride
        or channel count changes. Updates self.inplanes as a side effect.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = conv2d(self.inplanes, planes * block.expansion,
                                kernel_size=1, stride=stride, bias=False,
                                args=args)
        # Only the first block of a stage downsamples.
        layers = [block(self.inplanes, planes, conv2d, args, stride,
                        downsample)]
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, conv2d, args))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return (batch, num_classes) logits for image batch `x`."""
        x = self.conv1(x)
        x = self.relu(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)

        x = self.avgpool(x)
        # Flatten into a 1x1 "image" so the classifier can be a convolution.
        x = x.view(x.size(0), -1, 1, 1)
        x = self.fc(x)

        return x.view(x.size(0), -1)
def resnet18(**kwargs):
    """Constructs a ResNet-18 model."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet20(**kwargs):
    """Constructs a ResNet-20 model for CIFAR-10. Paper claims 8.75% error."""
    return ResNetCifar(BasicBlock, [3, 3, 3], **kwargs)
def resnet32(**kwargs):
    """Constructs a ResNet-32 model for CIFAR-10. Paper claims 7.51% error."""
    return ResNetCifar(BasicBlock, [5, 5, 5], **kwargs)
def resnet34(**kwargs):
    """Constructs a ResNet-34 model."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet44(**kwargs):
    """Constructs a ResNet-44 model for CIFAR-10. Paper claims 7.17% error."""
    return ResNetCifar(BasicBlock, [7, 7, 7], **kwargs)
def resnet50(**kwargs):
    """Constructs a ResNet-50 model.

    Pretrained: 23.85%/7.13% top1/top5 error."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet56(**kwargs):
    """Constructs a ResNet-56 model for CIFAR-10. Paper claims 6.97% error."""
    return ResNetCifar(BasicBlock, [9, 9, 9], **kwargs)
def resnet101(**kwargs):
    """Constructs a ResNet-101 model."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def resnet110(**kwargs):
    """Constructs a ResNet-110 model for CIFAR-10. Paper claims 6.43% error."""
    return ResNetCifar(BasicBlock, [18, 18, 18], **kwargs)
def resnet152(**kwargs):
    """Constructs a ResNet-152 model."""
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
|
db434/nn-restrict | modifiers/__init__.py | <reponame>db434/nn-restrict<filename>modifiers/__init__.py
from . import numbers
|
db434/nn-restrict | util/__init__.py | <gh_stars>0
__all__ = ["args", "checkpoint", "log", "stats"]
from . import args
from . import checkpoint
from . import log
from . import stats
|
db434/nn-restrict | models/squeezenet.py | <reponame>db434/nn-restrict<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.init as init
import structured.fully_connected as fc
# Model constructors defined in this module, keyed by the dataset they
# are designed for.
models = {"ImageNet": ["squeezenet1_0", "squeezenet1_1"]}
class Fire(nn.Module):
    """SqueezeNet Fire module.

    A 1x1 "squeeze" convolution feeds two parallel "expand" convolutions
    (1x1 and 3x3) whose outputs are concatenated along the channel axis.
    """

    def __init__(self, inplanes, squeeze_planes,
                 expand1x1_planes, expand3x3_planes, conv2d, args):
        super(Fire, self).__init__()
        self.inplanes = inplanes
        self.squeeze = conv2d(inplanes, squeeze_planes, kernel_size=1,
                              args=args)
        self.squeeze_activation = nn.ReLU(inplace=True)
        self.expand1x1 = conv2d(squeeze_planes, expand1x1_planes,
                                kernel_size=1, args=args)
        self.expand1x1_activation = nn.ReLU(inplace=True)
        self.expand3x3 = conv2d(squeeze_planes, expand3x3_planes,
                                kernel_size=3, padding=1, args=args)
        self.expand3x3_activation = nn.ReLU(inplace=True)

    def forward(self, x):
        squeezed = self.squeeze_activation(self.squeeze(x))
        left = self.expand1x1_activation(self.expand1x1(squeezed))
        right = self.expand3x3_activation(self.expand3x3(squeezed))
        # Channel-wise concatenation of the two expand branches.
        return torch.cat([left, right], 1)
class SqueezeNet(nn.Module):
    """SqueezeNet image classifier supporting architecture versions 1.0
    and 1.1."""

    def __init__(self, version=1.0, input_channels=3, num_classes=1000,
                 conv2d=fc.Conv2d, args=None):
        """
        :param version: Architecture version; 1.0 or 1.1.
        :param input_channels: Channels in the input images.
        :param num_classes: Size of the output layer.
        :param conv2d: Convolution class for all layers except the stem.
        :param args: Extra arguments for the convolutions; must provide
            `width_multiplier`.
        :raises ValueError: If `version` is not 1.0 or 1.1.
        """
        super(SqueezeNet, self).__init__()
        w = args.width_multiplier  # Very short name because used often below

        if version not in [1.0, 1.1]:
            raise ValueError("Unsupported SqueezeNet version {version}:"
                             "1.0 or 1.1 expected".format(version=version))

        self.num_classes = num_classes
        if version == 1.0:
            self.features = nn.Sequential(
                fc.Conv2d(input_channels, 96*w, kernel_size=7, stride=2,
                          args=args),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(96*w, 16*w, 64*w, 64*w, conv2d, args),
                Fire(128*w, 16*w, 64*w, 64*w, conv2d, args),
                Fire(128*w, 32*w, 128*w, 128*w, conv2d, args),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256*w, 32*w, 128*w, 128*w, conv2d, args),
                Fire(256*w, 48*w, 192*w, 192*w, conv2d, args),
                Fire(384*w, 48*w, 192*w, 192*w, conv2d, args),
                Fire(384*w, 64*w, 256*w, 256*w, conv2d, args),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(512*w, 64*w, 256*w, 256*w, conv2d, args),
            )
        else:
            self.features = nn.Sequential(
                # Bug fix: `args` was not forwarded to this stem
                # convolution, unlike the 1.0 branch above.
                fc.Conv2d(input_channels, 64*w, kernel_size=3, stride=2,
                          args=args),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(64*w, 16*w, 64*w, 64*w, conv2d, args),
                Fire(128*w, 16*w, 64*w, 64*w, conv2d, args),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(128*w, 32*w, 128*w, 128*w, conv2d, args),
                Fire(256*w, 32*w, 128*w, 128*w, conv2d, args),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256*w, 48*w, 192*w, 192*w, conv2d, args),
                Fire(384*w, 48*w, 192*w, 192*w, conv2d, args),
                Fire(384*w, 64*w, 256*w, 256*w, conv2d, args),
                Fire(512*w, 64*w, 256*w, 256*w, conv2d, args),
            )
        # Final convolution is initialized differently from the rest
        final_conv = conv2d(512*w, self.num_classes, kernel_size=1, args=args)
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),  # TODO remove this dropout?
            final_conv,
            nn.ReLU(inplace=True),
            nn.AvgPool2d(13)
        )

        # NOTE(review): init.normal/init.kaiming_uniform are the legacy
        # (pre-0.4) names; switch to normal_/kaiming_uniform_ when the
        # project's torch version is upgraded.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal(m.weight.data, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()

    def forward(self, x):
        """Run the feature extractor then the convolutional classifier,
        returning (batch, num_classes) scores."""
        x = self.features(x)
        x = self.classifier(x)
        return x.view(x.size(0), self.num_classes)
def squeezenet1_0(**kwargs):
    r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level
    accuracy with 50x fewer parameters and <0.5MB model size"
    <https://arxiv.org/abs/1602.07360>`_ paper.
    """
    return SqueezeNet(version=1.0, **kwargs)
def squeezenet1_1(**kwargs):
    r"""SqueezeNet 1.1 model from the `official SqueezeNet repo
    <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.

    SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
    than SqueezeNet 1.0, without sacrificing accuracy.
    """
    return SqueezeNet(version=1.1, **kwargs)
|
db434/nn-restrict | training/lr_schedule.py | import abc
import math
# TODO: These classes are very similar to (but perhaps more general than)
# those in torch.optim.lr_scheduler. Consider using those instead.
class LRSchedule(object):
    """Abstract base class for learning rate schedules. Not to be used
    directly; subclasses implement get_learning_rate."""

    def __init__(self):
        return

    @abc.abstractmethod
    def get_learning_rate(self, epoch):
        """
        Compute the learning rate to use at the given epoch.

        :param epoch: The current epoch.
        :return: The learning rate to use.
        """
        return
class StepSchedule(LRSchedule):
    """A schedule where the learning rate stays constant between
    predefined drop points."""

    def __init__(self, initial_lr, steps):
        """
        Create a schedule.

        :param initial_lr: Learning rate at epoch 0.
        :param steps: List of (epoch, factor) pairs, in increasing epoch
            order. From `epoch` onwards, the rate is multiplied by `factor`
            (e.g. a factor of 0.1 divides the learning rate by 10).
        """
        super(StepSchedule, self).__init__()
        self.initial_lr = initial_lr
        self.steps = steps

    def get_learning_rate(self, epoch):
        """Return the initial rate scaled by every drop whose epoch has
        already been reached."""
        rate = self.initial_lr
        for drop_epoch, factor in self.steps:
            if epoch < drop_epoch:
                break
            rate *= factor
        return rate
class LinearSchedule(LRSchedule):
    """A schedule which moves the learning rate by a constant amount each
    epoch, interpolating between an initial and a final value."""

    def __init__(self, initial_lr, final_lr, epochs):
        """
        Create a schedule.

        :param initial_lr: Learning rate at epoch 0.
        :param final_lr: Learning rate in the final epoch.
        :param epochs: Total number of epochs.
        """
        super(LinearSchedule, self).__init__()
        self.initial_lr = initial_lr
        self.final_lr = final_lr
        self.epochs = epochs

    def get_learning_rate(self, epoch):
        """Linearly interpolate between the initial and final rates."""
        progress = epoch / self.epochs
        return self.initial_lr + progress * (self.final_lr - self.initial_lr)
class CosineRestartSchedule(LRSchedule):
    """A schedule which continuously reduces the learning rate along a
    cosine curve, then jumps straight back to the initial rate at the start
    of each period instead of following the curve back up.

    Based on https://arxiv.org/abs/1608.03983"""

    def __init__(self, initial_lr, period):
        """
        Create a schedule.

        :param initial_lr: Learning rate at epoch 0.
        :param period: Number of epochs taken to anneal the learning rate
            to zero; it returns to the initial value one step later.
        """
        super(CosineRestartSchedule, self).__init__()
        self.initial_lr = initial_lr
        self.period = period

    def get_learning_rate(self, epoch):
        """Return the cosine-annealed rate for `epoch`'s position within
        the current period."""
        phase = epoch % self.period
        # cos lies in [-1, 1]; adding 1 and halving maps it to a scale
        # factor in [0, 1].
        return 0.5 * self.initial_lr * (1 + math.cos(math.pi * phase /
                                                     self.period))
|
db434/nn-restrict | models/mobilenet.py | <gh_stars>0
import torch.nn as nn
import structured.fully_connected as fc
# Model constructors defined in this module, keyed by dataset.
# NOTE(review): the other model modules map each dataset to a *list* of
# names, but this maps to a bare string -- confirm that consumers handle
# both forms (iterating a string yields characters, not names).
models = {"ImageNet": "mobilenet"}
# Implementation based on https://github.com/marvis/pytorch-mobilenet
class MobileNet(nn.Module):
    """MobileNet v1: a strided stem followed by a stack of depthwise
    separable convolution blocks and a 1x1 convolution classifier."""

    def __init__(self, input_channels=3, num_classes=1000, conv2d=fc.Conv2d,
                 args=None):
        """
        :param input_channels: Channels in the input images.
        :param num_classes: Size of the output layer.
        :param conv2d: Convolution class for all layers except the stem.
        :param args: Extra arguments for the convolutions; must provide
            `width_multiplier`.
        """
        super(MobileNet, self).__init__()
        # NOTE(review): channel counts are `width * constant`; assumes the
        # multiplier keeps them integral -- TODO confirm.
        self.width = args.width_multiplier
        self.num_classes = num_classes

        self.model = nn.Sequential(
            self.conv_bn(input_channels, 32*self.width, 2, args),
            self.conv_dw(32*self.width, 64*self.width, 1, conv2d, args),
            self.conv_dw(64*self.width, 128*self.width, 2, conv2d, args),
            self.conv_dw(128*self.width, 128*self.width, 1, conv2d, args),
            self.conv_dw(128*self.width, 256*self.width, 2, conv2d, args),
            self.conv_dw(256*self.width, 256*self.width, 1, conv2d, args),
            self.conv_dw(256*self.width, 512*self.width, 2, conv2d, args),
            self.conv_dw(512*self.width, 512*self.width, 1, conv2d, args),
            self.conv_dw(512*self.width, 512*self.width, 1, conv2d, args),
            self.conv_dw(512*self.width, 512*self.width, 1, conv2d, args),
            self.conv_dw(512*self.width, 512*self.width, 1, conv2d, args),
            self.conv_dw(512*self.width, 512*self.width, 1, conv2d, args),
            self.conv_dw(512*self.width, 1024*self.width, 2, conv2d, args),
            self.conv_dw(1024*self.width, 1024*self.width, 1, conv2d, args),
            # presumably sized for 224x224 inputs, which reach 7x7 here --
            # TODO confirm.
            nn.AvgPool2d(7),
        )
        # 1x1 convolution standing in for the usual fully-connected layer.
        self.fc = conv2d(1024*self.width, num_classes, kernel_size=1, args=args)

    @staticmethod
    def conv_bn(inputs, outputs, stride, args):
        """Stem block: full 3x3 convolution plus ReLU."""
        # Removed batch-norm layers because they are included within the
        # modified convolution implementations.
        return nn.Sequential(
            fc.Conv2d(inputs, outputs, 3, stride, 1, bias=False, args=args),
            nn.ReLU(inplace=True)
        )

    @staticmethod
    def conv_dw(inputs, outputs, stride, conv2d, args):
        """Depthwise separable block: 3x3 depthwise (groups=inputs)
        convolution followed by a 1x1 pointwise convolution."""
        # Removed batch-norm layers because they are included within the
        # modified convolution implementations.
        return nn.Sequential(
            conv2d(inputs, inputs, 3, stride, 1, groups=inputs, bias=False,
                   args=args),
            nn.ReLU(inplace=True),

            conv2d(inputs, outputs, 1, 1, 0, bias=False, args=args),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        """Return (batch, num_classes) logits for image batch `x`."""
        x = self.model(x)
        # Flatten into a 1x1 "image" so the classifier can be a convolution.
        x = x.view(-1, int(1024 * self.width), 1, 1)
        x = self.fc(x)
        return x.view(-1, self.num_classes)
def mobilenet(**kwargs):
    """Construct a MobileNet model."""
    return MobileNet(**kwargs)
|
db434/nn-restrict | tests/test_quantisers.py | from modifiers.numbers import *
def _check_equal(input_data, modifier, expected_result):
"""
Verify that modifier(input_tensor) == expected_result. Throws an assertion
error if not.
:param input_data: array of arbitrary dimensions.
:param modifier: function which takes a tensor and returns a tensor of
the same size.
:param expected_result: array of same size as input_tensor.
"""
# Some operations fail if the input is not a Variable - I'm not sure why.
input_tensor = torch.autograd.Variable(torch.Tensor(input_data))
output_tensor = torch.Tensor(expected_result)
assert torch.equal(modifier(input_tensor).data, output_tensor)
def _noise_test():
    """Test whether noise is added properly. Due to the randomness involved,
    this test is not perfect. It checks that most values have changed,
    and that the largest change is less than or equal to the amount
    requested."""
    data = torch.autograd.Variable(torch.Tensor([0.0] * 100))
    values = noise_fn(1.0)(data).data

    increased = 0
    decreased = 0
    for value in values:
        assert -1.0 <= value <= 1.0
        if value > 0.0:
            increased += 1
        elif value < 0.0:
            decreased += 1

    # Ensure we're not left with zeroes, and that roughly the same number of
    # values have been increased as have been decreased.
    # These could fail if we're incredibly unlucky.
    assert increased + decreased > 95
    assert 35 < increased < 65
    assert 35 < decreased < 65
def _precision_test():
    """Test whether values are correctly rounded."""
    data = [1.1, 0.6, 1.0, 1.5, 2.499]
    _check_equal(data, precision_fn(1.0), [1.0, 1.0, 1.0, 2.0, 2.0])
    _check_equal(data, precision_fn(0.5), [1.0, 0.5, 1.0, 1.5, 2.5])
def _cap_test():
    """Test whether upper bounds (on magnitudes) work correctly."""
    data = [-2.0, -1.0, 0.0, 1.0, 2.0]
    _check_equal(data, cap_fn(5.0), [-2.0, -1.0, 0.0, 1.0, 2.0])
    _check_equal(data, cap_fn(1.0), [-1.0, -1.0, 0.0, 1.0, 1.0])
    _check_equal(data, cap_fn(0.5), [-0.5, -0.5, 0.0, 0.5, 0.5])
def _threshold_test():
    """Test whether lower bounds (on magnitudes) work correctly."""
    data = [-2.0, -1.0, 0.0, 1.0, 2.0]
    _check_equal(data, threshold_fn(0.5), [-2.0, -1.0, 0.0, 1.0, 2.0])
    _check_equal(data, threshold_fn(1.0), [-2.0, 0.0, 0.0, 0.0, 2.0])
    _check_equal(data, threshold_fn(5.0), [0.0, 0.0, 0.0, 0.0, 0.0])
def _quantiser_test():
    """Run every quantiser test, then report success."""
    for test in (_noise_test, _precision_test, _cap_test, _threshold_test):
        test()
    print("All tests passed.")
# Allow the test suite to be run directly as a script.
if __name__ == "__main__":
    _quantiser_test()
|
db434/nn-restrict | datasets/ImageNet.py | import os
import torch.utils.data as data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import locations
# TODO subclass an abstract Dataset class.
class ImageNet(object):
    """Dataset wrapper providing data loaders and training defaults for
    ImageNet."""

    # Standard ImageNet channel statistics used for input normalisation.
    _normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225])

    # Some sensible defaults.
    name = "ImageNet"
    default_model = "alexnet"
    location = locations.imagenet

    # See training.lr_schedule.py for explanation.
    default_lr = 0.1
    # Drop the learning rate 10x at epochs 30 and 60. StepSchedule treats
    # each entry's epoch as absolute, so the previous value of
    # [(30, 0.1), (30, 0.1)] applied both drops simultaneously at epoch 30
    # (a 100x drop) and none afterwards.
    default_lr_steps = [(30, 0.1), (60, 0.1)]
    default_epochs = 90

    # classes = ???

    @staticmethod
    def input_channels():
        """Number of channels in each input image (RGB)."""
        return 3

    @staticmethod
    def num_classes():
        """Number of target classes."""
        return 1000

    @staticmethod
    def data_loaders(num_workers, batch_size, distributed=False):
        """Return train and validation data loaders for the ImageNet dataset."""
        return ImageNet.train_loader(num_workers, batch_size, distributed), \
            ImageNet.val_loader(num_workers, batch_size)

    @staticmethod
    def train_loader(num_workers, batch_size, distributed):
        """Loader for the training split, with standard augmentation
        (random resized crop and horizontal flip)."""
        transform = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            ImageNet._normalize,
        ])
        dataset = datasets.ImageFolder(os.path.join(ImageNet.location, "train"),
                                       transform)

        if distributed:
            sampler = data.distributed.DistributedSampler(dataset)
        else:
            sampler = None

        # Shuffling is delegated to the sampler when one is used.
        loader = data.DataLoader(
            dataset, batch_size=batch_size, shuffle=(sampler is None),
            num_workers=num_workers, pin_memory=True, sampler=sampler)

        return loader

    @staticmethod
    def val_loader(num_workers, batch_size):
        """Loader for the validation split (deterministic centre crop)."""
        transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            ImageNet._normalize,
        ])
        dataset = datasets.ImageFolder(os.path.join(ImageNet.location, "val"),
                                       transform)

        loader = data.DataLoader(
            dataset, batch_size=batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True)

        return loader

    @staticmethod
    def test_loader(num_workers, batch_size):
        """Loader for the test split (deterministic centre crop)."""
        transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            ImageNet._normalize,
        ])
        dataset = datasets.ImageFolder(os.path.join(ImageNet.location, "test"),
                                       transform)

        loader = data.DataLoader(
            dataset, batch_size=batch_size, shuffle=False,
            num_workers=num_workers, pin_memory=True)

        return loader
|
db434/nn-restrict | graph.py | <reponame>db434/nn-restrict
import argparse
from collections import OrderedDict
import csv
import matplotlib.pyplot as plt
import structured
from util import log
def get_fields(csv_file, fields):
    """Return a dictionary mapping each name in `fields` to the sequence of
    values seen for that field in the given file.
    """
    result = OrderedDict((field, []) for field in fields)

    with open(csv_file) as f:
        for row in csv.DictReader(f):
            for field in fields:
                result[field].append(row[field])

    return result
def plot(data, colours, line_styles):
    """Plot the given data.

    Expect to receive a dict mapping experiment names to results. Each result is
    itself a dict mapping a type of data to a sequence of values.

    Each experiment has a different colour, and each type of data has a
    different line style.

    :param data: {experiment name: {series label: values, one per epoch}}.
    :param colours: {experiment name: matplotlib colour code}.
    :param line_styles: {series label: matplotlib line style}.
    """
    plt.style.use("seaborn-poster")  # See also seaborn-talk and seaborn-paper.
    plt.grid(axis="y", linestyle="-", color="#d8dcd6")

    for name, experiment in data.items():
        for label, series in experiment.items():
            # Epochs are 1-indexed on the x-axis.
            epochs = range(1, len(series) + 1)
            line, = plt.plot(epochs, series, color=colours[name],
                             linestyle=line_styles[label])
            line.set_label(name + " " + label)

    plt.legend(loc="lower right")
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy")
    plt.show()
def get_conv_type(filename):
    """Extract the convolution type from the name of the log file. Assumes the
    log file has been created using this tool."""
    matches = [ct for ct in structured.conv2d_types.keys() if ct in filename]
    if matches:
        return matches[0]
    log.error("Couldn't detect convolution type of", filename)
    exit(1)
def main():
    """Parse command-line arguments and plot training curves from the
    given CSV log files."""
    parser = argparse.ArgumentParser(description="Plot training curves")
    parser.add_argument("filename", type=str, nargs="+",
                        help="CSV file containing training data")
    args = parser.parse_args()

    series_names = ["Val top1", "Train top1"]
    results = OrderedDict()
    for name in args.filename:
        results[get_conv_type(name)] = get_fields(name, series_names)

    line_styles = {"Val top1": "-", "Train top1": ":"}
    colours = {"fc": "r", "separable": "g", "shuffle": "b", "butterfly": "c",
               "roots": "m", "shift": "y", "hadamard": "k"}

    plot(results, colours, line_styles)
# Allow the plotting tool to be run directly as a script.
if __name__ == "__main__":
    main()
|
db434/nn-restrict | models/alexnet.py | <filename>models/alexnet.py
import torch.nn as nn
import structured.fully_connected as fc
# Model constructors defined in this module, keyed by dataset.
# NOTE(review): maps to a bare string rather than a list, matching
# mobilenet.py but not resnet.py/squeezenet.py -- confirm consumers accept
# both forms.
models = {"ImageNet": "alexnet"}
class AlexNet(nn.Module):
    """AlexNet with all fully-connected layers replaced by 1x1
    convolutions, so every layer can use the project's modified
    convolution implementations."""

    def __init__(self, input_channels=3, num_classes=1000, conv2d=fc.Conv2d,
                 args=None):
        """
        :param input_channels: Channels in the input images.
        :param num_classes: Size of the output layer.
        :param conv2d: Convolution class for all layers except the stem.
        :param args: Extra arguments for the convolutions; must provide
            `width_multiplier`.
        """
        super(AlexNet, self).__init__()
        # NOTE(review): channel counts are `width * constant`; assumes the
        # multiplier keeps them integral -- TODO confirm (only the 256*w
        # term below is explicitly cast to int).
        self.width = args.width_multiplier
        w = self.width  # Super short name
        self.num_classes = num_classes
        # The stem always uses the fully-connected convolution, regardless
        # of the `conv2d` chosen for the other layers.
        self.features = nn.Sequential(
            fc.Conv2d(input_channels, 64*w, kernel_size=11, stride=4,
                      padding=2, args=args),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            conv2d(64*w, 192*w, kernel_size=5, padding=2, args=args),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            conv2d(192*w, 384*w, kernel_size=3, padding=1, args=args),
            nn.ReLU(inplace=True),
            conv2d(384*w, 256*w, kernel_size=3, padding=1, args=args),
            nn.ReLU(inplace=True),
            conv2d(256*w, 256*w, kernel_size=3, padding=1, args=args),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        # The classifier mirrors AlexNet's three fully-connected layers,
        # expressed as 1x1 convolutions (originals kept for reference).
        self.classifier = nn.Sequential(
            nn.Dropout(),
            # nn.Linear(256 * 6 * 6 * w, 4096),
            conv2d(int(256*w)*6*6, 4096*w, kernel_size=1, args=args),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            # nn.Linear(4096, 4096),
            conv2d(4096*w, 4096*w, kernel_size=1, args=args),
            nn.ReLU(inplace=True),
            # nn.Linear(4096, num_classes),
            conv2d(4096*w, num_classes, kernel_size=1, args=args),
            # fc.Conv2d(4096 * w, num_classes, kernel_size=1),
        )

    def forward(self, x):
        """Return (batch, num_classes) logits for image batch `x`.

        Assumes 224x224 inputs so the features stage yields a 6x6 map --
        TODO confirm for other resolutions.
        """
        x = self.features(x)
        # x = x.view(x.size(0), 256 * 6 * 6 * self.width)
        # Flatten into a 1x1 "image" so the classifier can be convolutional.
        x = x.view(x.size(0), int(256 * self.width) * 6 * 6, 1, 1)
        x = self.classifier(x)
        return x.view(x.size(0), self.num_classes)
def alexnet(**kwargs):
    r"""AlexNet model architecture from the
    `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.

    Pretrained: 43.45%/20.91% top1/top5 error.
    """
    return AlexNet(**kwargs)
|
db434/nn-restrict | training/distillation.py | <reponame>db434/nn-restrict
import os.path
import torch
import torch.nn.functional as F
import locations
import models
import structured
from util import checkpoint, log
from . import trainer
class Trainer(trainer.Trainer):
    """
    Class which trains a model using an experienced teacher model
    (knowledge distillation).
    """

    def __init__(self, dataset, model, schedule, args, optimiser=None,
                 criterion=None, teacher=None):
        """
        :param dataset: Dataset to train on.
        :param model: Student model to be trained.
        :param schedule: Learning rate schedule.
        :param args: Remaining training arguments (passed to superclass).
        :param optimiser: Optional optimiser; superclass default if None.
        :param criterion: Optional loss; see _default_criterion for the
            expected signature.
        :param teacher: Optional teacher model; a pretrained default is
            loaded when omitted.
        """
        super(Trainer, self).__init__(dataset, model, schedule, args,
                                      optimiser, criterion)

        if teacher is None:
            self.teacher = self._default_teacher(dataset)
        else:
            self.teacher = teacher

    def minibatch(self, input_data, target):
        """
        Pass one minibatch of data through the model.

        :param input_data: Tensor of input data.
        :param target: Ground truth output data.
        :return: Output produced by the model and the loss.
        """
        input_var = torch.autograd.Variable(input_data)
        target_var = torch.autograd.Variable(target)

        # Compute output
        # In some cases it would be quicker to evaluate all teacher outputs in
        # advance, but this can require huge storage for some datasets.
        output = self.model(input_var)
        teacher_output = self.teacher(input_var)

        # TODO get an alpha and a temperature from somewhere. Alpha might
        # even need a schedule.
        alpha = 0.9
        temperature = 20.0
        loss = self.criterion(output, teacher_output, target_var, alpha,
                              temperature)

        return output, loss

    @staticmethod
    def _default_criterion():
        """
        Create a default criterion used to evaluate the loss of the model.
        This version is different from all the criteria in PyTorch because it
        must also receive the teacher's output.

        Based on
        https://arxiv.org/abs/1312.6184
        https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py

        :return: A function which computes the teacher-student loss.
        """
        def kd_loss(student, teacher, target, alpha, temperature):
            """
            Compute the loss in a teacher-student setting.

            :param student: Output from student network.
            :param teacher: Output from teacher network.
            :param target: Target output.
            :param alpha: Weight given to teacher vs target. 1.0 = all loss
                comes from teacher, 0.0 = all loss comes from target.
            :param temperature: Softmax temperature. High = flat
                distributions, low = spiky distributions.
            :return: Loss of student output.
            """
            normal_loss = F.cross_entropy(student, target)
            # Inputs need to be logarithmic, targets need to be linear.
            # NOTE(review): F.kl_div is used with its default reduction;
            # KD reference implementations often pass "batchmean" -- confirm
            # the intended loss scaling.
            teacher_loss = F.kl_div(F.log_softmax(student / temperature, dim=1),
                                    F.softmax(teacher / temperature, dim=1))
            # Scale by T^2 so gradient magnitudes stay comparable across
            # temperatures (standard distillation practice).
            teacher_loss *= temperature ** 2

            return (alpha * teacher_loss) + ((1.0 - alpha) * normal_loss)

        return kd_loss

    def _default_teacher(self, dataset):
        """
        Load a model suitable for teaching with the given dataset.

        :param dataset: The dataset to be used for training.
        :return: A model which achieves high accuracy on the dataset.
        """
        # TODO: Offer to train a teacher if not found?
        directory = locations.teachers
        # Known pretrained teacher checkpoints, keyed by dataset name.
        teachers = {
            "CIFAR-10": {
                "name": "resnet56",
                "args": {"width_multiplier": 2},
                "filename": "resnet56-2x-fc.pth.tar"
            },
            "ImageNet": {
                "name": "resnet50",
                "args": {"width_multiplier": 2,
                         "conv_type": structured.depthwise_separable.Conv2d},
                "filename": "resnet50-2x-separable.pth.tar"
            }
        }

        if dataset.name in teachers:
            log.info("Creating teacher network.")
            details = teachers[dataset.name]
            model = models.get_model(details["name"],
                                     distributed=self.distributed,
                                     use_cuda=self.use_cuda, **details["args"])

            # An optimiser is stored with the model, but it isn't needed here.
            # Create a dummy optimiser. (Assuming SGD.)
            optimiser = torch.optim.SGD(model.parameters(), 0)

            # When loading state, data is returned to allow resumption of
            # training. We don't care about that here.
            path = os.path.join(directory, details["filename"])
            _, _ = checkpoint.load_path(path, model, optimiser)

            # Put the teacher into teaching mode: switch off dropout layers,
            # etc. and prevent any weight updates.
            model.eval()
            for param in model.parameters():
                param.requires_grad = False

            return model
        else:
            log.error("No teacher available for", dataset.name, "dataset")
            exit(1)
|
db434/nn-restrict | cifar10.py | <filename>cifar10.py
import common
import datasets
def main():
    """Run the shared training/evaluation pipeline on CIFAR-10."""
    common.process(dataset=datasets.Cifar10)
# Allow this entry point to be run directly as a script.
if __name__ == '__main__':
    main()
|
db434/nn-restrict | util/log.py | import sys
def info(*args, **kwargs):
    """Report low-priority information. Has the same interface as `print`.

    Output goes to stderr so it stays separate from program results on
    stdout.
    """
    print(*args, file=sys.stderr, **kwargs)
def error(*args, **kwargs):
    """Report high-priority information. Has the same interface as `print`.

    Currently identical to `info` (both write to stderr); the distinction
    is purely semantic for callers.
    """
    print(*args, file=sys.stderr, **kwargs)
|
db434/nn-restrict | modifiers/numbers.py | <filename>modifiers/numbers.py
"""
Module with various options for controlling number formats in the network.
"""
from collections.abc import Iterable
import torch.nn.functional
from .modules import *
def transform_gradients(model, transformation):
    """
    Apply a transformation to all gradient computations in the model.

    :param model: Model (or part of model) to apply the transformations on.
    :param transformation: Function which takes a tensor and returns a
    transformed tensor. A list of such functions is also accepted and will be
    applied in order.
    """
    # Ensure the interface of the functions matches the way we receive
    # gradients.
    hook = _wrap_for_gradients(transformation)

    # Hook only bottom-level modules; hooking their parents as well would
    # apply the same transformation multiple times.
    # TODO: Check whether this quantises gradients of parameters. If not,
    # consider using Tensor.register_hook() on all parameters.
    for module in model.modules():
        if not list(module.children()):
            module.register_backward_hook(hook)
def transform_activations(model, transformation):
    """
    Apply a transformation to all activations in the model.

    :param model: Model (or part of model) to apply the transformations on.
    :param transformation: Function which takes a tensor and returns a
    transformed tensor. A list of such functions is also accepted and will be
    applied in order.
    """
    # Ensure the interface of the functions matches the way we receive
    # activations.
    wrapped = _wrap_for_activations(transformation)

    # Activations are only modified at Quantiser modules.
    for module in model.modules():
        if isinstance(module, Quantiser):
            module.set_quantisation(wrapped)
def transform_weights(model, transformation):
    """
    Apply a transformation to all weights in the model.

    :param model: Model (or part of model) to apply the transformations on.
    :param transformation: Function which takes a tensor and returns a
    transformed tensor. A list of such functions is also accepted and will be
    applied in order.
    """
    # Ensure the interface of the functions matches the way we receive
    # weights.
    wrapped = _wrap_for_weights(transformation)

    # Only modules which expose a weight-transform hook can be modified.
    for module in model.modules():
        if hasattr(module, "set_weight_transform"):
            module.set_weight_transform(wrapped)
def restrict_gradients(model, minimum=0.0, maximum=0.0, noise=0.0,
                       precision=0.0):
    """Place restrictions on possible gradient values for all layers of the
    model."""
    transforms = _get_modifiers(minimum, maximum, noise, precision)
    # Skip the hook machinery entirely when no restriction was requested.
    if transforms:
        transform_gradients(model, transforms)
def restrict_activations(model, minimum=0.0, maximum=0.0, noise=0.0,
                         precision=0.0):
    """Place restrictions on possible activation values for all layers of the
    model."""
    transforms = _get_modifiers(minimum, maximum, noise, precision)
    # Skip the hook machinery entirely when no restriction was requested.
    if transforms:
        transform_activations(model, transforms)
def restrict_weights(model, minimum=0.0, maximum=0.0, noise=0.0, precision=0.0):
    """Place restrictions on possible weight values for all layers of the
    model."""
    transforms = _get_modifiers(minimum, maximum, noise, precision)
    # Skip the hook machinery entirely when no restriction was requested.
    if transforms:
        transform_weights(model, transforms)
def _get_modifiers(minimum, maximum, noise, precision):
    """Return a list of functions which each receive a Tensor and return a
    Tensor of the same size."""
    # Functions are applied in the order they appear in this table.
    # Noise should definitely be added before any rounding, but I'm not sure
    # whether precision or thresholding should come first. I think precision.
    candidates = ((noise, noise_fn),
                  (precision, precision_fn),
                  (minimum, stochastic_threshold_fn),
                  (maximum, cap_fn))
    # A value of 0.0 (the default) disables the corresponding restriction.
    return [factory(value) for value, factory in candidates if value > 0.0]
def noise_fn(magnitude):
    """Returns a function which takes a Tensor as input and returns a new Tensor
    with random noise added. The amount of noise is in the range +/- magnitude.

    :param magnitude: Maximum absolute value of the uniform noise.
    """
    def add_noise(tensor):
        # empty_like allocates the noise on the same device and with the same
        # dtype as the input, replacing the deprecated torch.autograd.Variable
        # and torch.cuda.FloatTensor branches of the original implementation.
        # The new tensor does not require gradients, so the noise is treated
        # as a constant offset.
        noise = torch.empty_like(tensor).uniform_(-magnitude, magnitude)
        return tensor + noise
    return add_noise
def precision_fn(precision):
    """Returns a function which takes a Tensor as input and returns a new Tensor
    where all elements are the nearest multiple of `precision`."""
    def round_to_precision(tensor):
        # Scale so one unit represents `precision`, round, then scale back.
        return torch.round(tensor / precision) * precision
    return round_to_precision
def threshold_fn(value):
    """Returns a function which takes a Tensor as input and returns a new Tensor
    in which all values with magnitudes less than `value` have been replaced
    with 0."""
    def apply_threshold(tensor):
        # Keep elements whose magnitude strictly exceeds `value`; zero the
        # rest. This matches F.threshold applied to the absolute values.
        keep = tensor.abs() > value
        return torch.where(keep, tensor, torch.zeros_like(tensor))
    return apply_threshold
def stochastic_threshold_fn(value):
    """Returns a function which takes a Tensor as input and returns a new Tensor
    in which all values with magnitudes less than `value` have a chance of being
    replaced with 0. The probability for an input of i is `(value-i)/value`. All
    remaining elements less than `value` are replaced by `value`."""
    def apply_threshold(tensor):
        # Decompose inputs into signs and magnitudes so a single comparison
        # covers both positive and negative inputs.
        signs = torch.sign(tensor)
        magnitudes = torch.abs(tensor)
        # One uniform sample in [0, value) per element: an element is dropped
        # iff its magnitude is below its sample, so inputs at or above `value`
        # always survive and the drop probability scales linearly below it.
        # rand_like replaces the deprecated torch.autograd.Variable and
        # torch.cuda.FloatTensor code and works on any device/float dtype.
        rand = torch.rand_like(magnitudes) * value
        mask_low = magnitudes < rand
        # Promote all sub-threshold magnitudes to `value` first, then zero the
        # dropped subset (the zeroing mask wins on the overlap).
        mask_high = magnitudes < value
        magnitudes = magnitudes.masked_fill(mask_high, value)
        magnitudes = magnitudes.masked_fill(mask_low, 0.0)
        return magnitudes * signs
    return apply_threshold
def cap_fn(value):
    """Returns a function which takes a Tensor as input and returns a new Tensor
    where any elements with magnitude larger than `value` have been replaced
    with +/-`value`."""
    def apply_cap(tensor):
        # clamp saturates both tails symmetrically at +/-value.
        return tensor.clamp(min=-value, max=value)
    return apply_cap
def _wrap_for_gradients(functions):
    """Wraps a list of Tensor transformation functions to create a single
    function call which provides all necessary arguments for a backward hook.
    Can then be applied using `register_backward_hook(fn)`.
    """
    fns = _make_iterable(functions)

    def backward_hook(module, grad_in, grad_out):
        # Only the incoming gradients are transformed; `module` and
        # `grad_out` are required by the hook signature but unused here.
        return _map_tensor(fns, grad_in)

    return backward_hook
def _wrap_for_activations(functions):
    """Wrap a list of Tensor transformation functions to create a single
    function call which takes a single tensor as its argument.
    """
    fns = _make_iterable(functions)

    def forward(tensor):
        # Apply each transformation in order; the first function runs first.
        result = tensor
        for fn in fns:
            result = fn(result)
        return result

    return forward
def _wrap_for_weights(functions):
    """Wrap a list of Tensor transformation functions to create a single
    function call which takes a single tensor as its argument.
    """
    fns = _make_iterable(functions)

    def forward(tensor):
        # Chain the transformations, first function applied first.
        for transform in fns:
            tensor = transform(tensor)
        return tensor

    return forward
def _make_iterable(data):
"""
If a value is not already iterable, make it so.
:param data: Arbitrary data.
:return: Iterable object containing `data`.
"""
if not isinstance(data, Iterable):
data = [data]
return data
def _map_tensor(functions, tensors):
"""
Apply the composition of all functions to all given tensors. If a tensor
is None, it remains as None.
:param functions: iterable collection of functions. Each must take a
tensor and return a tensor of the same size. The first function is
applied first.
:param tensors: iterable collection of tensors.
:return: tuple of tensors with identical shapes to input.
"""
new_tensors = []
for tensor in tensors:
if tensor is None:
new_tensors.append(None)
else:
for fn in functions:
tensor = fn(tensor)
new_tensors.append(tensor)
return tuple(new_tensors)
|
db434/nn-restrict | tests/test_shuffle.py | import random
import torch
from structured.shuffle import *
def _shuffle_test_run(in_data, groups, out_data):
    """Wrapper for a single shuffle."""
    # Square layer (channels in == channels out) so the shuffle is the only
    # behaviour under test.
    channels = in_data.size(1)
    layer = Conv2d(channels, channels, 1, groups=groups)
    shuffled = layer.shuffle(in_data, groups)
    assert torch.equal(shuffled, out_data)
def _shuffle_test():
    """Try out a few simple shuffles to make sure things work as expected."""
    # Simple 1D data to start. Need to convert to Torch's format, and make it
    # look like it's 4D neural network data. Each value is a channel.
    data = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(1, 8, 1, 1)
    cases = [
        (1, data),
        (2, torch.Tensor([1, 5, 2, 6, 3, 7, 4, 8]).view(1, 8, 1, 1)),
        (4, torch.Tensor([1, 3, 5, 7, 2, 4, 6, 8]).view(1, 8, 1, 1)),
        (8, data),
    ]
    for groups, expected in cases:
        _shuffle_test_run(data, groups, expected)

    # 2D data. Each inner list is a separate channel.
    data = torch.Tensor([[11, 12], [21, 22], [31, 32], [41, 42]]).view(
        1, 4, 2, 1)
    cases = [
        (1, data),
        (2, torch.Tensor([[11, 12], [31, 32], [21, 22], [41, 42]]).view(
            1, 4, 2, 1)),
        (4, data),
    ]
    for groups, expected in cases:
        _shuffle_test_run(data, groups, expected)

    # 2D data. Each inner list is all the channels of a single batch.
    data = torch.Tensor([[11, 12, 13, 14], [21, 22, 23, 24]]).view(2, 4, 1, 1)
    cases = [
        (1, data),
        (2, torch.Tensor([[11, 13, 12, 14], [21, 23, 22, 24]]).view(
            2, 4, 1, 1)),
        (4, data),
    ]
    for groups, expected in cases:
        _shuffle_test_run(data, groups, expected)
def _module_test():
    """Construct layers with a range of different parameters and check that
    nothing crashes."""
    for _ in range(100):
        in_channels = 8 * random.randint(1, 10)   # Ensure multiple of groups
        out_channels = 8 * random.randint(1, 10)  # Ensure multiple of groups
        pad = random.randint(0, 5)
        kernel = 2 * pad + 1                      # Ensure odd kernel size
        layer = Conv2d(in_channels, out_channels, kernel, padding=pad)
        batch = torch.autograd.Variable(torch.Tensor(4, in_channels, 10, 10))
        result = layer(batch)
        assert result.size() == (4, out_channels, 10, 10)
# Run the shuffle unit tests directly as a script.
if __name__ == "__main__":
    _shuffle_test()
    _module_test()
    print("All tests passed.")
|
db434/nn-restrict | __init__.py | __all__ = ["models", "structured"]
|
db434/nn-restrict | structured/shift.py | import torch
import torch.nn as nn
import torch.nn.functional
from . import wrapped
from util import log
class Shift(nn.Module):
    """Shift all feature maps by a fixed amount in different directions. Feature
    maps are split into a group for each kernel element and shifted so that the
    kernel element ends up in the centre of the kernel.
    Based on this paper: https://arxiv.org/abs/1711.08141"""
    def __init__(self, channels, kernel_size, dilation):
        """
        :param channels: Number of input feature maps.
        :param kernel_size: Size of the convolution kernel being emulated;
        either an int or a (height, width) pair.
        :param dilation: Dilation of the emulated kernel; every shift
        distance is multiplied by this factor.
        """
        super(Shift, self).__init__()
        # Ensure that kernel_size specifies both the width and height.
        if isinstance(kernel_size, int):
            self.kernel_size = (kernel_size, kernel_size)
        else:
            self.kernel_size = kernel_size
        kernel_elements = self.kernel_size[0] * self.kernel_size[1]
        # Need at least one channel per kernel element, otherwise some shift
        # directions could not be represented at all.
        assert channels >= kernel_elements
        self.dilation = dilation
        # Channels are divided evenly between kernel elements; any remainder
        # is handled separately in forward().
        self.group_size = channels // kernel_elements
        # Weird arguments needed for torch.nn.functional.pad.
        # Each pair of values represents the padding to each end of a dimension.
        # Starts from the final dimension (width) and works backwards.
        self.padding = ((self.kernel_size[1] // 2) * dilation,
                        (self.kernel_size[1] // 2) * dilation,
                        (self.kernel_size[0] // 2) * dilation,
                        (self.kernel_size[0] // 2) * dilation)
    def _extract_window(self, data, kx, ky, width, height):
        """Extract part of `data` corresponding to the given kernel position."""
        # Slicing a window offset by (ky, kx) out of the padded input is
        # equivalent to shifting the feature map so that kernel element
        # (ky, kx) lines up with the centre of the kernel.
        y_start = ky * self.dilation
        y_end = height - (self.kernel_size[0] - ky - 1) * self.dilation
        x_start = kx * self.dilation
        x_end = width - (self.kernel_size[1] - kx - 1) * self.dilation
        return data[:, :, y_start:y_end, x_start:x_end]
    def forward(self, x):
        # Add padding as though we're about to do a convolution.
        x = nn.functional.pad(x, self.padding)
        batch, channels, height, width = x.size()
        shifted = []
        # Slice a different window out of each group.
        group = 0
        for ky in range(self.kernel_size[0]):
            for kx in range(self.kernel_size[1]):
                chan_start = group * self.group_size
                chan_end = chan_start + self.group_size
                data = x[:, chan_start:chan_end, :, :]
                data = self._extract_window(data, kx, ky, width, height)
                shifted.append(data)
                group += 1
        # Ensure all channels are now in shifted.
        # NOTE(review): the leftover channels reuse window (0, 0), i.e. the
        # top-left shift, rather than the centre (identity) window -- confirm
        # this matches the intended behaviour from the paper.
        if group * self.group_size < channels:
            data = x[:, group * self.group_size:, :, :]
            data = self._extract_window(data, 0, 0, width, height)
            shifted.append(data)
        return torch.cat(shifted, dim=1)
# Same interface as torch.nn.Conv2d (except groups -> depth_multiplier).
class Conv2d(nn.Module):
    """A drop-in replacement for torch.nn.Conv2d which replaces non-trivial
    convolutions with a "shift" operation. This involves moving all pixels of
    a feature map by a fixed amount in a particular direction.
    Computation savings are proportional to kernel size squared.
    Based on this paper:
    https://arxiv.org/abs/1711.08141
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 bias=True,
                 **kwargs):
        super(Conv2d, self).__init__()
        # Channel numbers can be scaled by floats, so need to be rounded back
        # to integers.
        in_channels = int(in_channels)
        out_channels = int(out_channels)
        # Special cases:
        #  * If kernel_size = 1, there is no room to express a shift
        #    direction, so fall back to an ordinary convolution.
        if kernel_size == 1:
            log.info("INFO: using default convolution instead of shift.")
            log.info("    kernel_size = 1")
            self.conv = nn.Sequential(
                wrapped.Conv2d(in_channels=in_channels,
                               out_channels=out_channels,
                               kernel_size=kernel_size,
                               stride=stride,
                               padding=padding,
                               dilation=dilation,
                               groups=groups,
                               bias=bias,
                               **kwargs),
                # Bug fix: this batch-norm follows a convolution producing
                # `out_channels` feature maps, so it must be sized to match.
                # The previous BatchNorm2d(in_channels) failed at runtime
                # whenever in_channels != out_channels.
                nn.BatchNorm2d(out_channels),
            )
        else:
            # There needs to be at least one channel shifted according to each
            # element of the "filter". Increase the number of intermediate
            # channels to accommodate this if necessary.
            intermediate_channels = max(out_channels, kernel_size ** 2)
            # ReLU and BN layers are in the position specified in the paper.
            # This is the CSC (conv-shift-conv) variant; there is also the
            # option to add an extra shift layer at the start.
            self.conv = nn.Sequential(
                nn.BatchNorm2d(in_channels),
                nn.ReLU(inplace=True),
                # Pre-shift channel mixing.
                wrapped.Conv2d(in_channels=in_channels,
                               out_channels=intermediate_channels,
                               kernel_size=1,
                               stride=1,
                               padding=0,
                               dilation=1,
                               groups=groups,
                               bias=False,
                               **kwargs),
                # Shift (replaces KxK convolution).
                Shift(intermediate_channels, kernel_size, dilation),
                nn.BatchNorm2d(intermediate_channels),
                nn.ReLU(inplace=True),
                # Post-shift channel mixing. The stride is applied here, after
                # the shift, so spatial downsampling happens exactly once.
                wrapped.Conv2d(in_channels=intermediate_channels,
                               out_channels=out_channels,
                               kernel_size=1,
                               stride=stride,
                               padding=0,
                               dilation=1,
                               groups=groups,
                               bias=bias,
                               **kwargs),
            )
    def forward(self, x):
        # The whole behaviour is captured by the Sequential built above.
        return self.conv(x)
|
db434/nn-restrict | tests/test_hadamard.py | import random
from structured.hadamard import *
def _sublayer_test():
    """Test a single Hadamard sub-layer.
    A single sub-layer doesn't do much. It:
    * Splits data in two on the channel dimension
    * Reorders one of these two segments (again on the channel dimension)
    * Recombines the results
    """
    # 8 channels with a butterfly size of 4 gives wings of 2 channels each.
    layer = HadamardLayer(channels=8, butterfly_size=4)
    data1d = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(1, 8, 1, 1)
    data2d = torch.Tensor(
        [11, 12, 21, 22, 31, 32, 41, 42, 51, 52, 61, 62, 71, 72, 81, 82]).view(
        1, 8, 2, 1)
    # Test channel splitting. Want alternating pairs of channels in each output
    # tensor. (Butterfly size = 4, so wings are 2 channels each.)
    odd1d = torch.Tensor([1, 2, 5, 6]).view(1, 4, 1, 1)
    even1d = torch.Tensor([3, 4, 7, 8]).view(1, 4, 1, 1)
    odd2d = torch.Tensor([11, 12, 21, 22, 51, 52, 61, 62]).view(1, 4, 2, 1)
    even2d = torch.Tensor([31, 32, 41, 42, 71, 72, 81, 82]).view(1, 4, 2, 1)
    odd, even = layer.extract_wings(data1d)
    assert torch.equal(odd, odd1d)
    assert torch.equal(even, even1d)
    # The same split must also work when there is a spatial dimension.
    odd, even = layer.extract_wings(data2d)
    assert torch.equal(odd, odd2d)
    assert torch.equal(even, even2d)
    # assemble_wings should do the reverse of extract_wings.
    assembled1d = layer.assemble_wings(odd1d, even1d)
    assert torch.equal(assembled1d, data1d)
    assembled2d = layer.assemble_wings(odd2d, even2d)
    assert torch.equal(assembled2d, data2d)
def _hadamard_func_test():
    """Ensure that the thing being computed is indeed the Hadamard transform."""
    # Using the example from here:
    # https://en.wikipedia.org/wiki/Hadamard_transform
    hadamard = Hadamard(channels=8)
    # 1D case: one batch of eight channels, trivial spatial dimensions.
    input1d = torch.Tensor([1, 0, 1, 0, 0, 1, 1, 0]).view(1, 8, 1, 1)
    output1d = torch.Tensor([4, 2, 0, -2, 0, 2, 0, 2]).view(1, 8, 1, 1)
    output = hadamard(input1d)
    assert torch.equal(output, output1d)
    # 2D case: each spatial position holds the same channel values as the 1D
    # case, so the expected output is the 1D expectation duplicated.
    input2d = torch.Tensor(
        [1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]).view(1, 8, 2, 1)
    output2d = torch.Tensor(
        [4, 4, 2, 2, 0, 0, -2, -2, 0, 0, 2, 2, 0, 0, 2, 2]).view(1, 8, 2, 1)
    output = hadamard(input2d)
    assert torch.equal(output, output2d)
def _module_test():
    """Test an entire Hadamard module.
    For a range of different input and output sizes, ensure that the layer
    doesn't crash when running.
    Restrictions:
    * outputs == inputs == a power of 2
    Note: I don't yet test the output for correctness.
    """
    for _ in range(100):
        # Channel count must be a power of two for the Hadamard transform.
        channels = 2 ** random.randint(1, 10)
        # 3x3 kernel with padding 1 keeps the spatial size unchanged.
        layer = Conv2d(channels, channels, 3, padding=1)
        batch = torch.autograd.Variable(torch.Tensor(4, channels, 10, 10))
        result = layer(batch)
        assert result.size() == (4, channels, 10, 10)
def _hadamard_test():
    """Test all components of a Hadamard layer."""
    # Run the sub-layer, transform and whole-module tests in order.
    for test in (_sublayer_test, _hadamard_func_test, _module_test):
        test()
    print("All tests passed.")
# Run the Hadamard unit tests directly as a script.
if __name__ == "__main__":
    _hadamard_test()
|
db434/nn-restrict | util/args.py | import argparse
import torch
import torch.distributed as dist
import models
import structured
def parse_args(dataset):
    """An argument parser with some behaviour common to all datasets.

    :param dataset: Dataset descriptor providing `name`, `default_model`,
    `default_epochs` and `default_lr` (and optionally `num_tokens`).
    :return: Parsed argument namespace, with a few derived fields added
    (`cuda`, `distributed`, and `num_tokens` for text datasets).
    """
    model_names = sorted(models.get_model_names(dataset=dataset))
    conv_types = sorted(structured.conv2d_types.keys())
    parser = argparse.ArgumentParser(description=dataset.name + " training")
    # Architecture parameters.
    arch = parser.add_argument_group(title="Architecture parameters")
    arch.add_argument('--arch', '-a', metavar='ARCH',
                      default=dataset.default_model, choices=model_names,
                      help='model architecture: ' + ' | '.join(model_names)
                           + ' (default: ' + dataset.default_model + ')')
    arch.add_argument('--width-multiplier', '-w', default=1, type=float,
                      metavar="W",
                      help='ratio of channels compared to base model')
    arch.add_argument('--conv-type', '-c', metavar='CONV', default='fc',
                      type=str, choices=conv_types,
                      help='type of convolution to use: ' + ' | '.join(
                          conv_types))
    arch.add_argument('--min-bfly-size', metavar='SIZE', default=2, type=int,
                      help='minimum allowed butterfly size')
    # Storage.
    store = parser.add_argument_group(title="Storage options")
    store.add_argument('--save-dir', default='.', type=str,
                       help='directory to save models in')
    store.add_argument('--dump-dir', default=None, type=str,
                       help='directory to dump all weights, acts and grads')
    store.add_argument('--undump-dir', default=None, type=str,
                       help='directory from which to restore dumped weights')
    # Training options.
    train = parser.add_argument_group(title="Training options")
    train.add_argument('--epochs', default=dataset.default_epochs, type=int,
                       metavar='N', help='number of total epochs to run')
    train.add_argument('--start-epoch', default=0, type=int, metavar='N',
                       help='manual epoch number (useful on restarts)')
    train.add_argument('--use-restarts', action='store_true',
                       help='periodically reset learning rate')
    train.add_argument('--restart-period', metavar='EPOCHS', type=int,
                       help='restart learning rate after this many epochs')
    train.add_argument('-b', '--batch-size', default=256, type=int,
                       metavar='N', help='mini-batch size (default: 256)')
    train.add_argument('--lr', '--learning-rate',
                       default=dataset.default_lr, type=float,
                       metavar='LR', help='initial learning rate')
    train.add_argument('--momentum', default=0.9, type=float, metavar='M',
                       help='momentum')
    train.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                       metavar='W', help='weight decay (default: 1e-4)')
    train.add_argument('--distill', action='store_true',
                       help='train using knowledge distillation')
    train.add_argument('--resume', action='store_true',
                       help='resume from latest checkpoint in save_dir')
    train.add_argument('--model-file', metavar='PATH', type=str,
                       help='load model checkpoint from the named file')
    train.add_argument('--pretrained', action='store_true',
                       help='use pre-trained model')
    # Number representations.
    number = parser.add_argument_group(title="Number representations")
    number.add_argument('--grad-noise', default=0.0, type=float,
                        help='add random noise to all gradients')
    number.add_argument('--grad-precision', default=0.0, type=float,
                        help='set smallest possible difference between values')
    number.add_argument('--grad-min', default=0.0, type=float,
                        help='zero all gradients with magnitudes below min')
    number.add_argument('--grad-max', default=0.0, type=float,
                        help='clip all gradient magnitudes to max')
    number.add_argument('--act-noise', default=0.0, type=float,
                        help='add random noise to all activations')
    number.add_argument('--act-precision', default=0.0, type=float,
                        help='set smallest possible difference between values')
    number.add_argument('--act-min', default=0.0, type=float,
                        help='zero all activations with magnitudes below min')
    number.add_argument('--act-max', default=0.0, type=float,
                        help='clip all activation magnitudes to max')
    number.add_argument('--weight-noise', default=0.0, type=float,
                        help='add random noise to all weights')
    number.add_argument('--weight-precision', default=0.0, type=float,
                        help='set smallest possible difference between values')
    number.add_argument('--weight-min', default=0.0, type=float,
                        help='zero all weights with magnitudes below min')
    number.add_argument('--weight-max', default=0.0, type=float,
                        help='clip all weight magnitudes to max')
    # Data collection.
    collect = parser.add_argument_group(title="Data collection")
    collect.add_argument('-s', '--stats', action='store_true', default=False,
                         help='print stats about the model and exit')
    collect.add_argument('--gradients', action='store_true', default=False,
                         help='print stats about gradients and exit')
    collect.add_argument('--dump-weights', action='store_true', default=False,
                         help='dump all weights to `dump-dir`')
    collect.add_argument('--dump-acts', action='store_true', default=False,
                         help='dump all activations to `dump-dir`')
    collect.add_argument('--dump-grads', action='store_true', default=False,
                         help='dump all gradients to `dump-dir`')
    # Distributed computing options.
    distribute = parser.add_argument_group(title="Distributed computing")
    distribute.add_argument('--world-size', default=1, type=int,
                            help='number of distributed processes')
    distribute.add_argument('--dist-url', default='tcp://172.16.58.3:23456',
                            type=str,
                            help='url used to set up distributed training')
    distribute.add_argument('--dist-backend', default='gloo', type=str,
                            help='distributed backend')
    # Text generation.
    generate = parser.add_argument_group(title="Text generation")
    generate.add_argument('--generate', action='store_true', default=False,
                          help='generate text')
    generate.add_argument('--words', default=100, type=int,
                          help='number of words to generate')
    generate.add_argument('--temperature', default=1.0, type=float,
                          help='higher temperature gives higher diversity')
    # Miscellaneous.
    misc = parser.add_argument_group(title="Miscellaneous")
    misc.add_argument('--no-cuda', action='store_true', default=False,
                      help='disables CUDA training')
    misc.add_argument('-j', '--workers', default=16, type=int, metavar='N',
                      help='number of data loading workers (default: 16)')
    misc.add_argument('--print-freq', '-p', default=100, type=int, metavar='N',
                      help='print progress every N batches (default: 100)')
    misc.add_argument('-e', '--evaluate', action='store_true',
                      help='evaluate model on validation set')
    misc.add_argument('--random-seed', metavar='SEED', default=None, type=int,
                      help='initialise the random number generator')
    misc.add_argument('--list-arches', action='store_true', default=False,
                      help='list all model architectures for all datasets')
    # Some tidying and handling of simple parameters.
    args = parser.parse_args()
    if args.list_arches:
        print("Any architecture can be applied to any dataset, but spatial "
              "downsampling (e.g. pooling) may be inappropriate if image "
              "sizes differ.")
        print(", ".join(sorted(models.get_model_names())))
        exit(0)
    # Use int representation if possible - it prints more nicely.
    if args.width_multiplier == int(args.width_multiplier):
        args.width_multiplier = int(args.width_multiplier)
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    args.distributed = args.world_size > 1
    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)
    # Bug fix: compare against None explicitly so that a seed of 0 (a valid
    # seed) is not silently ignored by the old truthiness test.
    if args.random_seed is not None:
        torch.manual_seed(args.random_seed)
    if args.restart_period is not None:
        args.use_restarts = True
    if args.resume:
        # NOTE(review): save_dir defaults to '.', so this assertion can never
        # fire; consider requiring an explicit path instead.
        assert args.save_dir or args.model_file
    assert args.conv_type in structured.conv2d_types
    # A bit of a hack to add more dummy arguments for text datasets.
    if hasattr(dataset, "num_tokens"):
        args.num_tokens = dataset.num_tokens()
    return args
|
n8henrie/cookiecutter-rust | cookiecutter_runner.py | from cookiecutter.main import cookiecutter
import datetime
import os.path
# Drive cookiecutter with this directory as the template, pre-filling the
# release date and copyright year from today's clock.
curdir = os.path.dirname(os.path.abspath(__file__))
today = datetime.datetime.today()
date = today.date().isoformat()
year = today.year
extra_context = dict(release_date=date, year=year)
cookiecutter(curdir, extra_context=extra_context)
|
alxpy/aiohttp-middlewares | aiohttp_middlewares/https.py | <reponame>alxpy/aiohttp-middlewares<filename>aiohttp_middlewares/https.py
"""
================
HTTPS Middleware
================
Change scheme for current request when aiohttp application deployed behind
reverse proxy with HTTPS enabled.
Usage
=====
.. code-block:: python
from aiohttp import web
from aiohttp_middlewares import https_middleware
# Basic usage
app = web.Application(middlewares=[https_middleware()])
# Specify custom headers to match, not `X-Forwarded-Proto: https`
app = web.Application(
middlewares=https_middleware({"Forwarded": "https"})
)
"""
import logging
from aiohttp import web
from aiohttp.web_middlewares import _Handler, _Middleware
from .annotations import DictStrStr
DEFAULT_MATCH_HEADERS = {"X-Forwarded-Proto": "https"}
logger = logging.getLogger(__name__)
def https_middleware(match_headers: DictStrStr = None) -> _Middleware:
    """
    Change scheme for current request when aiohttp application deployed behind
    reverse proxy with HTTPS enabled.
    This middleware is required when your aiohttp app is deployed behind
    nginx with HTTPS enabled, after aiohttp discontinued the
    ``secure_proxy_ssl_header`` keyword argument in
    https://github.com/aio-libs/aiohttp/pull/2299.
    :param match_headers:
        Dict of header(s) from reverse proxy to specify that aiohttp run behind
        HTTPS. By default:
        .. code-block:: python
            {"X-Forwarded-Proto": "https"}
    """
    # Resolve the header mapping once at middleware-creation time instead of
    # on every request; the argument is never mutated, so behaviour is the
    # same while the per-request work shrinks to the header comparison.
    headers = DEFAULT_MATCH_HEADERS if match_headers is None else match_headers

    @web.middleware
    async def middleware(
        request: web.Request, handler: _Handler
    ) -> web.StreamResponse:
        """Change scheme of current request when HTTPS headers matched."""
        # The request counts as HTTPS if any configured header matches.
        matched = any(
            request.headers.get(key) == value for key, value in headers.items()
        )
        if matched:
            logger.debug(
                "Substitute request URL scheme to https",
                extra={
                    "headers": headers,
                    "request_headers": dict(request.headers),
                },
            )
            # Requests are immutable; clone with the corrected scheme.
            request = request.clone(scheme="https")
        return await handler(request)

    return middleware
|
alxpy/aiohttp-middlewares | aiohttp_middlewares/cors.py | r"""
===============
CORS Middleware
===============
.. versionadded:: 0.2.0
Dealing with CORS headers for aiohttp applications.
**IMPORTANT:** There is a `aiohttp-cors
<https://pypi.org/project/aiohttp_cors/>`_ library, which handles CORS
headers by attaching additional handlers to aiohttp application for
OPTIONS (preflight) requests. In same time this CORS middleware mimics the
logic of `django-cors-headers <https://pypi.org/project/django-cors-headers>`_,
where all handling done in the middleware without any additional handlers. This
approach allows aiohttp application to respond with CORS headers for OPTIONS or
wildcard handlers, which is not possible with ``aiohttp-cors`` due to
https://github.com/aio-libs/aiohttp-cors/issues/241 issue.
For detailed information about CORS (Cross Origin Resource Sharing) please
visit:
- `Wikipedia <https://en.m.wikipedia.org/wiki/Cross-origin_resource_sharing>`_
- Or `MDN <https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS>`_
Configuration
=============
**IMPORTANT:** By default, the CORS middleware does not allow any origins to
access content from your aiohttp application. This means you need to carefully
check the possible options and provide custom values for your needs.
Usage
=====
.. code-block:: python
import re
from aiohttp import web
from aiohttp_middlewares import cors_middleware
from aiohttp_middlewares.cors import DEFAULT_ALLOW_HEADERS
# Unsecure configuration to allow all CORS requests
app = web.Application(
middlewares=[cors_middleware(allow_all=True)]
)
# Allow CORS requests from URL http://localhost:3000
app = web.Application(
middlewares=[
cors_middleware(origins=["http://localhost:3000"])
]
)
# Allow CORS requests from all localhost urls
app = web.Application(
middlewares=[
cors_middleware(
origins=[re.compile(r"^https?\:\/\/localhost")]
)
]
)
# Allow CORS requests from https://frontend.myapp.com as well
# as allow credentials
CORS_ALLOW_ORIGINS = ["https://frontend.myapp.com"]
app = web.Application(
middlewares=[
cors_middleware(
origins=CORS_ALLOW_ORIGINS,
allow_credentials=True,
)
]
)
# Allow CORS requests only for API urls
app = web.Application(
middelwares=[
cors_middleware(
origins=CORS_ALLOW_ORIGINS,
urls=[re.compile(r"^\/api")],
)
]
)
# Allow CORS requests for POST & PATCH methods, and for all
# default headers and `X-Client-UID`
app = web.Application(
middlewares=[
cors_middleware(
                origins=CORS_ALLOW_ORIGINS,
allow_methods=("POST", "PATCH"),
allow_headers=DEFAULT_ALLOW_HEADERS + ("X-Client-UID",),
)
]
)
"""
import logging
import re
from aiohttp import web
from aiohttp.web_middlewares import _Handler, _Middleware
from .annotations import StrCollection, UrlCollection
from .utils import match_path
ACCESS_CONTROL = "Access-Control"
ACCESS_CONTROL_ALLOW = f"{ACCESS_CONTROL}-Allow"
ACCESS_CONTROL_ALLOW_CREDENTIALS = f"{ACCESS_CONTROL_ALLOW}-Credentials"
ACCESS_CONTROL_ALLOW_HEADERS = f"{ACCESS_CONTROL_ALLOW}-Headers"
ACCESS_CONTROL_ALLOW_METHODS = f"{ACCESS_CONTROL_ALLOW}-Methods"
ACCESS_CONTROL_ALLOW_ORIGIN = f"{ACCESS_CONTROL_ALLOW}-Origin"
ACCESS_CONTROL_EXPOSE_HEADERS = f"{ACCESS_CONTROL}-Expose-Headers"
ACCESS_CONTROL_MAX_AGE = f"{ACCESS_CONTROL}-Max-Age"
ACCESS_CONTROL_REQUEST_METHOD = f"{ACCESS_CONTROL}-Request-Method"
DEFAULT_ALLOW_HEADERS = (
"accept",
"accept-encoding",
"authorization",
"content-type",
"dnt",
"origin",
"user-agent",
"x-csrftoken",
"x-requested-with",
)
DEFAULT_ALLOW_METHODS = ("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
DEFAULT_URLS = (re.compile(r".*"),)
logger = logging.getLogger(__name__)
def cors_middleware(
*,
allow_all: bool = False,
origins: UrlCollection = None,
urls: UrlCollection = None,
expose_headers: StrCollection = None,
allow_headers: StrCollection = DEFAULT_ALLOW_HEADERS,
allow_methods: StrCollection = DEFAULT_ALLOW_METHODS,
allow_credentials: bool = False,
max_age: int = None,
) -> _Middleware:
"""Middleware to provide CORS headers for aiohttp applications.
:param allow_all:
When enabled, allow any Origin to access content from your aiohttp web
application. **Please be careful with enabling this option as it may
result in security issues for your application.** By default: ``False``
:param origins:
Allow content access for given list of origins. Support supplying
strings for exact origin match or regex instances. By default: ``None``
:param urls:
        Allow content access for given list of URLs in aiohttp application.
By default: *apply CORS headers for all URLs*
:param expose_headers:
List of headers to be exposed with every CORS request. By default:
``None``
:param allow_headers:
List of allowed headers. By default:
.. code-block:: python
(
"accept",
"accept-encoding",
"authorization",
"content-type",
"dnt",
"origin",
"user-agent",
"x-csrftoken",
"x-requested-with",
)
:param allow_methods:
List of allowed methods. By default:
.. code-block:: python
("DELETE", "GET", "OPTIONS", "PATCH", "POST", "PUT")
:param allow_credentials:
When enabled apply allow credentials header in response, which results
in sharing cookies on shared resources. **Please be careful with
allowing credentials for CORS requests.** By default: ``False``
:param max_age: Access control max age in seconds. By default: ``None``
"""
@web.middleware
async def middleware(
request: web.Request, handler: _Handler
) -> web.StreamResponse:
# Initial vars
request_method = request.method
request_path = request.rel_url.path
# Is this an OPTIONS request
is_options_request = request_method == "OPTIONS"
# Is this a preflight request
is_preflight_request = (
is_options_request
and ACCESS_CONTROL_REQUEST_METHOD in request.headers
)
# Log extra data
log_extra = {
"is_preflight_request": is_preflight_request,
"method": request_method.lower(),
"path": request_path,
}
# Check whether CORS should be enabled for given URL or not. By default
# CORS enabled for all URLs
if not match_items(
DEFAULT_URLS if urls is None else urls, request_path
):
logger.debug(
"Request should not be processed via CORS middleware",
extra=log_extra,
)
return await handler(request)
# If this is a preflight request - generate empty response
if is_preflight_request:
response = web.StreamResponse()
# Otherwise - call actual handler
else:
response = await handler(request)
# Now check origin heaer
origin = request.headers.get("Origin")
# Empty origin - do nothing
if not origin:
logger.debug(
"Request does not have Origin header. CORS headers not "
"available for given requests",
extra=log_extra,
)
return response
# Set allow credentials header if necessary
if allow_credentials:
response.headers[ACCESS_CONTROL_ALLOW_CREDENTIALS] = "true"
# Check whether current origin satisfies CORS policy
if not allow_all and not (origins and match_items(origins, origin)):
logger.debug(
"CORS headers not allowed for given Origin", extra=log_extra
)
return response
# Now start supplying CORS headers
# First one is Access-Control-Allow-Origin
if allow_all and not allow_credentials:
cors_origin = "*"
else:
cors_origin = origin
response.headers[ACCESS_CONTROL_ALLOW_ORIGIN] = cors_origin
# Then Access-Control-Expose-Headers
if expose_headers:
response.headers[ACCESS_CONTROL_EXPOSE_HEADERS] = ", ".join(
expose_headers
)
# Now, if this is an options request, respond with extra Allow headers
if is_options_request:
response.headers[ACCESS_CONTROL_ALLOW_HEADERS] = ", ".join(
allow_headers
)
response.headers[ACCESS_CONTROL_ALLOW_METHODS] = ", ".join(
allow_methods
)
if max_age is not None:
response.headers[ACCESS_CONTROL_MAX_AGE] = str(max_age)
# If this is preflight request - do not allow other middlewares to
# process this request
if is_preflight_request:
logger.debug(
"Provide CORS headers with empty response for preflight "
"request",
extra=log_extra,
)
raise web.HTTPOk(headers=response.headers)
# Otherwise return normal response
logger.debug("Provide CORS headers for request", extra=log_extra)
return response
return middleware
def match_items(items: UrlCollection, value: str) -> bool:
    """Return True when *value* matches at least one item in *items*."""
    for item in items:
        if match_path(item, value):
            return True
    return False
|
alxpy/aiohttp-middlewares | aiohttp_middlewares/error.py | <reponame>alxpy/aiohttp-middlewares<filename>aiohttp_middlewares/error.py
r"""
================
Error Middleware
================
.. versionadded:: 0.2.0
Middleware to handle errors in aiohttp applications.
Usage
=====
.. code-block:: python
import re
from aiohttp import web
from aiohttp_middlewares import error_context, error_middleware
# Default error handler
async def error(request: web.Request) -> web.Response:
with error_context(request) as context:
return web.Response(
text=context.message,
status=context.status,
content_type="text/plain"
)
# Error handler for API requests
async def api_error(request: web.Request) -> web.Response:
with error_context(request) as context:
return web.json_response(context.data, status=context.status)
# Basic usage (one error handler for whole application)
app = web.Application(
middlewares=[
error_middleware(default_handler=api_error)
]
)
# Advanced usage (multiple error handlers for different
# application parts)
app = web.Application(
middlewares=[
error_middleware(
default_handler=error,
config={re.compile(r"^\/api"): api_error}
)
]
)
"""
from contextlib import contextmanager
from typing import Dict, Iterator, Optional
import attr
from aiohttp import web
from aiohttp.web_middlewares import _Handler, _Middleware
from .annotations import DictStrAny, Url
from .utils import match_path
DEFAULT_EXCEPTION = Exception("Unhandled aiohttp-middlewares exception.")
IGNORE_LOG_STATUSES = (400, 404, 422)
ERROR_REQUEST_KEY = "error"
Config = Dict[Url, _Handler]
@attr.dataclass
class ErrorContext:
    """Context with all necessary data about the error."""

    # The caught exception itself.
    err: Exception
    # Human-readable message (defaults to ``str(err)``).
    message: str
    # HTTP status code to respond with (defaults to 500).
    status: int
    # Payload for JSON error responses (defaults to ``{"detail": message}``).
    data: DictStrAny
@contextmanager
def error_context(request: web.Request) -> Iterator[ErrorContext]:
    """Context manager to retrieve error data inside of error handler (view).

    The yielded context carries:

    - The error itself
    - Error message (by default: ``str(err)``)
    - Error status (by default: ``500``)
    - Error data dict (by default: ``{"detail": str(err)}``)
    """
    err = get_error_from_request(request)

    def attr_or(name, fallback):
        # Fall back when the attribute is missing *or* falsy.
        return getattr(err, name, None) or fallback

    message = attr_or("message", str(err))
    yield ErrorContext(
        err=err,
        message=message,
        status=attr_or("status", 500),
        data=attr_or("data", {"detail": message}),
    )
def error_middleware(
    *, default_handler: _Handler, config: Config = None
) -> _Middleware:
    """Middleware to handle exceptions in aiohttp applications.

    To catch all possible errors, please put this middleware on top of your
    ``middlewares`` list as:

    .. code-block:: python

        from aiohttp import web
        from aiohttp_middlewares import error_middleware, timeout_middleware

        app = web.Application(
            middlewares=[
                error_middleware(...),
                timeout_middleware(...)
            ]
        )

    :param default_handler:
        Default handler to be called on an error caught by error middleware.
    :param config:
        When application requires multiple error handlers, provide mapping in
        format ``Dict[Url, _Handler]``, where ``Url`` can be an exact string
        to match path or regex and ``_Handler`` is a handler to be called when
        ``Url`` matches current request path if any.
    """

    @web.middleware
    async def middleware(
        request: web.Request, handler: _Handler
    ) -> web.StreamResponse:
        try:
            return await handler(request)
        except Exception as err:
            # Stash the error so error_context() can read it inside the view.
            set_error_to_request(request, err)
            error_handler = (
                get_error_handler(request, config) or default_handler
            )
            return await error_handler(request)

    return middleware
def get_error_from_request(request: web.Request) -> Exception:
    """Return the error previously stored in the request dict.

    Falls back to the default exception when nothing was stored before.
    """
    stored = request.get(ERROR_REQUEST_KEY)
    if stored:
        return stored
    return DEFAULT_EXCEPTION
def get_error_handler(
    request: web.Request, config: Optional[Config]
) -> Optional[_Handler]:
    """Find the error handler matching the current request path, if any."""
    if not config:
        return None
    path = request.rel_url.path
    # First configured URL (in insertion order) that matches wins.
    return next(
        (handler for url, handler in config.items() if match_path(url, path)),
        None,
    )
def set_error_to_request(request: web.Request, err: Exception) -> Exception:
    """Store the caught error in the request dict and return it."""
    request[ERROR_REQUEST_KEY] = err
    return err
|
alxpy/aiohttp-middlewares | tests/test_utils.py | <reponame>alxpy/aiohttp-middlewares
import re
import pytest
from aiohttp_middlewares import match_path
from aiohttp_middlewares.utils import match_request
# Plain collection form: method is irrelevant, only the path matters.
URLS_COLLECTION = {
    "/slow-url",
    "/very-slow-url",
    re.compile("/(very-very|very-very-very)-slow-url"),
}
# Dict form: URL -> allowed method(s); method strings are case-insensitive.
URLS_DICT = {
    "/slow-url": "POST",
    "/very-slow-url": ["get", "post"],
    re.compile("/(very-very|very-very-very)-slow-url"): {"GET", "post", "put"},
}
@pytest.mark.parametrize(
    "url, path, expected",
    (
        ("/slow-url", "/slow-url", True),
        ("/slow-url", "/slow-url/", False),
        (re.compile("^/slow-url"), "/slow-url", True),
        (re.compile("^/slow-url"), "/slow-url/", True),
        (re.compile("^/slow-url"), "/very-slow-url", False),
    ),
)
def test_match_path(url, path, expected):
    """Strings require an exact match; patterns use ``re.match`` semantics."""
    assert match_path(url, path) == expected
@pytest.mark.parametrize(
    "urls, request_method, request_path, expected",
    (
        (URLS_COLLECTION, "GET", "/", False),
        (URLS_COLLECTION, "POST", "/slow-url", True),
        (URLS_COLLECTION, "GET", "/very-slow-url", True),
        (URLS_COLLECTION, "POST", "/very-slow-url", True),
        (URLS_COLLECTION, "GET", "/very-very-slow-url", True),
        (URLS_COLLECTION, "POST", "/very-very-slow-url", True),
        (URLS_COLLECTION, "GET", "/very-very-very-slow-url", True),
        (URLS_COLLECTION, "POST", "/very-very-very-slow-url", True),
        (URLS_DICT, "GET", "/", False),
        (URLS_DICT, "GET", "/slow-url", False),
        (URLS_DICT, "POST", "/slow-url", True),
        (URLS_DICT, "GET", "/very-slow-url", True),
        (URLS_DICT, "PATCH", "/very-slow-url", False),
        (URLS_DICT, "GET", "/very-very-slow-url", True),
        (URLS_DICT, "POST", "/very-very-slow-url", True),
        (URLS_DICT, "PUT", "/very-very-slow-url", True),
        (URLS_DICT, "PATCH", "/very-very-slow-url", False),
        (URLS_DICT, "GET", "/very-very-very-slow-url", True),
        (URLS_DICT, "POST", "/very-very-very-slow-url", True),
        (URLS_DICT, "PUT", "/very-very-very-slow-url", True),
        (URLS_DICT, "PATCH", "/very-very-very-slow-url", False),
    ),
)
def test_match_request(urls, request_method, request_path, expected):
    """Collections ignore the method; dicts also constrain allowed methods."""
    assert match_request(urls, request_method, request_path) is expected
|
alxpy/aiohttp-middlewares | aiohttp_middlewares/constants.py | <reponame>alxpy/aiohttp-middlewares
"""
=============================
aiohttp_middlewares.constants
=============================
Collection of constants for ``aiohttp_middlewares`` project.
"""
#: Tuple of idempotent HTTP methods
# NOTE(review): HTTP (RFC 7231) also treats PUT/DELETE as idempotent; this
# stricter split looks intentional for shielding purposes — confirm.
IDEMPOTENT_METHODS = ("GET", "HEAD", "OPTIONS", "TRACE")

#: Tuple of non-idempotent HTTP methods
NON_IDEMPOTENT_METHODS = ("DELETE", "PATCH", "POST", "PUT")
|
alxpy/aiohttp-middlewares | tests/test_error_middleware.py | <filename>tests/test_error_middleware.py<gh_stars>0
import re
import pytest
from aiohttp import web
from aiohttp_middlewares import error_context, error_middleware
class LegalException(Exception):
    """Test error carrying an HTTP 451 payload for the error middleware."""

    def __init__(self):
        message = "Not available for legal reasons"
        super().__init__(message)
        # Attributes picked up by error_context():
        self.status = 451
        self.data = {"paid": False, "pay_at": "https://payment.url/"}
async def api_error(request):
    """JSON error handler: respond with the stored error's data payload."""
    with error_context(request) as context:
        return web.json_response(context.data, status=context.status)
async def error(request):
    """Plain-text error handler: respond with the stored error's message."""
    with error_context(request) as context:
        return web.Response(
            text=context.message,
            status=context.status,
            content_type="text/plain",
        )
async def legal(request):
    """View that always raises LegalException (HTTP 451 scenario)."""
    raise LegalException()
async def no_error_context(request):
    """Handler that ignores the stored error and returns a fixed 500."""
    return web.Response(text="Server Error", status=500)
@pytest.mark.parametrize(
    "path, expected_status, expected_content_type, expected_text",
    (
        ("/", 404, "text/plain", "Not Found"),
        ("/api/", 404, "application/json", '{"detail": "Not Found"}'),
        (
            "/api/legal/",
            451,
            "application/json",
            '{"paid": false, "pay_at": "https://payment.url/"}',
        ),
        ("/legal/", 451, "text/plain", "Not available for legal reasons"),
        ("/no-error-context/", 500, "text/plain", "Server Error"),
    ),
)
async def test_multiple_handlers(
    aiohttp_client, path, expected_status, expected_content_type, expected_text
):
    """Different error handlers can be configured per URL (string or regex)."""
    app = web.Application(
        middlewares=[
            error_middleware(
                default_handler=error,
                config={
                    "/no-error-context/": no_error_context,
                    re.compile(r"^/api"): api_error,
                },
            )
        ]
    )
    app.router.add_get("/legal/", legal)
    app.router.add_get("/api/legal/", legal)

    client = await aiohttp_client(app)
    response = await client.get(path)
    assert response.status == expected_status
    assert response.content_type == expected_content_type
    assert await response.text() == expected_text
async def test_single_handler(aiohttp_client):
    """With only a default handler, every error goes through it."""
    app = web.Application(
        middlewares=[error_middleware(default_handler=error)]
    )

    client = await aiohttp_client(app)
    response = await client.get("/does-not-exist.exe")
    assert response.content_type == "text/plain"
    assert response.status == 404
    assert await response.text() == "Not Found"
|
alxpy/aiohttp-middlewares | aiohttp_middlewares/utils.py | """
=========================
aiohttp_middlewares.utils
=========================
Various utility functions for ``aiohttp_middlewares`` library.
"""
from .annotations import Url, Urls
def match_path(item: Url, path: str) -> bool:
    """Check whether *path* equals the given URL string or matches it as a
    compiled regular expression.

    :param item: Exact path string or compiled pattern to compare against.
    :param path: Request path string.
    """
    matcher = getattr(item, "match", None)
    if matcher is None:
        # Plain strings fall back to exact comparison.
        return item == path
    try:
        return matcher(path) is not None
    except (AttributeError, TypeError):
        # e.g. a bytes pattern vs a str path — treat like a plain value.
        return item == path
def match_request(urls: Urls, method: str, path: str) -> bool:
    """Check whether request method and path matches given URLs or not.

    :param urls:
        Either a plain collection of URLs (method is ignored) or a dict
        mapping URL -> allowed method(s). Method comparison is
        case-insensitive.
    :param method: HTTP method of the request.
    :param path: Request path string.
    """
    found = [item for item in urls if match_path(item, path)]
    if not found:
        return False

    # Plain collections do not constrain the HTTP method.
    if not isinstance(urls, dict):
        return True

    # Only the first matching URL's method spec is consulted (dict
    # insertion order), matching the original behaviour.
    found_item = urls[found[0]]
    method = method.lower()
    if isinstance(found_item, str):
        return found_item.lower() == method
    # Idiomatic any(): the original `any(True for item in ... if ...)`
    # expressed the same predicate in a roundabout way.
    return any(item.lower() == method for item in found_item)
|
alxpy/aiohttp-middlewares | aiohttp_middlewares/annotations.py | """
===============================
aiohttp_middlewares.annotations
===============================
Type annotation shortcuts for ``aiohttp_middlewares`` library.
"""
from typing import Any, Dict, FrozenSet, List, Set, Tuple, Union

# ``typing.re`` was deprecated in Python 3.8 and removed in Python 3.12;
# the canonical home of the compiled-pattern type is the ``re`` module.
from re import Pattern

# Shorthand aliases shared across the middlewares.
DictStrAny = Dict[str, Any]
DictStrStr = Dict[str, str]
IntCollection = Union[List[int], FrozenSet[int], Set[int], Tuple[int, ...]]
StrCollection = Union[List[str], FrozenSet[str], Set[str], Tuple[str, ...]]
# A URL may be given as an exact path string or a compiled regex.
Url = Union[str, Pattern]
UrlCollection = Union[List[Url], Set[Url], Tuple[Url, ...]]
# Dict form: URL -> allowed HTTP method(s).
UrlDict = Dict[Url, Union[StrCollection, str]]
Urls = Union[UrlCollection, UrlDict]
|
alxpy/aiohttp-middlewares | tests/test_timeout_middleware.py | <reponame>alxpy/aiohttp-middlewares
import asyncio
import pytest
from aiohttp import web
from aiohttp_middlewares import timeout_middleware
HALF_A_SECOND = 0.5
SECOND = 1
def create_app(seconds, ignore=None):
    """Build a test app with timeout middleware and fast/slow endpoints."""
    app = web.Application(
        middlewares=[timeout_middleware(seconds, ignore=ignore)]
    )
    app.router.add_route("GET", "/", handler)
    app.router.add_route("GET", "/slow", slow_handler)
    return app
async def handler(request):
    """Fast handler: responds immediately."""
    return web.json_response()
async def slow_handler(request):
    """Slow handler: sleeps one second to trigger the timeout."""
    await asyncio.sleep(SECOND)
    return web.json_response()
@pytest.mark.parametrize(
    "seconds, ignore, url, expected",
    [
        (SECOND - HALF_A_SECOND, None, "/", 200),
        (SECOND - HALF_A_SECOND, None, "/slow", 504),
        (SECOND + HALF_A_SECOND, None, "/", 200),
        (SECOND + HALF_A_SECOND, None, "/slow", 200),
        (SECOND - HALF_A_SECOND, ["/slow"], "/", 200),
        (SECOND - HALF_A_SECOND, ["/slow"], "/slow", 200),
    ],
)
async def test_timeout_middleware(
    aiohttp_client, seconds, ignore, url, expected
):
    """Slow handlers 504 unless the timeout is long enough or URL ignored."""
    client = await aiohttp_client(create_app(seconds, ignore))
    response = await client.get(url)
    assert response.status == expected
|
alxpy/aiohttp-middlewares | tests/test_shield_middleware.py | import asyncio
import re
import pytest
from aiohttp import web
from aiohttp.test_utils import make_mocked_request
from aiohttp_middlewares import NON_IDEMPOTENT_METHODS, shield_middleware
def create_app(*, methods=None, urls=None, ignore=None):
    """Build a test app with shield middleware and a few routes."""
    app = web.Application(
        middlewares=[
            shield_middleware(methods=methods, urls=urls, ignore=ignore)
        ]
    )
    app.router.add_get("/one", handler)
    app.router.add_post("/one", handler)
    app.router.add_get("/two", handler)
    app.router.add_post("/two", handler)
    app.router.add_patch("/three", handler)
    return app
async def handler(request):
    """Trivial handler returning JSON ``true``."""
    return web.json_response(True)
def test_shield_middleware_no_arguments():
    """shield_middleware() requires at least one configuration argument."""
    with pytest.raises(ValueError):
        shield_middleware()
def test_shield_middleware_mixed_methods_and_urls():
    """``methods`` and ``urls`` are mutually exclusive."""
    with pytest.raises(ValueError):
        shield_middleware(methods=NON_IDEMPOTENT_METHODS, urls=["/one"])
def test_shield_middleware_mixed_urls_and_ignore():
    """``urls`` and ``ignore`` are mutually exclusive."""
    with pytest.raises(ValueError):
        shield_middleware(urls=["/one"], ignore=["/two"])
@pytest.mark.parametrize(
    "method, url",
    [
        ("GET", "/one"),
        ("GET", "/two"),
        ("POST", "/one"),
        ("POST", "/two"),
        ("PATCH", "/three"),
    ],
)
async def test_shield_request_by_method(aiohttp_client, url, method):
    """Requests still succeed when shielding is configured by method."""
    app = create_app(methods=NON_IDEMPOTENT_METHODS, ignore=["/three"])
    client = await aiohttp_client(app)
    response = await client.request(method, url)
    assert response.status == 200
    assert await response.json() is True
@pytest.mark.parametrize(
    "method, url",
    [
        ("GET", "/one"),
        ("GET", "/two"),
        ("POST", "/one"),
        ("POST", "/two"),
        ("PATCH", "/three"),
    ],
)
async def test_shield_request_by_url(aiohttp_client, url, method):
    """Requests still succeed when shielding is configured per-URL."""
    app = create_app(
        urls={"/one": ["POST"], re.compile(r"/(two|three)"): {"post", "patch"}}
    )
    client = await aiohttp_client(app)
    response = await client.request(method, url)
    assert response.status == 200
    assert await response.json() is True
@pytest.mark.parametrize(
    "method, value",
    [("DELETE", False), ("GET", False), ("POST", True), ("PUT", False)],
)
async def test_shield_middleware_funcitonal(loop, method, value):
    """Only shielded methods run to completion despite task cancellation.

    NOTE(review): "funcitonal" is a typo in the test name, but renaming
    would change the collected test ids.
    """
    flag = False
    client_ready = asyncio.Event()
    handler_ready = asyncio.Event()

    async def handler(request):
        handler_ready.set()
        # Cancellation point.
        # When not shielded, CancelledError will be raised here.
        await client_ready.wait()
        nonlocal flag
        flag = True
        return web.Response(status=200)

    # Run handler in a task.
    middleware = shield_middleware(methods=frozenset({"POST"}))
    task = loop.create_task(
        middleware(make_mocked_request(method, "/"), handler)
    )
    await handler_ready.wait()
    # Cancel the handler.
    task.cancel()
    client_ready.set()
    # Wait for completion.
    await asyncio.wait([task])
    assert flag is value
|
alxpy/aiohttp-middlewares | aiohttp_middlewares/timeout.py | """
==================
Timeout Middleware
==================
Middleware to ensure that request handling does not exceeds X seconds.
Usage
=====
.. code-block:: python
from aiohttp import web
from aiohttp_middlewares import error_middleware, timeout_middleware
# Basic usage
app = web.Application(
middlewares=[timeout_middleware(29.5)]
)
# Ignore slow responses from list of urls
slow_urls = ("/slow-url", "/very-slow-url", "/very/very/slow/url")
app = web.Application(
middlewares=[timeout_middleware(4.5, ignore=slow_urls)]
)
# Ignore slow responses from dict of urls. URL to ignore is a key,
# value is a lone string with HTTP method or list of strings with
# HTTP methods to ignore. HTTP methods are case-insensitive
slow_urls = {
"/slow-url": "POST",
"/very-slow-url": ("GET", "POST"),
}
app = web.Application(
middlewares=[timeout_middleware(4.5, ignore=slow_urls)]
)
# Handle timeout errors with error middleware
app = web.Application(
middlewares=[error_middleware(), timeout_middleware(14.5)]
)
"""
import logging
from typing import Union
from aiohttp import web
from aiohttp.web_middlewares import _Handler, _Middleware
from async_timeout import timeout
from .annotations import Urls
from .utils import match_request
logger = logging.getLogger(__name__)
def timeout_middleware(
    seconds: Union[int, float], *, ignore: Urls = None
) -> _Middleware:
    """Ensure that request handling does not exceed X seconds.

    This is helpful when aiohttp application served behind nginx or other
    reverse proxy with enabled read timeout. When that read timeout is
    exceeded the reverse proxy generates an error page instead of the aiohttp
    app, which may result in bad user experience.

    For best results, please do not supply seconds value which equals read
    timeout value at reverse proxy, as request handling at aiohttp may then
    end after the reverse proxy has already responded with a 504 error.
    Timeout context manager accepts floats, so if nginx has read timeout in
    30 seconds, it's ok to configure timeout middleware to raise timeout error
    after 29.5 seconds. In that case in most cases user for sure will see the
    error from aiohttp app instead of reverse proxy.

    Notice that timeout middleware just raises :class:`asyncio.TimeoutError`
    in case of exceeding seconds per request, but does not handle the error
    by itself. If you need to handle this error, please place
    :func:`aiohttp_middlewares.error.error_middleware` in list of
    application middlewares as well. Error middleware should be placed before
    timeout middleware, so timeout errors can be caught and processed
    properly.

    In case if you need to "disable" timeout middleware for given request
    path, please supply ignore collection as second positional argument,
    like:

    .. code-block:: python

        from aiohttp import web

        app = web.Application(
            middlewares=[timeout_middleware(14.5, ignore={"/slow-url"})]
        )

    In case if you need more flexible ignore rules you can pass ``ignore``
    dict, where key is an URL to ignore and value is a collection of methods
    to ignore from timeout handling for given URL.

    .. code-block:: python

        ignore = {"/slow-url": ["POST"]}
        app = web.Application(
            middlewares=[timeout_middleware(14.5, ignore=ignore)]
        )

    Behind the scene, when current request path matches an URL from the
    ignore collection or dict, the timeout context manager is skipped so the
    execution is not interrupted after X seconds.

    :param seconds: Max amount of seconds for each handler call.
    :param ignore:
        Do not limit execution for any of given URLs (paths). This is useful
        when request handler returns ``StreamResponse`` instead of regular
        ``Response``. You also can specify URLs as dict, where key is URL to
        ignore from wrapping into timeout context and value is list of
        methods to ignore. This is helpful when you need ignore only POST
        requests of slow API endpoint, but still need to have GET requests to
        same endpoint to not exceed X seconds.
    """

    @web.middleware
    async def middleware(
        request: web.Request, handler: _Handler
    ) -> web.StreamResponse:
        """Wrap request handler into timeout context manager."""
        request_method = request.method
        request_path = request.rel_url.path

        if ignore and match_request(ignore, request_method, request_path):
            logger.debug(
                "Ignore path from timeout handling",
                extra={"method": request_method, "path": request_path},
            )
            return await handler(request)

        # NOTE(review): recent async_timeout releases only support
        # ``async with timeout(...)``; confirm the pinned version still
        # allows the synchronous ``with`` form used here.
        with timeout(seconds):
            return await handler(request)

    return middleware
|
alxpy/aiohttp-middlewares | tests/test_https_middleware.py | <filename>tests/test_https_middleware.py
import pytest
from aiohttp import web
from aiohttp_middlewares import https_middleware
def create_app(match_headers):
    """Build a test app with https middleware and a scheme-echo route."""
    app = web.Application(middlewares=[https_middleware(match_headers)])
    app.router.add_route("GET", "/", handler)
    return app
async def handler(request):
    """Echo the scheme the request was resolved to ("http"/"https")."""
    return web.json_response(request.url.scheme)
@pytest.mark.parametrize(
    "match_headers, request_headers, expected",
    [
        (None, None, "http"),
        (None, {"X-Forwarded-Proto": "http"}, "http"),
        (None, {"X-Forwarded-Proto": "https"}, "https"),
        ({}, None, "http"),
        ({}, {"X-Forwarded-Proto": "http"}, "http"),
        ({"Forwarded": "https"}, None, "http"),
        ({"Forwarded": "https"}, {"X-Forwarded-Proto": "http"}, "http"),
        ({"Forwarded": "https"}, {"X-Forwarded-Proto": "https"}, "http"),
        ({"Forwarded": "https"}, {"Forwarded": "http"}, "http"),
        ({"Forwarded": "https"}, {"Forwarded": "https"}, "https"),
    ],
)
async def test_https_middleware(
    aiohttp_client, match_headers, request_headers, expected
):
    """Scheme flips to https only when the configured header/value matches."""
    client = await aiohttp_client(create_app(match_headers))
    response = await client.get("/", headers=request_headers)
    assert await response.json() == expected
|
lundgrenalex/mtrc | libs/tools/json.py | <filename>libs/tools/json.py<gh_stars>0
from functools import wraps
from flask import jsonify, request
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from werkzeug.exceptions import BadRequest
from importlib import import_module
import logging
def validate_schema(schema_name: str):
    """Decorator validating ``request.json`` against a named JSON schema.

    The schema module is imported from ``libs.schemas.<schema_name>`` and
    must expose a ``schema`` attribute. On validation failure the wrapped
    view is not called and a 400 JSON error is returned instead.
    """
    def decorator(f):
        @wraps(f)
        def wrapper(*args, **kw):
            try:
                # Import lazily so a schema module is only loaded when used.
                schema = import_module('libs.schemas.' + schema_name, package=__name__)
                validate(request.json, schema.schema)
            except ValidationError as e:
                logging.error(f'ValidationError: {e.message}, {request.data}')
                return jsonify({
                    "error_code": 400,
                    "error_type": "ValidationError",
                    "error_message": e.message
                }), 400
            return f(*args, **kw)
        return wrapper
    return decorator
|
lundgrenalex/mtrc | libs/routes/exporter.py | import os
from flask import Blueprint, jsonify, request, Response
from libs.metric import exporter
import config
app = Blueprint('exporter', __name__, url_prefix='/metrics')
# Metrics page
@app.route('/', methods=['GET'])
def get_metrics():
    """Expose all stored metrics in the Prometheus text format."""
    # Bug fix: "text" is not a valid MIME type; the Prometheus text
    # exposition format is served as text/plain.
    return Response(exporter.get_metrics(), mimetype='text/plain'), 200
@app.route('/', methods=['DELETE'])
def drop_metrics():
    """Delete every stored metric (drops the whole metrics database)."""
    exporter.drop_all_metrics()
    return {}, 200
|
lundgrenalex/mtrc | libs/tools/encoders.py | <gh_stars>0
import json
import datetime
import decimal
class FlaskEncoder(json.JSONEncoder):
    '''JSON encoder with sane defaults for common non-JSON types.

    - ``decimal.Decimal`` is rendered as a string (no precision loss).
    - ``datetime.datetime`` and ``datetime.date`` are rendered in ISO 8601.

    Anything else falls through to ``json.JSONEncoder.default`` (which
    raises ``TypeError``).
    '''
    def default(self, obj):
        if isinstance(obj, decimal.Decimal):
            return str(obj)
        # Generalized: ``datetime.date`` is handled as well (datetime is a
        # subclass of date, so datetime behaviour is unchanged — both use
        # the same ``isoformat()`` call).
        elif isinstance(obj, (datetime.datetime, datetime.date)):
            return obj.isoformat()
        return json.JSONEncoder.default(self, obj)
|
lundgrenalex/mtrc | libs/tools/utils.py | <reponame>lundgrenalex/mtrc<filename>libs/tools/utils.py<gh_stars>0
import os
def proj_path(project_root, *args):
    '''Return the absolute (real) path of *args inside the project.'''
    joined = os.path.join(project_root, *args)
    return os.path.realpath(joined)
|
lundgrenalex/mtrc | libs/metric/__init_.py | <gh_stars>0
from . import counter # noqa
from . import gauge # noqa
from . import average # noqa
|
lundgrenalex/mtrc | libs/routes/counter.py | <gh_stars>0
from flask import Blueprint, jsonify, request
from libs.tools.json import validate_schema
from libs.metric import counter
app = Blueprint('gateway-counter', __name__, url_prefix='/handler/counter')
@app.route('/', methods=['POST'])
@validate_schema('counter')
def get_counter():
    """Accept a schema-validated counter payload and update the metric."""
    result = counter.update(request.json)
    return jsonify(result), 200
@app.route('/<string:metric_name>/', methods=['DELETE'])
def remove_metric(metric_name):
    """Delete the named counter metric."""
    result = counter.remove(metric_name)
    return jsonify(result), 200
|
lundgrenalex/mtrc | libs/storage/mongodb.py | <gh_stars>0
from pymongo import MongoClient
connection = None
def connect(conn_str='mongodb://mongodb:27017', pullSize=1024, **mongo_settings):
    """Return the process-wide :class:`MongoClient`, creating it lazily.

    :param conn_str: MongoDB connection string.
    :param pullSize: Maximum connection-pool size (name kept as-is for
        backward compatibility with existing callers).
    :param mongo_settings: Extra keyword arguments (currently unused).
    """
    global connection
    # Compare against None explicitly: PyMongo objects are not meant to be
    # truth-tested (``bool(client)`` raises NotImplementedError in recent
    # PyMongo), so ``if not connection:`` would break on every call after
    # the first one.
    if connection is None:
        connection = MongoClient(
            conn_str,
            connect=False,
            maxPoolSize=pullSize,
            waitQueueMultiple=10,
            waitQueueTimeoutMS=1000
        )
    return connection
|
lundgrenalex/mtrc | libs/routes/gauge.py | from flask import Blueprint, jsonify, request
from libs.tools.json import validate_schema
from libs.metric import gauge
app = Blueprint('gateway-gauge', __name__, url_prefix='/handler/gauge')
@app.route('/', methods=['POST'])
@validate_schema('gauge')
def get_gauge():
    """Accept a schema-validated gauge payload and update the metric."""
    result = gauge.update(request.json)
    return jsonify(result), 200
@app.route('/<string:metric_name>/', methods=['DELETE'])
def remove_metric(metric_name):
    """Delete the named gauge metric."""
    result = gauge.remove(metric_name)
    return jsonify(result), 200
|
lundgrenalex/mtrc | application.py | <filename>application.py<gh_stars>0
import logging
from werkzeug.middleware.proxy_fix import ProxyFix
from libs.routes import create_app
import config
# Log everything (DEBUG and up) to the project log file.
logging.basicConfig(
    format='%(asctime)-15s %(levelname)-8s %(name)-1s: %(message)s',
    level=logging.DEBUG,
    filename='./logs/mrtc.log',  # NOTE(review): "mrtc" vs project name "mtrc" — confirm intended.
)

# WSGI application object used by servers importing ``application:app``.
app = create_app()

if __name__ == '__main__':
    # Dev entrypoint: trust X-Forwarded-* headers and run Flask's server.
    app.wsgi_app = ProxyFix(app.wsgi_app)
    app.run(
        host=config.flask['host'],
        port=config.flask['port'],
        debug=config.flask['debug'])
|
lundgrenalex/mtrc | libs/storage/redis.py | '''Connection pool with Redis'''
import redis
connections_pool = {}
def connect(**redis_settings):
    '''Get a Redis connection, memoized per unique settings combination.'''
    # A stable key built from the keyword arguments identifies the client.
    conn_key = ';'.join(['{}:{}'.format(k, str(v)) for k, v in redis_settings.items()])
    if conn_key not in connections_pool:
        # NOTE: ``**redis_settings`` already hands this function its own
        # dict, so the previous ``redis_settings.copy()`` here was dead code.
        connections_pool[conn_key] = redis.StrictRedis(**redis_settings, encoding="utf-8", decode_responses=True)
    return connections_pool[conn_key]
|
lundgrenalex/mtrc | libs/schemas/gauge.py | schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "MetricItem",
"type": "object",
"properties": {
"name": {
"type": "string",
"pattern": "^[a-z_][a-z0-9_]*$",
},
"date": {"type": "integer"},
"value": {
"type": "number",
},
"labels": {
"type": "object",
"minProperties": 1,
"propertyNames": {
"pattern": "^[a-z_][a-z0-9_]*$"
},
"additionalProperties": {
"type": ["string", "null"]
}
}
},
"required": ["name", "value",]
}
|
lundgrenalex/mtrc | config.py | <reponame>lundgrenalex/mtrc<gh_stars>0
import logging
import logging.config
from werkzeug.security import generate_password_hash
import os
# Absolute path to the repository root (directory containing this file).
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))

# Default Flask settings. NOTE(review): the port is a string here — Flask
# accepts it, but confirm nothing expects an int.
flask = {
    'host': '127.0.0.1',
    'port': '8087',
    'debug': True,
    'sentry_dsn': None,
}

# Overwrite config by your local
try:
    from configs.local import *  # noqa
except ImportError:
    pass
|
lundgrenalex/mtrc | libs/metric/average.py | from libs.storage import mongodb
import json
import time
def update(metric: dict) -> bool:
    """Record a value for a sliding-window 'average' metric.

    The stored document keeps every point that is still inside the
    ``average`` window (seconds); its ``value`` field holds the mean of
    those points.

    :param metric: Payload with ``name``, ``labels``, ``average``,
        ``description``, ``value`` and ``date`` keys (validated upstream).
    :return: Always ``True``.
    """
    db = mongodb.connect()
    current_time = int(time.time())

    saved_metric = db.mtrc.metrics.find_one({
        'type': 'average',
        'name': metric['name'],
        'labels': metric['labels'],
        'average': metric['average'],
    })
    if not saved_metric:
        # First data point: create the document with a single-entry window.
        db.mtrc.metrics.insert_one({
            'type': 'average',
            'name': metric['name'],
            'labels': metric['labels'],
            'average': int(metric['average']),
            'description': metric['description'],
            'values': [{
                'date': int(metric['date']),
                'value': float(metric['value']),
            }],
            'value': float(metric['value']),
            'date': current_time,
        })
        return True

    # Append the new point, then drop points older than the window.
    saved_metric['values'].append({
        'value': metric['value'],
        'date': metric['date'],
    })
    window = saved_metric['average']
    fresh = [
        point for point in saved_metric['values']
        if point['date'] + window > current_time
    ]
    if not fresh:
        # Bug fix: when every point (including the one just appended) falls
        # outside the window, the original code divided by zero below.
        # Keep the newest point so the metric still carries a value.
        fresh = [saved_metric['values'][-1]]
    saved_metric['values'] = fresh

    # Recompute the mean over the remaining points.
    saved_metric['value'] = sum(p['value'] for p in fresh) / len(fresh)
    # Touch the last-updated timestamp.
    saved_metric['date'] = current_time

    # Store new data.
    db.mtrc.metrics.update_one({'_id': saved_metric['_id']}, {
        '$set': saved_metric
    })
    return True
def remove(metric_name: str) -> bool:
    """Delete all stored 'average' metrics with the given name.

    :param metric_name: Name of the metric to drop.
    :return: Always ``True``.
    """
    db = mongodb.connect()
    # Bug fix: the original referenced the undefined name ``metric`` (the
    # parameter is ``metric_name``) and raised NameError. Also use
    # delete_many() — Collection.remove() was removed in PyMongo 4.
    db.mtrc.metrics.delete_many({'type': 'average', 'name': metric_name})
    return True
|
lundgrenalex/mtrc | libs/routes/average.py | from flask import Blueprint, jsonify, request
from libs.tools.json import validate_schema
from libs.metric import average
app = Blueprint('gateway-average', __name__, url_prefix='/handler/average')
@app.route('/', methods=['POST'])
@validate_schema('average')
def get_average():
    """Accept a schema-validated average payload and update the metric."""
    result = average.update(request.json)
    return jsonify(result), 200
@app.route('/<string:metric_name>/', methods=['DELETE'])
def remove_metric(metric_name):
    """Delete the named average metric."""
    result = average.remove(metric_name)
    return jsonify(result), 200
|
lundgrenalex/mtrc | tests/test_exporter/test_api.py | import json
import unittest
from application import app
class TestingExporter(unittest.TestCase):
    """Smoke tests for the /metrics exporter endpoints."""

    def setUp(self):
        self.app = app.test_client()

    def test_exporter(self):
        '''GET /metrics responds with 200.'''
        response = self.app.get('/metrics', follow_redirects=True)
        self.assertEqual(response.status_code, 200)

    def test_remove_all_metrics(self):
        '''DELETE /metrics/ responds with 200.'''
        response = self.app.delete('/metrics/', follow_redirects=True)
        self.assertEqual(response.status_code, 200)
if __name__ == '__main__':
unittest.main()
|
lundgrenalex/mtrc | libs/metric/exporter.py | <reponame>lundgrenalex/mtrc<filename>libs/metric/exporter.py
from libs.storage import mongodb
import json
'''
FORMAT: https://prometheus.io/docs/instrumenting/exposition_formats/#basic-info
metric_name [
"{" label_name "=" `"` label_value `"` { "," label_name "=" `"` label_value `"` } [ "," ] "}"
] value [ timestamp ]
'''
def get_labels_string(labels: dict) -> str:
    '''Render a labels dict in the Prometheus exposition format.

    Example: ``{"a": "1", "b": "2"}`` -> ``'{a="1", b="2"}'``.

    :param labels: Mapping of label name to label value.
    '''
    # Join the pairs up front instead of appending ", " after every item and
    # then patching the result with ``.replace(', }', '}')`` — that patch
    # replaced *all* occurrences and so mangled label values that happened
    # to contain the substring ", }".
    pairs = ', '.join(f'{key}="{value}"' for key, value in labels.items())
    return '{' + pairs + '}'
def get_metrics() -> str:
    '''Render every stored metric in the Prometheus text format.'''
    db = mongodb.connect()
    result = ''
    metrics = [metric for metric in db.mtrc.metrics.find()]
    for metric in metrics:
        # HELP line is only emitted when a description was stored.
        if 'description' in metric:
            result += f"# HELP {metric['name']} {metric['description']}\n"
        result += f"# TYPE {metric['name']} {metric['type']}\n"
        result += f"{metric['name']}_{metric['type']}{get_labels_string(metric['labels'])} {metric['value']}\n"
    return result
def drop_all_metrics() -> bool:
    '''Drop the whole metrics database, removing every stored metric.'''
    db = mongodb.connect()
    # NOTE(review): drop_database() returns None in recent PyMongo; the
    # ``bool`` annotation looks optimistic — confirm against pinned version.
    return db.drop_database('mtrc')
|
lundgrenalex/mtrc | libs/routes/__init__.py | <reponame>lundgrenalex/mtrc
from flask import Flask, jsonify, g
from libs.tools.encoders import FlaskEncoder
import sentry_sdk
import config
from sentry_sdk.integrations.flask import FlaskIntegration
# incoming primitives
from .counter import app as counter_app
from .gauge import app as gauge_app
from .average import app as average_app
# exporter
from .exporter import app as exporter_app
def not_found(e):
    """Flask 404 handler: respond with a JSON error envelope."""
    payload = {
        'error_code': 404,
        'error_message': 'Not Found',
    }
    return jsonify(payload), 404
def server_error(e):
    """Flask 500 handler: respond with a JSON error envelope."""
    payload = {
        'error_code': 500,
        'error_message': 'Server Error',
    }
    return jsonify(payload), 500
def create_app():
    """Application factory: wire Sentry, blueprints, error handlers, encoder.

    :return: a configured :class:`flask.Flask` instance
    """
    # Initialise Sentry error reporting before the app handles any request.
    sentry_sdk.init(
        dsn=config.flask['sentry_dsn'], integrations=[FlaskIntegration()])
    app = Flask(__name__)
    # Incoming metric primitives.
    app.register_blueprint(counter_app)
    app.register_blueprint(gauge_app)
    app.register_blueprint(average_app)
    # Prometheus exporter endpoint.
    app.register_blueprint(exporter_app)
    app.register_error_handler(404, not_found)
    app.register_error_handler(500, server_error)
    # Custom JSON encoder used by jsonify() across the app.
    app.json_encoder = FlaskEncoder
    return app
|
lundgrenalex/mtrc | tests/test_gauge/test_api.py | import json
import unittest
import requests
import time
from random import randrange
from application import app
class TestingGauge(unittest.TestCase):
    """Smoke test for the gauge metric handler endpoint."""

    def setUp(self):
        self.app = app.test_client()
        # A randomised gauge payload posted as JSON.
        self.fixture = json.dumps({
            "name": "example",
            "labels": {
                "label1": "232",
                "label2": "dsds"
            },
            "value": randrange(0, 101, 2),
            "date": int(time.time()),
            "description": "test data",
        })

    def test_gauge(self):
        '''POST /handler/gauge with a JSON payload returns HTTP 200'''
        response = self.app.post(
            '/handler/gauge',
            follow_redirects=True,
            data=self.fixture,
            content_type='application/json')
        self.assertEqual(response.status_code, 200)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
lundgrenalex/mtrc | libs/tools/statsd.py | import logging
def get_metric(metric_name: str, raw_metrics: str):
    """Grab a selected metric from a statsd/Prometheus-style metrics dump.

    :param metric_name: name the metric line must *start* with
    :param raw_metrics: full newline-separated metrics text
    :return: ``{'value': <str>, 'labels': <dict>}`` for the first matching line
    :raises IndexError: when no line starts with *metric_name*

    Fix: the inner label parser previously declared a ``raw_metrics``
    parameter it never used and silently read the enclosing scope's
    ``raw_metric`` variable instead; it is now passed the selected metric
    line explicitly, which makes the data flow clear and correct.
    """
    def __get_labels(raw_metric):
        # Parse the {k="v",...} section of a single metric line.
        # Cuts everything before the last '{' and after the first '}'.
        try:
            labels = {}
            raw_labels = raw_metric.split('{')[-1:][0]
            raw_labels = raw_labels.split('}')[0].split(',')
            for rl in raw_labels:
                parsed_rl = rl.split('=')
                labels[parsed_rl[0]] = parsed_rl[1].replace('"', '')
            return labels
        except Exception as e:
            # Best-effort: malformed label sections yield an empty dict.
            logging.error(e)
            return {}

    # One metric per row.
    metrics_in_rows = raw_metrics.split('\n')
    # Select the first row that starts with metric_name (str.find(...) == 0).
    raw_metric = [metric for metric in metrics_in_rows if not metric.find(metric_name)][0]
    # The sample value is the last whitespace-separated token on the line.
    value = raw_metric.split()[-1:][0]
    return {'value': value, 'labels': __get_labels(raw_metric)}
|
lundgrenalex/mtrc | libs/metric/counter.py | <reponame>lundgrenalex/mtrc<gh_stars>0
from libs.storage import mongodb
import time
def update(metric: dict) -> bool:
    """Create or increment a counter metric document in MongoDB.

    Expects *metric* to carry ``name``, ``labels``, ``average``,
    ``description`` and ``value`` keys.  Always returns ``True``.
    """
    db = mongodb.connect()
    current_time = int(time.time())
    # Look up an existing counter with the same identity (name/labels/average).
    # NOTE(review): the query matches the raw metric['average'] while inserts
    # store int(metric['average']) — a type mismatch here could create
    # duplicate documents; confirm the inputs are already ints.
    saved_metric = db.mtrc.metrics.find_one({
        'type': 'counter',
        'name': metric['name'],
        'labels': metric['labels'],
        'average': metric['average'],
    })
    if not saved_metric:
        # First sighting: insert a fresh counter document.
        db.mtrc.metrics.insert_one({
            'type': 'counter',
            'name': metric['name'],
            'labels': metric['labels'],
            'average': int(metric['average']),
            'description': metric['description'],
            'value': float(metric['value']),
            'date': current_time,
        })
        return True
    # Existing counter: bump the value and refresh the timestamp.
    db.mtrc.metrics.update_one({'_id': saved_metric['_id']}, {
        '$set': {'date': current_time},
        '$inc': {'value': float(metric['value'])},
    })
    return True
def remove(metric_name: str) -> bool:
    """Remove all counter metrics with the given name.

    :param metric_name: name of the counter metric(s) to delete
    :return: always ``True``

    Bug fix: the previous body referenced the undefined name ``metric``
    (as ``metric['name']``) instead of the ``metric_name`` parameter,
    which raised ``NameError`` on every call.
    """
    db = mongodb.connect()
    db.mtrc.metrics.remove({'type': 'counter', 'name': metric_name})
    return True
|
gdebayancmu/espnet | espnet2/asr/frontend/wavelet_frontend.py | import copy
import os
from typing import Optional
from typing import Tuple
from typing import Union
from ssqueezepy import cwt
import humanfriendly
import numpy as np
import torch
from torch_complex.tensor import ComplexTensor
from typeguard import check_argument_types
from espnet.nets.pytorch_backend.frontends.frontend import Frontend
from espnet2.asr.frontend.abs_frontend import AbsFrontend
from espnet2.layers.log_mel import LogMel
from espnet2.layers.stft import Stft
from espnet2.utils.get_default_kwargs import get_default_kwargs
class WaveletFrontEnd(AbsFrontend):
    """Speech frontend producing continuous-wavelet-transform (CWT) features.

    The waveform is split into overlapping frames, ssqueezepy's ``cwt`` is
    applied to the framed signal, and the absolute coefficients are averaged
    over the within-frame time axis to give one feature vector per frame.
    """

    def __init__(
        self,
        fs: Union[int, str] = 16000,
        win_length: int = 320,
        hop_length: int = 160,
        **kwargs
    ):
        # Ask ssqueezepy to run on GPU (see ssqueezepy's SSQ_GPU setting).
        os.environ['SSQ_GPU'] = '1'
        assert check_argument_types()
        super().__init__()
        if isinstance(fs, str):
            # e.g. "16k" -> 16000
            fs = humanfriendly.parse_size(fs)
        self.fs = fs
        self.win_length = win_length
        self.hop_length = hop_length
        # Output feature dimension — presumably the number of CWT scales
        # produced with nv=32; TODO confirm and derive instead of hardcoding.
        self.op_size = 221 # Currently Hardcoded

    def output_size(self) -> int:
        """Return the feature dimension emitted by forward()."""
        return self.op_size

    def frame_with_overlap(self, signal: np.ndarray, Nw: int, Nsh: int) -> np.ndarray:
        """
        Creates overlapping windows (frames) of the input signal.

        :param signal: input audio signal (moved to CPU before framing)
        :param Nw: window length in samples
        :param Nsh: window shift (hop) in samples
        :return: 2-D array of shape (num_frames, Nw); the last frame is
            zero-padded to length Nw
        """
        signal = signal.cpu()
        signal_len = signal.shape[0]
        num_frames = np.ceil(signal_len / Nsh)
        framed_signal = np.zeros((int(num_frames), Nw))
        i = 0
        fr_id = 0
        while i < signal_len and fr_id < num_frames:
            # Zero-pad the (possibly short) last window up to Nw samples.
            pad_len = Nw - signal[i:i + Nw].shape[0]
            framed_signal[fr_id, :] = np.pad(signal[i:i + Nw], (0, pad_len), 'constant', constant_values=(0, 0))
            if i + Nw > signal_len:
                break
            fr_id += 1
            i += Nsh
        # Trim any unused pre-allocated rows.
        framed_signal = framed_signal[0:fr_id + 1, :]
        return framed_signal

    def forward(
        self, input: torch.Tensor, input_lengths: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute CWT features for a batch of waveforms.

        :param input: waveform batch; a 4-D input is treated as multi-channel
            (B, T, C, F) and reduced to a single channel
        :param input_lengths: per-utterance sample counts
        :return: (features of shape (B, frames, op_size), per-utterance
            frame counts)
        """
        # print('----In Forward WAVELET------')
        # print('input', input.shape)
        # print('input_lengths', input_lengths)
        batch_size = input.shape[0]
        # 3. [Multi channel case]: Select a channel
        if input.dim() == 4:
            # h: (B, T, C, F) -> h: (B, T, F)
            if self.training:
                # Select 1ch randomly
                ch = np.random.randint(input.size(2))
                input = input[:, :, ch, :]
            else:
                # Use the first channel
                input = input[:, :, 0, :]
        wavelet_batch = []
        # CWT is computed per utterance; results are concatenated on dim 0.
        for batch in range(0, batch_size):
            torch.cuda.empty_cache()
            framed_signal = self.frame_with_overlap(input[batch], Nw=self.win_length, Nsh=self.hop_length)
            Wxo, dWx = cwt(framed_signal, fs=self.fs, nv=32)
            # print('wxo size', Wxo.shape)
            # Average |CWT| over the within-frame time axis.
            wxo_abs = torch.mean(torch.abs(torch.tensor(Wxo)), dim=2)
            wxo_abs = torch.reshape(wxo_abs, (1, -1, self.op_size))
            wavelet_batch.append(wxo_abs)
            # print('wavelet_batch', len(wavelet_batch))
        input_feats = torch.cat(wavelet_batch, dim=0)
        # Frame count per utterance = floor(samples / hop_length).
        feats_lens = torch.tensor([torch.div(x, self.hop_length, rounding_mode='floor') for x in input_lengths])
        # print('RETURNING WAVELET input_feats.shape', input_feats.shape, 'feat lens', feats_lens)
        return input_feats, feats_lens
|
metricjar/atom-python | ironsource/atom/config.py | <reponame>metricjar/atom-python<gh_stars>0
# Atom Python SDK config file — central defaults for the tracker and client.
SDK_VERSION = "1.5.4"
ATOM_ENDPOINT = "http://track.atom-data.io/"
# Tracker Config
# Number of events per batch (bulk) and its hard upper limit.
BATCH_SIZE = 256
BATCH_SIZE_LIMIT = 2000
# Byte size of a batch and its hard upper limit.
BATCH_BYTES_SIZE = 64 * 1024
BATCH_BYTES_SIZE_LIMIT = 512 * 1024
# Default flush interval in milliseconds
FLUSH_INTERVAL = 10000
# Batch Event Pool Config
# Default Number of workers(threads) for BatchEventPool
BATCH_WORKER_COUNT = 1
# Default Number of batch events to hold in BatchEventPool
BATCH_POOL_SIZE = 1
# EventStorage Config (backlog)
# Default backlog queue size (per stream)
BACKLOG_SIZE = 500
# Retry on 500 / Connection error conf
# Retry max time in seconds
RETRY_MAX_TIME = 1800
# Maximum number of retries (set it to 1 in order to disable retry). This value is ignored if RETRY_FOREVER = True
RETRY_MAX_COUNT = 12
# Base multiplier for exponential backoff calculation
RETRY_EXPO_BACKOFF_BASE = 3
# Should the worker in BatchEventPool retry forever on server error (recommended)
RETRY_FOREVER = True
# Tracker backlog conf
# Tracker backlog Queue GET & PUT Block or not.
BACKLOG_BLOCKING = True
# Tracker backlog Queue GET & PUT timeout in seconds (ignored if backlog is blocking)
BACKLOG_TIMEOUT = 1
# HTTP requests lib session GET/POST timeout in seconds (default: 60 seconds)
REQUEST_TIMEOUT = 60
# Debug file path once debug_to_file is enabled
DEBUG_FILE_PATH = "/tmp/"
|
metricjar/atom-python | ironsource/atom/ironsource_atom_tracker.py | import json
import signal
import Queue
from ironsource.atom.ironsource_atom import IronSourceAtom
from ironsource.atom.queue_event_storage import QueueEventStorage
from ironsource.atom.batch_event_pool import BatchEventPool
from ironsource.atom.event import Event
import ironsource.atom.atom_logger as logger
import ironsource.atom.config as config
import time
import random
from threading import Lock
from threading import Thread
class IronSourceAtomTracker:
    """
    ironSource Atom high level API class (Tracker), supports: track() and flush()
    """
    TAG = "IronSourceAtomTracker"

    def __init__(self,
                 batch_worker_count=config.BATCH_WORKER_COUNT,
                 batch_pool_size=config.BATCH_POOL_SIZE,
                 event_backlog=None,
                 backlog_size=config.BACKLOG_SIZE,
                 flush_interval=config.FLUSH_INTERVAL,
                 retry_max_time=config.RETRY_MAX_TIME,
                 retry_max_count=config.RETRY_MAX_COUNT,
                 batch_size=config.BATCH_SIZE,
                 batch_bytes_size=config.BATCH_BYTES_SIZE,
                 is_debug=False,
                 debug_to_file=False,
                 debug_file_path=config.DEBUG_FILE_PATH,
                 endpoint=config.ATOM_ENDPOINT,
                 auth_key="",
                 callback=None,
                 retry_forever=config.RETRY_FOREVER,
                 is_blocking=config.BACKLOG_BLOCKING,
                 backlog_timeout=config.BACKLOG_TIMEOUT,
                 request_timeout=config.REQUEST_TIMEOUT):
        """
        Tracker init function

        :param batch_worker_count: Optional, Number of workers(threads) for BatchEventPool
        :type batch_worker_count: int
        :param batch_pool_size: Optional, Number of events to hold in BatchEventPool
        :type batch_pool_size: int
        :param event_backlog: Optional, Custom EventStorage implementation
        :type event_backlog: object
        :param backlog_size: Optional, Backlog queue size (EventStorage ABC implementation)
        :type backlog_size: int
        :param flush_interval: Optional, Tracker flush interval in milliseconds (default 10000)
        :type flush_interval: int
        :param retry_max_time: Optional, Retry max time in seconds
        :type retry_max_time: int
        :param retry_max_count: Optional, Maximum number of retries in seconds
        :type retry_max_count: int
        :param batch_size: Optional, Amount of events in every batch (bulk) (default: 500)
        :type batch_size: int
        :param batch_bytes_size: Optional, Size of each batch (bulk) in bytes (default: 64KB)
        :type batch_bytes_size: int
        :param is_debug: Optional, Enable printing of debug information
        :type is_debug: bool
        :param debug_to_file: Optional, Should the Tracker write the request and response objects to file
        :type debug_to_file: bool
        :param debug_file_path: Optional, the path to the debug file (debug_to_file must be True) (default: /tmp)
        :type debug_file_path: str
        :param endpoint: Optional, Atom endpoint
        :type endpoint: str
        :param auth_key: Optional, Default auth key to use (when none is provided in .track)
        :type auth_key: str
        :param callback: Optional, callback to be called on error (Client 400/ Server 500)
        :type callback: function
        :param retry_forever: Optional, should the BatchEventPool retry forever on server error (default: True)
        :type retry_forever: bool
        :param is_blocking: Optional, should the tracker backlog block (default: True)
        :type is_blocking: bool
        :param backlog_timeout: Optional, tracker backlog block timeout (ignored if is_blocking, default: 1 second)
        :type backlog_timeout: bool
        :param request_timeout: Optional, HTTP requests lib session GET/POST timeout in seconds (default: 60 seconds)
        :type request_timeout: int
        """
        # Init Atom basic SDK
        self._is_debug = is_debug
        # For debug printing
        self._debug_counter = 0
        self._atom = IronSourceAtom(endpoint=endpoint,
                                    is_debug=self._is_debug,
                                    auth_key=auth_key,
                                    request_timeout=request_timeout,
                                    debug_to_file=debug_to_file,
                                    debug_file_path=debug_file_path)
        self._logger = logger.get_logger(debug=self._is_debug)
        # Optional callback to be called on error, convention: time, status, error_msg, data
        self._callback = callback if callable(callback) else lambda timestamp, status, error_msg, data, stream: None
        self._is_run_worker = True
        self._flush_all = False
        self._alive = True
        # Lock of accessing the stream_keys dict
        self._data_lock = Lock()
        # Streams to keys map
        self._stream_keys = {}
        # Retry with exponential backoff config
        # Retry max time
        if not isinstance(retry_max_time, int) or retry_max_time < 120:
            self._logger.warning("Retry Max Time must be 120 or greater! Setting default: {}"
                                 .format(config.RETRY_MAX_TIME))
            retry_max_time = config.RETRY_MAX_TIME
        self._retry_max_time = retry_max_time
        # Retry max count
        if not isinstance(retry_max_count, int) or retry_max_count < 1:
            self._logger.warning("Retry Max Count must be 1 or greater! Setting default: {}"
                                 .format(config.RETRY_MAX_COUNT))
            retry_max_count = config.RETRY_MAX_COUNT
        self._retry_max_count = retry_max_count
        # Batch size
        if not isinstance(batch_size, int) or batch_size < 1 or batch_size > config.BATCH_SIZE_LIMIT:
            self._logger.warning("Invalid Bulk size, must between 1 to {max}, setting it to {default}"
                                 .format(max=config.BATCH_SIZE_LIMIT, default=config.BATCH_SIZE))
            batch_size = config.BATCH_SIZE
        self._batch_size = batch_size
        # Batch bytes size
        if not isinstance(batch_bytes_size, int) \
                or batch_bytes_size < 1024 \
                or batch_bytes_size > config.BATCH_BYTES_SIZE_LIMIT:
            self._logger.warning("Invalid Bulk byte size, must between 1KB to {max}KB, setting it to {default}KB"
                                 .format(max=config.BATCH_BYTES_SIZE_LIMIT / 1024,
                                         default=config.BATCH_BYTES_SIZE / 1024))
            batch_bytes_size = config.BATCH_BYTES_SIZE
        self._batch_bytes_size = batch_bytes_size
        # Flush Interval
        if not isinstance(flush_interval, int) or flush_interval < 1000:
            self._logger.warning("Flush Interval must be 1000ms or greater! Setting default: {}"
                                 .format(config.FLUSH_INTERVAL))
            flush_interval = config.FLUSH_INTERVAL
        self._flush_interval = flush_interval
        # Holds the events after .track method
        self._event_backlog = event_backlog if event_backlog else QueueEventStorage(queue_size=backlog_size,
                                                                                   block=is_blocking,
                                                                                   timeout=backlog_timeout)
        # Retry forever on server error (500) - When False and no callback is provided it may cause data loss
        self._retry_forever = retry_forever
        # Holds batch of events for each stream and sends them using {thread_count} workers
        self._batch_event_pool = BatchEventPool(thread_count=batch_worker_count,
                                                max_events=batch_pool_size)
        # Start the handler thread - daemon since we want to exit even if it didn't stop yet
        handler_thread = Thread(target=self._tracker_handler)
        handler_thread.daemon = True
        handler_thread.start()
        # Start the thread that handles periodic flushing
        # NOTE(review): "_flush_peroidcly" is a typo of "_flush_periodically";
        # renaming requires changing both the def and this reference.
        timer_thread = Thread(target=self._flush_peroidcly)
        timer_thread.daemon = True
        timer_thread.start()
        # Intercept exit signals
        signal.signal(signal.SIGTERM, self._graceful_kill)
        signal.signal(signal.SIGINT, self._graceful_kill)

    def stop(self):
        """
        Stop worker thread and event_pool thread's

        Requests a final flush, then waits until both the batch pool and the
        backlog are empty, or at most 5 seconds, before killing the workers.
        """
        self._logger.info("Flushing all data and killing the tracker in 5 seconds...")
        self._flush_all = True
        self._alive = False
        i = 0
        while True:
            # Check if everything is empty or 5 seconds has passed
            if self._batch_event_pool.is_empty() and self._event_backlog.is_empty() or i == 5:
                self._logger.warning("BatchPool and Backlog are empty or 5 seconds have passed, killing the tracker")
                self._is_run_worker = False
                self._batch_event_pool.stop()
                break
            i += 1
            time.sleep(1)

    def set_debug(self, is_debug):  # pragma: no cover
        """
        Enable / Disable debug

        :param is_debug: Enable printing of debug information
        :type is_debug: bool
        """
        self._is_debug = is_debug if isinstance(is_debug, bool) else False
        self._logger = logger.get_logger(debug=self._is_debug)
        self._atom.set_debug(self._is_debug)

    def track(self, stream, data, auth_key=""):
        """
        Track event

        :param stream: Atom stream name
        :type stream: str
        :param data: Data to send (payload) (dict or string)
        :type data: object
        :param auth_key: HMAC auth key for stream
        :type auth_key: str
        """
        if len(auth_key) == 0:
            auth_key = self._atom.get_auth()
        # Non-string payloads are serialized to JSON before queueing.
        if not isinstance(data, str):
            try:
                data = json.dumps(data)
            except TypeError as e:
                self._error_log(0, time.time(), 400, str(e), data, stream)
                return
        with self._data_lock:
            if stream not in self._stream_keys:
                self._stream_keys[stream] = auth_key
        try:
            self._event_backlog.add_event(Event(stream, data))
            self._debug_counter += 1
        except Queue.Full:
            self._error_log(0, time.time(), 400, "Tracker backlog is full, can't enqueue events", data, stream)

    def flush(self):
        """
        Flush data from all streams
        """
        self._flush_all = True

    def _flush_peroidcly(self):
        """
        Flush everything every {flush_interval}
        Note: the time.time() is used cause python is not accurate enough and adds a delay
        when using time.sleep(x) (where x is a constant)
        """
        next_call = time.time()
        i = 0
        while self._is_run_worker:
            # Wrap the debug counter so it never grows unbounded.
            if i == 10000:
                i = 0
            # Divide by 1000 since flush_interval is provided in milliseconds
            next_call += self._flush_interval / 1000
            # This part is here only for better debugging
            if i % 2 == 0:
                self._logger.debug("Flushing In {} Seconds".format(next_call - time.time()))
            i += 1
            try:
                time.sleep(next_call - time.time())
                self.flush()
            except (IOError, ValueError) as e:
                # Can happen after sleep
                self._logger.error("Timer error: {}".format(str(e.args)))
                next_call = time.time()

    def _tracker_handler(self):
        """
        Main tracker function, handles flushing based on given conditions
        """
        # Buffer between backlog and batch pool
        events_buffer = {}
        # Dict to hold events size for every stream
        batch_bytes_size = {}
        self._logger.info("Tracker Handler Started")

        def flush_data(stream, auth_key):
            # This 'if' is needed for the flush_all case
            if stream in events_buffer and len(events_buffer[stream]) > 0:
                # Snapshot the buffer, then clear it, so the send happens
                # asynchronously via the batch pool.
                temp_buffer = list(events_buffer[stream])
                del events_buffer[stream][:]
                batch_bytes_size[stream] = 0
                self._batch_event_pool.add_event(lambda: self._flush_data(stream, auth_key, temp_buffer))

        while self._is_run_worker:
            if self._event_backlog.is_empty():
                # Nothing queued: back off briefly to avoid busy-waiting.
                time.sleep(2)
            if self._flush_all:
                for stream_name, stream_key in self._stream_keys.items():
                    flush_data(stream_name, stream_key)
                if self._alive:
                    self._flush_all = False
            else:
                for stream_name, stream_key in self._stream_keys.items():
                    # Get one event from the backlog
                    try:
                        event_object = self._event_backlog.get_event(stream_name)
                    except Queue.Empty:
                        continue
                    if event_object is None:
                        continue
                    if stream_name not in batch_bytes_size:
                        batch_bytes_size[stream_name] = 0
                    if stream_name not in events_buffer:
                        events_buffer[stream_name] = []
                    batch_bytes_size[stream_name] += len(event_object.data.encode("utf8"))
                    events_buffer[stream_name].append(event_object.data)
                    # Flush when either the byte-size or event-count cap is hit.
                    if batch_bytes_size[stream_name] >= self._batch_bytes_size:
                        flush_data(stream_name, auth_key=stream_key)
                    if len(events_buffer[stream_name]) >= self._batch_size:
                        flush_data(stream_name, auth_key=stream_key)
        self._logger.info("Tracker handler stopped")

    def _flush_data(self, stream, auth_key, data):
        """
        Send data to server using IronSource Atom Low-level API

        NOTE: this function is passed a lambda to the BatchEventPool so it might continue running if it was
        triggered already even after a graceful killing for at least (retry_max_count) times
        """
        attempt = 1
        while True:
            try:
                response = self._atom.put_events(stream, data=data, auth_key=auth_key)
            except Exception as e:
                self._error_log(attempt, time.time(), 400, str(e), data, stream)
                return
            # Response on first try
            if attempt == 1:
                self._logger.debug('Got Status: {}; Data: {}'.format(str(response.status), str(data)))
            # Status 200 - OK or 400 - Client Error
            if 200 <= response.status < 500:
                if 200 <= response.status < 400:
                    if self._debug_counter >= 1000:
                        self._logger.info('Tracked 1000 events to Atom')
                        self._logger.info('Status: {}; Response: {}; Error: {}'.format(str(response.status),
                                                                                       str(response.data),
                                                                                       str(response.error)))
                        self._debug_counter = 0
                else:
                    # 400
                    self._error_log(attempt, time.time(), response.status, response.error, data, stream)
                return
            # Server Error >= 500:
            # This should run forever (when we get a 500) unless retry_forever is False
            # In this case we call error_log() function and data will be lost (you can save it with the callback)
            if not self._retry_forever and attempt == self._retry_max_count:
                self._error_log(attempt, time.time(), 500, "Retry Max Count has been reached, discarding data", data,
                                stream)
                break
            # In Case we are in a graceful shutdown and we get a 500 > Call the error_log func
            if not self._is_run_worker:
                self._error_log(attempt, time.time(), 500, "Server error while on graceful shutdown", data,
                                stream)
                break
            # Retry with exponential backoff
            duration = self._get_duration(attempt)
            # NOTE(review): Logger.warn is a deprecated alias of Logger.warning.
            self._logger.warn(
                "Got code: {status} from server, error: {error}. stream: {stream}, retry duration: {duration}".format(
                    status=response.status,
                    error=response.error,
                    stream=stream,
                    duration=duration))
            attempt += 1
            time.sleep(duration)

    def _get_duration(self, attempt):
        """
        Exponential back-off + Full Jitter

        :param attempt: attempt number
        :type attempt: int
        """
        exponential_backoff = min(self._retry_max_time, pow(2, attempt) * config.RETRY_EXPO_BACKOFF_BASE)
        return random.uniform(0, exponential_backoff)

    def _graceful_kill(self, sig, frame):
        """
        Tracker exit handler

        :param frame: current stack frame
        :type frame: frame
        :param sig: integer
        :type sig: OS signal number
        """
        self._logger.info('Intercepted signal %s' % sig)
        if not self._is_run_worker:
            return
        self.stop()

    def _error_log(self, attempt, unix_time=None, status=None, error_msg=None, sent_data=None, stream=None):
        """
        Log an error and send it to a callback function (if defined by user)

        :param attempt: Sending attempt to atom
        :type attempt: int
        :param unix_time: Unix(epoch) timestamp
        :type unix_time: float
        :param status: HTTP status
        :type status: int
        :param error_msg: Error msg from server
        :type error_msg: str
        :param sent_data: Data that was sent to server
        :type sent_data: object
        :param stream: Atom Stream name
        :type stream: str
        """
        try:
            self._callback(unix_time, status, error_msg, sent_data, stream)
        except TypeError as e:
            self._logger.error('Wrong arguments given to callback function: {}'.format(e))
        self._logger.error("Error: {}; Status: {}; Attempt: {}; For Data: {:.50}...".format(error_msg,
                                                                                            status,
                                                                                            attempt,
                                                                                            sent_data))
|
metricjar/atom-python | setup.py | <filename>setup.py
#!/usr/bin/env python
"""Package setup for the ironSource.atom Python SDK."""
import sys

from setuptools import setup, find_packages

from ironsource import __version__

# Fix: pip.req / pip.download are pip-internal modules that were removed in
# pip >= 10, so importing them unconditionally breaks installation with any
# modern pip.  Keep the legacy path for old environments and fall back to
# parsing requirements.txt directly when pip's internals are unavailable.
try:
    from pip.req import parse_requirements
    from pip.download import PipSession

    install_reqs = parse_requirements('requirements.txt', session=PipSession())
    reqs = [str(ir.req) for ir in install_reqs]
except ImportError:
    with open('requirements.txt') as req_file:
        # Skip blank lines and comments; each remaining line is a requirement.
        reqs = [line.strip() for line in req_file
                if line.strip() and not line.startswith('#')]

tests_require = ['nose', 'mock', 'responses', 'flake8', 'tox']
if sys.version_info < (2, 7):
    tests_require.append('unittest2')

setup(
    name="ironsource-atom",
    version=__version__,
    description="ironSource.atom Python SDK",
    packages=["ironsource", "ironsource.atom"],
    author="ironSource.atom",
    author_email="<EMAIL>",
    url="https://github.com/ironSource/atom-python",
    tests_require=tests_require,
    test_suite='nose.collector',
    license='MIT',
    install_requires=reqs,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
)
|
metricjar/atom-python | ironsource/atom/event.py | class Event:
"""
Event object - Holds a single atom event (inside EventStorage)
:param stream: Atom stream name
:type stream: basestring
:param data: Payload data to send
:type data: object
"""
def __init__(self, stream, data):
self.stream = stream
self.data = data
|
metricjar/atom-python | ironsource_example/example.py | # -*- coding: utf-8 -*-
import json
import random
from ironsource.atom.ironsource_atom import IronSourceAtom
from ironsource.atom.ironsource_atom_tracker import IronSourceAtomTracker
import time
from threading import Thread
from threading import Lock

# Demo script: exercises the low-level client (put_event / put_events) and
# the high-level tracker from multiple threads.  Performs real network I/O.
if __name__ == "__main__":
    # Replace with a real stream name and HMAC auth key before running.
    stream = "YOUR_TARGET.public.atom_demo_events"
    auth_key = "YOUR_KEY"
    api_ = IronSourceAtom(is_debug=False, endpoint="https://track.atom-data.io/")
    # put_event() GET example
    print ("==== GET REQUEST TO ATOM WITH JSON DUMPS ====")
    data_get = {"id": 1337, "event_name": "PYTHON_SDK_GET_EXAMPLE_STRING", "string_value": str(random.random())}
    response_get = api_.put_event(stream=stream, data=json.dumps(data_get), method="get", auth_key=auth_key)
    print ("GET Response data: " + str(response_get.data) + "; error: " + str(response_get.error) +
           "; status: " + str(response_get.status))
    print ("\n==== GET REQUEST TO ATOM WITH DICT ====")
    data_get = {"id": 1338, "event_name": "PYTHON_SDK_GET_EXAMPLE_DICT", "string_value": str(random.random())}
    response_get = api_.put_event(stream=stream, data=data_get, method="get", auth_key=auth_key)
    print ("GET Response data: " + str(response_get.data) + "; error: " + str(response_get.error) +
           "; status: " + str(response_get.status))
    # put_event() POST example
    print ("\n==== POST REQUEST TO ATOM WITH JSON DUMPS ====")
    data_post = {"id": 1339, "event_name": "PYTHON_SDK_POST_EXAMPLE_STRING", "string_value": str(random.random())}
    response_post = api_.put_event(stream=stream, data=json.dumps(data_post), auth_key=auth_key)
    print ("POST Response data: " + str(response_post.data) + "; error: " + str(response_post.error) +
           "; status: " + str(response_post.status))
    print ("\n==== POST REQUEST TO ATOM WITH DICT ====")
    data_post = {"id": 1440, "event_name": "PYTHON_SDK_POST_EXAMPLE_DICT", "string_value": str(random.random())}
    response_post = api_.put_event(stream=stream, data=data_post, auth_key=auth_key)
    print ("POST Response data: " + str(response_post.data) + "; error: " + str(response_post.error) +
           "; status: " + str(response_post.status))
    # put_events example
    print ("\n==== BATCH POST REQUEST TO ATOM ====")
    data_post = [{"id": 1441, "event_name": "PYTHON_SDK_BATCH_POST_EXAMPLE", "string_value": str(random.random())},
                 {"id": 1442, "event_name": "PYTHON_SDK_BATCH_POST_EXAMPLE", "string_value": str(random.random())}]
    response_post = api_.put_events(stream=stream, data=data_post, auth_key=auth_key)
    print ("POST Response data: " + str(response_post.data) + "; error: " + str(response_post.error) +
           "; status: " + str(response_post.status))
    # Tracker example
    print ("\n==== TESTING ATOM TRACKER ====")

    def callback_func(timestmap, status, msg, data, stream):
        # Tracker callback function example
        print("[EXAMPLE CALLBACK FUNCTION] timestamp: {}, status: {} message: {}".format(timestmap, status, msg))

    # endpoint = "http://127.0.0.1:3000/"
    endpoint = "http://track.atom-data.io/"
    api_tracker = IronSourceAtomTracker(flush_interval=10000,
                                        callback=callback_func,
                                        batch_bytes_size=64 * 1024,
                                        batch_size=64,
                                        is_debug=True,
                                        debug_to_file=True,
                                        debug_file_path="./",
                                        endpoint=endpoint)

    class ThreadClass:
        # Shares one counter across 10 worker threads; each worker tracks
        # events until 100 total have been sent.
        def __init__(self):
            self._call_index = 0
            self._thread_lock = Lock()

        def thread_worker(self, args):
            print("[EXAMPLE] Thread {} started".format(args))
            while True:
                with self._thread_lock:
                    self._call_index += 1
                    data_track = {"id": self._call_index, "event_name": "PYTHON_SDK_TRACKER_EXAMPLE",
                                  "string_value": str(random.random()),
                                  "non_ascii": "Lista de leitura, novos recursos de privacidade e segurança, "
                                               "além de mais velocidade Esses são os atrativos do novo Safari"}
                    # exit after 100
                    if self._call_index >= 100:
                        return
                    else:
                        # Track every 10th event with delay
                        if self._call_index % 10 == 0:
                            time.sleep(1)
                        print("[EXAMPLE] Tracking Data")
                        api_tracker.track(stream=stream, data=data_track, auth_key=auth_key)

    threads_array = []
    thread_instance = ThreadClass()
    for index in range(0, 10):
        thread_index = index
        thread = Thread(target=thread_instance.thread_worker, args=[thread_index])
        threads_array.append(thread)
    for thread in threads_array:
        thread.start()
    for thread in threads_array:
        thread.join()
    print ("Finished all example methods.")
    # Keep the process alive so the tracker's daemon threads can flush.
    time.sleep(1000000)
|
metricjar/atom-python | ironsource/atom/batch_event_pool.py | <reponame>metricjar/atom-python<gh_stars>0
from threading import Thread
from Queue import Queue
class BatchEventPool:
    """
    Batch Event Pool constructor

    :param thread_count: Count of working threads
    :type thread_count: int
    :param max_events: Max count of events in queue
    :type max_events: int
    """

    def __init__(self, thread_count, max_events):
        # Bounded queue of zero-argument callables to execute.
        self._events = Queue(maxsize=max_events)
        self._is_running = True
        self._max_events = max_events
        self._workers = []
        for index in range(0, thread_count):
            thread = Thread(target=self.task_worker)
            self._workers.append(thread)
            thread.start()

    def stop(self):
        """
        Stop all working threads
        """
        # The put event is here to unblock the task_worker (get() is blocking)
        # NOTE(review): only one no-op is enqueued, so with thread_count > 1
        # the remaining workers may stay blocked on get() — confirm intent.
        self._events.put(lambda: 0)
        self._is_running = False

    def task_worker(self):
        """
        Worker method - for call action lambda
        """
        # Blocks on get() until an action (or the stop() no-op) arrives.
        while self._is_running:
            func = self._events.get()
            func()

    def add_event(self, event_action):
        """
        Add event for task pool

        Blocks when the queue is at max_events capacity.

        :param event_action: event lambda
        :type event_action: lambda
        """
        self._events.put(event_action)

    def is_empty(self):
        """
        Check if the event pool is empty

        :return: True if empty, else False
        """
        return self._events.empty()
|
metricjar/atom-python | ironsource/atom/request.py | <filename>ironsource/atom/request.py
import base64
import requests.exceptions
from ironsource.atom.response import Response
class Request:
    """
    Wrapper for HTTP requests to Atom API

    Improvement: get() and post() previously duplicated the whole
    try/except and status-mapping logic; it now lives in one shared
    helper so the two paths cannot drift apart.
    """

    def __init__(self, endpoint, data, session, timeout):
        """
        :param endpoint: Atom API endpoint
        :type endpoint: str
        :param data: Data that will be sent to server
        :type data: str
        :param session: requests.Session object
        :type session: function
        :param timeout: request timeout
        """
        self._url = endpoint
        self._data = data
        self._session = session
        self._timeout = timeout

    def get(self):
        """
        Request with GET method

        This method encapsulates the data object with base64 encoding and sends it to the service.
        Sends the request according to the REST API specification

        :return: Response object from server
        :rtype: Response
        """
        # NOTE(review): base64.encodestring is a deprecated alias removed in
        # Python 3.9 — kept here for Python 2 compatibility; use encodebytes
        # once the codebase is Python-3-only.
        base64_str = base64.encodestring(('%s' % self._data).encode()).decode().replace('\n', '')
        params = {'data': base64_str}
        return self._request(
            lambda: self._session.get(self._url, params=params, timeout=self._timeout))

    def post(self):
        """
        Request with POST method

        This method encapsulates the data and sends it to the service.
        Sends the request according to the REST API specification.

        :return: Response object from server
        :rtype: Response
        """
        return self._request(
            lambda: self._session.post(url=self._url, data=self._data, timeout=self._timeout))

    def _request(self, send):
        """Execute *send*, mapping transport errors and HTTP status to Response.

        :param send: zero-argument callable performing the HTTP call
        :return: Response with (error, data, status, raw) populated
        :rtype: Response
        """
        try:
            response = send()
        except requests.exceptions.ConnectionError as ex:  # pragma: no cover
            # No route to the server at all -> treated as a server-side 500.
            return Response("No connection to server", None, 500, ex)
        except requests.exceptions.RequestException as ex:  # pragma: no cover
            # Any other transport failure -> client-side 400.
            return Response(ex, None, 400, ex)
        if 200 <= response.status_code < 400:
            return Response(None, response.content, response.status_code, response)
        return Response(response.content, None, response.status_code, response)
|
metricjar/atom-python | ironsource/test/test_api.py | import responses
import json
import base64
import unittest
try:
# python 3
from urllib.parse import urlparse, quote
from unittest.mock import MagicMock
except ImportError:
# python 2
from urlparse import urlparse
from urllib import quote
from mock import MagicMock
from ironsource.atom import ironsource_atom
class TestApiSetterGetter(unittest.TestCase):
    """Checks the auth-key getter and default endpoint of IronSourceAtom."""

    def setUp(self):
        self.url = "http://track.atom-data.io/"
        self.data = {"event_name": "test", "id": "2"}
        self.stream = "streamname"
        self.auth_key = "test_key"
        self.atom_client = ironsource_atom.IronSourceAtom(auth_key=self.auth_key)

    def test_auth(self):
        self.assertEqual(self.atom_client.get_auth(), self.auth_key)

    def test_endpoint(self):
        # NOTE(review): reaches into the private _endpoint attribute directly.
        self.assertEqual(self.atom_client._endpoint, self.url)
class TestApiGET(unittest.TestCase):
    """Exercise IronSourceAtom.put_event() over HTTP GET using mocked responses."""

    def setUp(self):
        self.url = "http://track.atom-data.io/"
        self.data = {"event_name": "test", "id": "2"}
        self.stream = "streamname"
        self.atom_client = ironsource_atom.IronSourceAtom()

    @responses.activate
    def test_api_get_calls(self):
        """A single put_event() by GET should issue exactly one HTTP call."""
        responses.add(responses.GET, self.url, json=self.data, status=200)
        self.atom_client.put_event(stream=self.stream, data=json.dumps(self.data), method="get", auth_key="test_auth")
        self.assertEqual(len(responses.calls), 1, len(responses.calls))

    @responses.activate
    def test_api_get_response(self):
        """The mocked server body should be surfaced verbatim on Response.data."""
        responses.add(responses.GET, self.url, json={"Status": "Ok"}, status=200)
        res = self.atom_client.put_event(stream=self.stream, data=json.dumps(self.data), method="get")
        self.assertEqual(res.data, b"{\"Status\": \"Ok\"}")

    @responses.activate
    def test_api_get_error(self):
        """Server-side failures (5xx) must be reported via Response.status.

        Renamed from the copy-pasted ``test_put_event_post_error``: this case
        exercises the GET path, not POST.
        """
        responses.add(responses.GET, self.url, json={"error": "fail"}, status=501)
        res = self.atom_client.put_event(stream=self.stream, data=json.dumps(self.data), method="get")
        self.assertEqual(res.status, 501)
class TestApiPost(unittest.TestCase):
    """Exercise put_event() over its default HTTP POST path."""

    def setUp(self):
        self.url = "http://track.atom-data.io/"
        self.data = {"event_name": "test", "id": "2"}
        self.stream = "streamname"
        self.atom_client = ironsource_atom.IronSourceAtom()

    @responses.activate
    def test_api_post_calls(self):
        responses.add(responses.POST, self.url, json=self.data, status=200)
        self.atom_client.put_event(stream=self.stream, data=json.dumps(self.data), auth_key="test_auth")
        # Exactly one HTTP request should have been issued.
        self.assertEqual(1, len(responses.calls), len(responses.calls))

    @responses.activate
    def test_api_post_response(self):
        responses.add(responses.POST, self.url, json={"Status": "Ok"}, status=200)
        res = self.atom_client.put_event(stream=self.stream, data=json.dumps(self.data))
        # The mocked body must come back verbatim as bytes.
        self.assertEqual(b'{"Status": "Ok"}', res.data)
class TestPutEvent(unittest.TestCase):
    """Check that put_event() dispatches with the requested HTTP verb."""

    def setUp(self):
        self.url = "http://track.atom-data.io/"
        self.data = {"event_name": "test", "id": "2"}
        self.stream = "streamname"
        self.atom_client = ironsource_atom.IronSourceAtom()

    @responses.activate
    def test_put_event_get(self):
        responses.add(responses.GET, self.url, json={"Status": "Ok"}, status=200)
        self.atom_client.put_event(stream=self.stream, data=json.dumps(self.data), method="get")
        # Lower-case "get" must still produce an HTTP GET.
        self.assertEqual("GET", responses.calls[0].request.method, "Method called should be GET")

    @responses.activate
    def test_put_event_post(self):
        responses.add(responses.POST, self.url, json={"Status": "Ok"}, status=200)
        self.atom_client.put_event(stream=self.stream, data=json.dumps(self.data), method="POST")
        self.assertEqual("POST", responses.calls[0].request.method)

    @responses.activate
    def test_put_event_post_error(self):
        responses.add(responses.POST, self.url, json={"error": "fail"}, status=501)
        res = self.atom_client.put_event(stream=self.stream, data=json.dumps(self.data), method="POST")
        # Server-side errors surface through Response.status.
        self.assertEqual(501, res.status)
class TestPutEvents(unittest.TestCase):
    """Validate the batch put_events() entry point."""

    def setUp(self):
        self.url = "http://track.atom-data.io/"
        self.data = [{"event_name": "test", "id": "2"}, {"event_name": "2nd", "id": "3"}]
        self.stream = "streamname"
        self.atom_client = ironsource_atom.IronSourceAtom()

    @responses.activate
    def test_perform_call(self):
        responses.add(responses.POST, self.url, json=self.data, status=200)
        self.atom_client.put_events(stream=self.stream, data=self.data)
        # One batch should translate into exactly one HTTP request.
        self.assertEqual(1, len(responses.calls), len(responses.calls))

    @responses.activate
    def test_should_receive_data_list(self):
        responses.add(responses.POST, self.url, json=self.data, status=200)
        # A non-list payload must be rejected before any request is made.
        with self.assertRaises(Exception):
            self.atom_client.put_events(stream=self.stream, data={"event": "name"})
|
metricjar/atom-python | ironsource/atom/__init__.py | <filename>ironsource/atom/__init__.py
__author__ = 'Atom Core Team'
|
metricjar/atom-python | ironsource/atom/ironsource_atom.py | import json
import hmac
import datetime
import requests
import hashlib
import ironsource.atom.atom_logger as logger
from ironsource.atom.request import Request
import ironsource.atom.config as config
import uuid
import os
class IronSourceAtom:
    """
    ironSource Atom low level API. supports put_event() and put_events() methods.
    """
    TAG = "IronSourceAtom"

    def __init__(self, is_debug=False, endpoint=config.ATOM_ENDPOINT, auth_key="", request_timeout=60,
                 debug_to_file=False,
                 debug_file_path=config.DEBUG_FILE_PATH):
        """
        Atom class init function
        :param is_debug: Optional, Enable/Disable debug
        :type is_debug: bool
        :param endpoint: Optional, Atom API Endpoint
        :type endpoint: str
        :param auth_key: Optional, Atom auth key
        :type auth_key: str
        :param request_timeout: Optional, request timeout (default: 60)
        :type request_timeout: int
        :param debug_to_file: Optional, Should the Tracker write the request and response objects to file
        :type debug_to_file: bool
        :param debug_file_path: Optional, the path to the debug file (debug_to_file must be True) (default: /tmp)
        :type debug_file_path: str
        """
        self._endpoint = endpoint
        self._auth_key = auth_key
        self._is_debug = is_debug
        self._timeout = request_timeout
        # SDK-identifying headers sent with every request.
        self._headers = {
            'x-ironsource-atom-sdk-type': 'python',
            'x-ironsource-atom-sdk-version': config.SDK_VERSION,
            'Content-Type': 'application/json'
        }
        # init logger
        self._logger = logger.get_logger(debug=self._is_debug)
        self._debug_to_file = debug_to_file
        if self._debug_to_file:
            # Fall back to the default path unless the directory is readable AND writable.
            if os.path.isdir(debug_file_path) and os.access(debug_file_path, os.W_OK | os.R_OK):
                self._debug_file_path = debug_file_path
            else:
                self._debug_file_path = config.DEBUG_FILE_PATH
                self._logger.error("No Read & Write access to the supplied log file path, setting default: {}"
                                   .format(config.DEBUG_FILE_PATH))
            # One raw-traffic log file per day-month, e.g. "atom-raw.8-1.json".
            # NOTE(review): the path is joined by plain "+" — presumably
            # debug_file_path ends with a separator; confirm.
            now = datetime.datetime.now()
            log_file_name = self._debug_file_path + "atom-raw.{day}-{month}.json".format(day=now.day, month=now.month)
            self._raw_logger = logger.get_logger(name="AtomRawLogger", file_name=log_file_name)

    def set_debug(self, is_debug):  # pragma: no cover
        """
        Enable / Disable debug
        :param is_debug: Enable printing of debug information
        :type is_debug: bool
        """
        # Non-bool values are coerced to False rather than raising.
        self._is_debug = is_debug if isinstance(is_debug, bool) else False
        self._logger = logger.get_logger(debug=self._is_debug)

    def get_auth(self):
        """
        Get HMAC authentication key
        :rtype: str
        """
        return self._auth_key

    def put_event(self, stream, data, method="POST", auth_key=""):
        """Send a single event to Atom API
        This method exposes two ways of sending your events. Either by HTTP(s) POST or GET.
        :param method: The HTTP(s) method to use when sending data - default is POST
        :type method: str
        :param stream: Atom Stream name
        :type stream: str
        :param data: Single data (payload) event that will be sent to the server (string or dict)
        :type data: object
        :param auth_key: Hmac auth key
        :type auth_key: str
        :return: requests response object
        """
        if not data or not stream:
            raise Exception("Stream and/or Data are missing")
        if len(auth_key) == 0:
            # Fall back to the key supplied at construction time.
            auth_key = self._auth_key
        request_time = datetime.datetime.now().isoformat()
        request_data = self.create_request_data(stream, auth_key, data)
        response = self.send_data(url=self._endpoint, data=request_data, method=method, headers=self._headers,
                                  timeout=self._timeout)
        if self._debug_to_file:
            self._session_to_file(response, request_time)
        return response

    def put_events(self, stream, data, auth_key=""):
        """Send multiple events (batch) to Atom API
        This method receives a list of dictionaries, transforms them into JSON objects and sends them
        to Atom using HTTP(s) POST.
        :param stream: Atom Stream name
        :type stream: str
        :param data: List of strings or dictionaries that will be sent to Atom
        :type data: list(object)
        :param auth_key: Optional, Hmac auth key
        :type auth_key: str
        :return: requests response object
        """
        if not isinstance(data, list) or not data:
            raise Exception("Data has to be of a non-empty list")
        if not stream:
            raise Exception("Stream is required")
        if len(auth_key) == 0:
            auth_key = self._auth_key
        # Batches are serialized once here, then wrapped again by create_request_data.
        data = json.dumps(data)
        request_data = self.create_request_data(stream, auth_key, data, batch=True)
        request_time = datetime.datetime.now().isoformat()
        # Bulk events go to the dedicated "<endpoint>bulk" route.
        response = self.send_data(url=self._endpoint + "bulk", data=request_data, method="post", headers=self._headers,
                                  timeout=self._timeout)
        if self._debug_to_file:
            self._session_to_file(response, request_time)
        return response

    @staticmethod
    def create_request_data(stream, auth_key, data, batch=False):
        """
        Create json data string from input data
        :param stream: Atom stream name
        :type stream: str
        :param auth_key: Hmac auth key
        :type auth_key: str
        :param data: Data that will be sent to the Atom
        :type data: object
        :param batch: Send data by batch(bulk)
        :type batch: bool
        :return: Serialized data as JSON
        :rtype: str
        """
        if not isinstance(data, str):
            try:
                data = json.dumps(data)
            except TypeError:
                raise Exception("Cannot Encode JSON")
        request_data = {"table": stream, "data": data}
        if len(auth_key) != 0:
            # Sign the payload with HMAC-SHA256 so the server can verify its origin.
            request_data["auth"] = hmac.new(bytes(auth_key.encode("utf-8")),
                                            msg=data.encode("utf-8"),
                                            digestmod=hashlib.sha256).hexdigest()
        if batch:
            request_data["bulk"] = True
        return json.dumps(request_data)

    @staticmethod
    def send_data(url, data, method, headers, timeout):
        """
        :param url: Atom API endpoint
        :type url: str
        :param data: Data that will be sent to Atom
        :type data: str
        :param method: Type of HTTP request
        :type method: str
        :param headers: HTTP request headers
        :type headers: dict
        :param timeout: request timeout
        :return: response from server
        :rtype: Response
        """
        # A fresh Session per call: no connection reuse across requests.
        with requests.Session() as session:
            session.headers.update(headers)
            request = Request(url, data, session, timeout)
            # Anything other than "get" (case-insensitive) falls through to POST.
            if method.lower() == "get":
                return request.get()
            else:
                return request.post()

    def _session_to_file(self, response, request_time):
        """
        Writes SDK request and response to JSON file in the following format:
        [{req},{res}...]
        :param request_time: request time
        :param response: The 'requests' lib response object (including the original request)
        """
        # NOTE(review): on connection failures raw_response holds the raised
        # exception, which has no .request — presumably debug_to_file is only
        # used when the transport succeeds; confirm.
        raw_request = response.raw_response.request
        # GET requests have no body, so log the query string (path_url) instead.
        request_data = raw_request.body if raw_request.body is not None else '"{}"'.format(raw_request.path_url)
        # Correlate the request and response records with a shared UUID.
        session_id = uuid.uuid4()
        # self._raw_logger.emit("hi")
        self._raw_logger.info('''{"request": {"id": "%s", "requestTime": "%s", "data": %s, "headers": %s}}''' %
                              (session_id,
                               request_time,
                               request_data,
                               json.dumps(str(raw_request.headers))
                               ))
        # Format response
        response_headers = json.dumps(str(response.raw_response.headers)) \
            if hasattr(response.raw_response, "headers") else "\"None\""
        response_body = response.data if response.data is not None else response.error
        try:
            response_body = json.loads(response_body)
        except ValueError:
            pass
        # There is no consistency at API response on v1
        if response.status == 401:
            response_body = str(response_body).replace("\"", "'")
        self._raw_logger.info(
            '''{"response": {"id": "%s", "responseTime": "%s", "body": "%s", "code": %s, "headers": %s}}''' %
            (session_id,
             datetime.datetime.now().isoformat(),
             response_body,
             response.status,
             response_headers))
|
metricjar/atom-python | ironsource/atom/atom_logger.py | import logging
import logging.handlers
def get_logger(name="AtomLogger", debug=False, file_name="atom-raw.json"):
    """
    Atom Logger factory: If AtomRawLogger == name then it will return a rotating-file logger, else just logs to stdout.
    :param name: logger name
    :param debug: Disable/enable debug printing
    :param file_name: path of the rotating log file (only used when name == "AtomRawLogger")
    :return: configured logging.Logger
    """
    logger = logging.getLogger(name)
    new_level = logging.DEBUG if debug else logging.INFO
    if name == "AtomRawLogger":
        # Raw-traffic logger: rotating file (50 MB x 100 backups), never
        # propagates to the root logger.
        logger.setLevel(logging.INFO)
        ch = logging.handlers.RotatingFileHandler(file_name,
                                                  encoding="utf8",
                                                  maxBytes=50 * 1024 * 1024,
                                                  backupCount=100)
        ch.setLevel(logging.INFO)
        logger.addHandler(ch)
        logger.propagate = 0
        return logger
    # NOTSET always differs from new_level (DEBUG/INFO are non-zero), so the
    # old "NOTSET or level changed" test reduces to a plain inequality.
    if logger.level != new_level:
        logger.setLevel(new_level)
    # Create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    # Create formatter
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # add formatter to ch
    ch.setFormatter(formatter)
    # add ch to logger
    if len(logger.handlers) == 0:
        logger.addHandler(ch)
    else:
        # case we already have a stdout handler and we want to change its level
        logger.handlers[0].level = new_level
    logger.propagate = 0
    logger.info('Starting Logger: {}, debug: {}'.format(name, debug))
    return logger
|
metricjar/atom-python | ironsource/atom/event_storage.py | <reponame>metricjar/atom-python<filename>ironsource/atom/event_storage.py
import abc
class EventStorage(abc.ABCMeta("_EventStorageABC", (object,), {})):
    """
    Abstract Base Class for providing a generic way of storing events in a backlog before they are sent to Atom.

    Deriving from a class built directly by ``abc.ABCMeta`` makes the
    abstract-method enforcement work on both Python 2 and Python 3.  The
    previous ``__metaclass__ = abc.ABCMeta`` class attribute is silently
    ignored on Python 3, so incomplete subclasses could be instantiated there.
    """

    def __init__(self):
        pass

    @abc.abstractmethod
    def add_event(self, event_object):
        """
        Add event (must to be synchronized)
        :param event_object: Event data object
        :type event_object: Event
        """
        pass

    @abc.abstractmethod
    def get_event(self, stream):
        """
        Get event (must to be synchronized)
        :param stream: Atom stream name
        :type stream: str
        :return: Event object from storage
        :rtype: Event
        """
        pass

    @abc.abstractmethod
    def remove_event(self, stream):
        """
        Remove event from storage
        :param stream: Atom stream name
        :type stream: str
        """
        pass

    @abc.abstractmethod
    def is_empty(self):
        """
        Check if the storage is empty
        :return: True is empty, else False
        """
        pass
|
metricjar/atom-python | ironsource/atom/queue_event_storage.py | <reponame>metricjar/atom-python
from ironsource.atom.event_storage import EventStorage
from Queue import Queue
from threading import Lock
class QueueEventStorage(EventStorage):
    """
    Queue event storage - in memory queue that implements ABC EventStorage

    Keeps one bounded Queue per stream name, created lazily on first use.
    """

    def __init__(self, queue_size, block=True, timeout=None):
        """
        :param queue_size: Queue size
        :param block: Should the put to Queue block or not
        :param timeout: Timeout in case we are blocking
        """
        super(QueueEventStorage, self).__init__()
        self._dictionary_lock = Lock()
        self._queue_size = queue_size
        self._events = {}
        self._block = block
        # Bug fix: a timeout only matters for *blocking* queue operations.
        # The previous `timeout if not block else None` kept the timeout
        # exactly when Queue ignores it (block=False) and dropped it when it
        # was needed, making blocking puts/gets wait forever.
        self._timeout = timeout if block else None

    def add_event(self, event_object):
        """
        Add event object to queue
        :param event_object: Event data object
        :type event_object: Event
        """
        with self._dictionary_lock:
            # Lazily create the per-stream queue on first event.
            if event_object.stream not in self._events:
                self._events[event_object.stream] = Queue(maxsize=self._queue_size)
            self._events[event_object.stream].put(event_object, block=self._block, timeout=self._timeout)

    def get_event(self, stream):
        """
        Get & remove event object from queue
        :param stream: Atom stream name
        :type stream: str
        :return: Event object from queue (or None when the stream is unknown/empty)
        :rtype: Event
        """
        # NOTE(review): empty() followed by get() is racy with multiple
        # consumers — a competitor may drain the queue in between; confirm
        # the intended concurrency model.
        if stream in self._events and not self._events[stream].empty():
            return self._events[stream].get(block=self._block, timeout=self._timeout)

    def remove_event(self, stream):
        """
        Remove event object from queue
        :param stream: Atom stream name
        :type stream: str
        """
        return self.get_event(stream)

    def is_empty(self):
        """
        Check if the storage is empty
        :return: True is empty, else False
        """
        for stream in self._events:
            if not self._events[stream].empty():
                return False
        return True
|
metricjar/atom-python | ironsource/__init__.py | __author__ = 'Atom Core Team'
__version__ = '1.5.4'
__license__ = 'MIT'
__copyright__ = "ironSource"
|
metricjar/atom-python | ironsource/atom/response.py | class Response:
"""
Response information from Atom server
"""
def __init__(self, error, data, status, raw_response):
"""
:param error: Error information
:type error: object
:param data: Response information data
:type data: object
:param status: Response status from server
:type status: int
:param raw_response: Original response object
:type raw_response: object
"""
self.error = error
self.data = data
self.status = status
self.raw_response = raw_response
|
terapyon/minenohara-dic | gui.py | import json
import eel
from db.ejdic import found_word
LIMIT = 10
def words_means(word):
    """Collect fuzzy dictionary matches for *word*.

    Tries edit distances 0..3 in order and stops early once more than
    LIMIT + 20 candidates have been gathered.
    """
    matches = []
    for distance in range(4):
        matches += list(found_word(word, distance))
        if len(matches) > LIMIT + 20:
            break
    return matches
def results(word):
    """Yield unique (text, meaning) pairs for *word*, capped at LIMIT + 2."""
    seen_texts = set()
    emitted = 0
    for text, mean in words_means(word):
        if text not in seen_texts:
            yield text, mean
            emitted += 1
            seen_texts.add(text)
        if emitted > LIMIT + 1:
            break
@eel.expose
def get_candidates(word):
    """Eel-exposed endpoint: return candidate translations as a JSON string."""
    candidates = []
    for text, mean in results(word):
        candidates.append({"word": word, "candidate": text, "translate": mean})
    return json.dumps(candidates)
def start_gui():
    """Initialise the bundled front-end and launch the Eel window in Chrome."""
    eel.init("front/dist")
    eel.start("index.html", mode="chrome", port=18085)
if __name__ == "__main__":
start_gui()
|
terapyon/minenohara-dic | db/ejdic.py | <reponame>terapyon/minenohara-dic<gh_stars>1-10
import os
import sqlite3
from db.automata import Matcher, find_all_matches
dataset_dir = os.path.dirname(os.path.abspath(__file__))
conn = sqlite3.connect(dataset_dir + "/ejdict.sqlite3")
def make_sorted_words():
    """Load every (word, meaning) pair from the dictionary DB, sorted by word."""
    cursor = conn.cursor()
    rows = cursor.execute("""SELECT * FROM items ORDER BY word""")
    return sorted((row[1], row[2]) for row in rows)
dic_words = make_sorted_words()
words = [w for w, m in dic_words]
means = [m for w, m in dic_words]
def found_word(word, k):
    """Return all dictionary matches for *word* within edit distance *k*."""
    matcher = Matcher(words, means)
    return find_all_matches(word, k, matcher)
if __name__ == "__main__":
print(len(dic_words))
print(dic_words)
|
jimmymkude/min_max_tic_tac_toe_bot | jimmy_bot_tic_tac_toe/tic_tac_toe.py | <gh_stars>0
########################################################################
#
# Created by: <NAME>
# Date Created: 6/15/2017
#
# Driver of the game
#
########################################################################
import sys
from bot import Bot
import library as game
def update_board(board, row, col, char, bots):
    """Place *char* at (row, col) and mirror the move onto every active bot.

    Mutates *board* in place and returns it for convenience.
    """
    board[row][col] = char
    for observer in bots:
        if not observer.active:
            continue
        observer.update_board(row, col, char)
    return board
def play(board, mode='2P', difficulty=3):
    """Run the main game loop until a win or tie is reached.

    :param board: 3x3 list-of-lists holding ' ', 'X' or 'O'
    :param mode: '2P' (two humans), '1P' (human vs bot1) or 'CPU' (unimplemented)
    :param difficulty: bot search depth selector (1..3), used only in '1P' mode
    """
    game_not_over = True
    turn = 0
    player_char = 'X'
    bot1 = Bot(board)
    bot2 = Bot(board)
    while game_not_over:
        # switch turns
        turn = turn ^ 1
        # X moves on odd turns, O on even ones.
        if turn:
            player_char = 'X'
        else:
            player_char = 'O'
        choice_valid = False
        row = -1
        col = -1
        # Keep asking for a move until a valid open cell is chosen.
        while not choice_valid:
            if mode == '2P':
                (row, col) = game.get_choice(player_char)
            elif mode == 'CPU':
                # NOTE(review): CPU-vs-CPU is unimplemented — row/col stay
                # (-1, -1), so this branch loops forever; confirm intent.
                #(row, col) = cpu_play(board, player_char)
                pass
            elif mode == '1P':
                # Re-activating each turn is idempotent; it (re)applies difficulty.
                bot1.activate(difficulty=difficulty)
                print("jimmy_bot: \"{}\"".format(bot1.msg))
                if turn:
                    (row, col) = game.get_choice(player_char)
                else:
                    (row, col) = bot1.play()
                    print("jimmy_bot: \"{}\"".format(bot1.msg))
            #print("(" + str(row) + ", " + str(col) + ")")
            choice_valid = game.validate_choice(board, row, col)
        # update and draw board
        update_board(board, row, col, player_char, [bot1, bot2])
        game.draw_board(board)
        # evaluate game
        game_not_over = not game.game_over(board)
def main():
    """Entry point: parse CLI args (mode, difficulty) and start a game.

    Usage: python3 tic_tac_toe.py <mode> <difficulty>

    Missing or unrecognised arguments now fall back to the defaults
    (CPU mode, 'hard' difficulty) instead of crashing with IndexError.
    """
    game.print_guidelines()
    board = game.initialize_board([' '] * 9)
    game.draw_board(board)
    mode = 'CPU'
    difficulty = 3
    if len(sys.argv) > 1:
        if sys.argv[1] == '1':
            mode = '1P'
        elif sys.argv[1] == '2':
            mode = '2P'
    if len(sys.argv) > 2:
        if sys.argv[2] == 'easy':
            difficulty = 1
        elif sys.argv[2] == 'medium':
            difficulty = 2
    play(board, mode, difficulty)


main()
|
jimmymkude/min_max_tic_tac_toe_bot | jimmy_bot_tic_tac_toe/library.py |
########################################################################
#
# Created by: <NAME>
# Date Created: 6/15/2017
#
# Tic Tac Toe Library functions to support game execution
#
########################################################################
def initialize_board(content_list):
    """Arrange the first nine entries of *content_list* into a 3x3 grid."""
    return [[content_list[r * 3 + c] for c in range(3)] for r in range(3)]
def print_guidelines():
    """Print the startup banner: coordinate map, modes, difficulty and CLI usage."""
    print()
    print("######################################################")
    print("Jimmy's Tic Tac Toe Board")
    print()
    print("Coordinate Guideline:")
    # Show a sample board numbered 1-9 so the player knows which key maps where.
    content = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
    board = initialize_board(content)
    draw_board(board)
    print("######################################################")
    print()
    print("Modes: ['1': 1 Player, '2': 2 Player, '3': CPU v CPU]")
    print()
    print("######################################################")
    print()
    print("Diffuculty: ['easy', 'medium', 'hard']")
    print()
    print("######################################################")
    print()
    print("Syntax: python3 tic_tac_toe.py <mode> <difficulty>")
    print("e.g. To play CPU on 'hard' difficulty you would run: ")
    print(" python3 tic_tac_toe.py 1 hard")
    print()
def draw_board(board):
    """Pretty-print the 3x3 board to stdout with a separator after each row."""
    print()
    print(" __ __ __")
    for row in board:
        cells = "".join(" " + cell + " |" for cell in row)
        print(" |" + cells)
        print(" __ __ __")
    print()
def is_integer(string):
    """Return True when *string* parses as an int; print an error otherwise."""
    try:
        int(string)
    except ValueError:
        print("Error. Please select an integer from 1 to 9.\n")
        return False
    return True
def get_choice(player_char):
    """Prompt player *player_char* for a cell number (1-9) and map it to (row, col).

    Re-prompts until the input parses as an integer.  Cells are numbered
    row-major, so ``divmod`` converts the zero-based index directly — this
    replaces the previous reverse-scan loop with identical results for 1-9.
    Out-of-range numbers still yield an off-board pair, which
    validate_choice() rejects.
    """
    choice = ''
    valid = False
    while not valid:
        choice = input("Player {}'s turn: ".format(player_char))
        valid = is_integer(choice)
    choice = int(choice) - 1
    row, col = divmod(choice, 3)
    return (row, col)
def board_full(board):
    """Return True when no empty cell (' ') remains on the board."""
    return all(cell != ' ' for row in board for cell in row)
def validate_cood(x, y):
    """Return True when (x, y) lies on the 3x3 grid."""
    return 0 <= x <= 2 and 0 <= y <= 2
def validate_choice(board, x, y, mode='verbose'):
    """Check that (x, y) is on the board and the target cell is still open.

    In 'verbose' mode an explanatory message is printed for invalid picks.
    """
    is_open = validate_cood(x, y) and board[x][y] == ' '
    if not is_open and mode == 'verbose':
        print("Invalid Choice. Please select an integer from 1 to 9 for an open spot on the grid.")
    return is_open
"""
Recursive Depth first, goes in /(direction) until it steps out of bounds
Tries to find a sequence of 3 similar characters in the path
"""
def try_path(board, x, y, player_char, direction, curr_length):
if board[x][y] != ' ' and board[x][y] == player_char:
curr_length += 1
if curr_length >= 3:
return True
next_x = x + direction[0]
next_y = y + direction[1]
if validate_cood(next_x, next_y):
return try_path(board, next_x, next_y, player_char, direction, curr_length)
return False
"""
Iterates through each valid neighbours and tries the path
recursively using try_path()
"""
def try_paths(board, row, col):
neighbour_x = [1, 1, 0, -1]
neighbour_y = [0, 1, 1, 1]
player_char = board[row][col]
# for the 4 adjacent (right, down-right, down, down-left) neighbours
for i in range(4):
x = row + neighbour_x[i]
y = col + neighbour_y[i]
# if it is a valid neighbour
if validate_cood(x, y):
direction = (neighbour_x[i], neighbour_y[i])
found = try_path(board, x, y, player_char, direction, 1)
if found:
return True
return False
"""
Iterates through each box in the grid and tries its paths using try_paths
Skips empty boxes
"""
def find_win(board):
for row in range(len(board)):
for col in range(len(board[row])):
player_char = board[row][col]
if player_char == ' ':
continue
found = try_paths(board, row, col)
if found:
return (True, player_char)
return (False, '')
def game_over(board):
    """Report (and announce on stdout) whether the game has ended."""
    won, winner = find_win(board)
    if won:
        print("Player {} won.".format(winner))
        return True
    if board_full(board):
        print("Tie.")
        return True
    return False
|
jimmymkude/min_max_tic_tac_toe_bot | jimmy_bot_tic_tac_toe/bot.py | <gh_stars>0
########################################################################
#
# Created by: <NAME>
# Date Created: 6/15/2017
#
# Represents a tic tac toe bot, uses min max
#
########################################################################
import library as game
import copy
class Bot:
    """Minimax tic-tac-toe bot.

    Keeps a private copy of the game board, mirrors every move it is told
    about via update_board(), and selects its own moves by exhaustive
    game-tree search bounded by a difficulty-dependent depth.
    """

    def __init__(self, board):
        # Private deep copy so the bot's search never mutates the real board.
        self.board = copy.deepcopy(board)
        self.active = False
        self.msg = "My name is <NAME>. Goodluck mate."
        # Banter keyed by the minimax score of the move just selected.
        self.score_msg = {
            -1 : "That was sloppy game for me. Try playing me in hard mode.",
            0 : "This is a tie mate.",
            1 : "Anyone's game.",
            2 : "This win is mine."
        }
        # Difficulty level -> maximum minimax search depth.
        self.difficulty_depth = {
            1 : 3,
            2 : 4,
            3 : 8
        }

    def activate(self, char='O', difficulty=3):
        """Turn the bot on, assigning its mark and search depth."""
        self.active = True
        self.char = char
        self.opponent_char = self.inverse_char(char)
        self.difficulty = difficulty
        self.max_depth = self.difficulty_depth[self.difficulty]
        #print("jimmy_bot difficulty: {}".format(self.max_depth))

    def draw_board(self):
        """Debug helper: print the bot's internal copy of the board."""
        print()
        print("Bot board.")
        print(" __ __ __")
        for row in self.board:
            row_str = ""
            for cell in row:
                row_str += " " + cell + " |"
            print(" |" + row_str)
            print(" __ __ __")
        print()

    def update_board(self, x, y, char):
        """Mirror a move made in the real game onto the internal board."""
        self.board[x][y] = char
        #self.draw_board()

    def get_possible_moves(self, board):
        """Return every empty (row, col) cell on *board*."""
        moves = []
        for i in range(len(board)):
            for j in range(len(board[i])):
                char = board[i][j]
                if char == ' ':
                    #print((i,j))
                    moves.append((i,j))
        #print()
        return moves

    def inverse_char(self, char):
        """Return the opposing mark: 'X' <-> 'O'."""
        inverse = 'X'
        if char == 'X':
            inverse = 'O'
        return inverse

    def explore(self, board, box, char, our_char, depth, max_depth):
        """Minimax evaluation of placing *char* at *box*.

        Scores from the bot's perspective: 2 = win, 1 = undecided at the
        depth cutoff, 0 = draw, -1 = loss.
        """
        b = copy.deepcopy(board)
        b[box[0]][box[1]] = char
        (won, winning_char) = game.find_win(b)
        if won:
            # win
            if winning_char == our_char:
                return 2
            # loss
            #print(box)
            #game.draw_board(b)
            return -1
        if game.board_full(b):
            # draw
            return 0
        if depth >= max_depth:
            # not determined
            #print("Depth is 3")
            return 1
        #print("Depth: " + str(depth))
        #print("Chose: {}".format(box))
        #print("Next Moves: ")
        next_moves = self.get_possible_moves(b)
        # if no more moves then its a draw
        #if not next_moves:
            #return 0
        new_depth = depth + 1
        scores = []
        for move in next_moves:
            scores.append(self.explore(b, move, self.inverse_char(char), our_char, new_depth, max_depth))
        # After placing our own mark the opponent replies, choosing the worst
        # outcome for us (min); after an opponent mark we choose our best (max).
        if char == our_char:
            #print("Max: {}".format(max(scores)))
            return min(scores)
        #print("Min: {}".format(min(scores)))
        return max(scores)

    def select_move(self, moves):
        """Score every candidate move with explore() and pick the best one.

        The scores dict maps score -> move, so among equally-scored moves the
        last one examined wins.  Also updates self.msg with score-based banter.
        """
        scores = {}
        options = {}
        b = copy.deepcopy(self.board)
        for move in moves:
            #game.draw_board(b)
            score = self.explore(b, move, self.char, self.char, 1, max_depth=self.max_depth)
            scores[score] = move
            options[move] = score
        #print(options)
        selected_score = max(scores)
        selected_move = scores[selected_score]
        #print("selected: {}".format(selected_move))
        self.msg = self.score_msg[selected_score]
        return selected_move

    def play(self):
        """Return the bot's chosen (row, col) for the current board."""
        row = -1
        col = -1
        possible_moves = self.get_possible_moves(self.board)
        #print(possible_moves)
        if possible_moves:
            # Default to the first open cell, then refine via minimax.
            row = possible_moves[0][0]
            col = possible_moves[0][1]
            (row, col) = self.select_move(possible_moves)
        #print(validate_choice(board, row, col))
        return (row, col)
|
EdgardoDev/django-store | djangostore/store/models.py | <filename>djangostore/store/models.py
from django.db import models
# Create your models here.
class Product(models.Model):
    """A single item offered for sale in the store."""

    name = models.CharField(max_length=100)  # display name
    stock_count = models.IntegerField()  # units currently in stock
    price = models.DecimalField(max_digits=6, decimal_places=2)  # unit price, up to 9999.99
|
Japanese-Visual-Media-Graph/utils | labels_for_elastic_search.py | import elasticsearch
from SPARQLWrapper import SPARQLWrapper, JSONLD
# Source: local Fuseki SPARQL endpoint serving the JVMG dataset (JSON-LD results).
sparql = SPARQLWrapper("http://localhost:3030/public")
sparql.setReturnFormat(JSONLD)
es = elasticsearch.Elasticsearch()
# Rebuild the search index from scratch on every run (ignore "missing index").
es.indices.delete(index="jvmg_search", ignore=[400, 404])
settings = {
    "settings": {
        "number_of_shards": 6,
        "number_of_replicas": 0
    },
    # Wildcard-typed fields allow efficient leading/trailing-wildcard queries.
    'mappings':
        {'properties':
            {'object': {'type': 'wildcard'},
             'predicate': {'type': 'wildcard'},
             'subject': {'type': 'wildcard'}}}}
es.indices.create(index="jvmg_search", ignore=400, body=settings)
# Page through all literal-object triples in fixed-size windows.
step_size = 20_000
for i in range(0, 60_000_000, step_size):
    print(i)
    query = f"""
    CONSTRUCT {{?s ?p ?o}}
    WHERE {{
        ?s ?p ?o . filter isLiteral(?o)
    }} LIMIT {step_size} OFFSET {i}"""
    sparql.setQuery(query)
    sparql_data = sparql.query().convert()
    # An empty page means we have walked past the end of the data.
    # NOTE(review): relies on the converted JSON-LD graph supporting len() and
    # yielding (s, p, o) triples on iteration — confirm with the SPARQLWrapper
    # version in use.
    if len(sparql_data) == 0:
        break
    bulk_data = []
    # Bulk format: alternating action ("index") and document lines.
    for id, item in enumerate(sparql_data):
        bulk_data.append({"index": {"_id": str(i+id),
                                    "_index": "jvmg_search"}})
        bulk_data.append({"subject": str(item[0]),
                          "predicate": str(item[1]),
                          "object": str(item[2])})
    res = es.bulk(body=bulk_data)
    # Surface partial bulk failures; they do not raise on their own.
    if res["errors"]:
        print(res)
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter02/Activity2.01/juggler/projectm/migrations/0001_initial.py | <reponame>PacktPublishing/Web-Development-Projects-with-Django<gh_stars>10-100
# Generated by Django 3.0.2 on 2020-01-08 18:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Project and Task tables."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Project Name', max_length=50)),
                ('pub_date', models.DateField(verbose_name='Date the book was published.')),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(help_text='Task title', max_length=100)),
                ('description', models.TextField(help_text='Task description')),
                ('time_estimate', models.IntegerField(help_text='Time in hours required to complete the task.')),
                ('completed', models.BooleanField(default=False)),
                # Deleting a Project cascades to its Tasks.
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projectm.Project')),
            ],
        ),
    ]
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter01/Exercise1.04/bookr/reviews/tests.py | from django.test import TestCase
from django.test import Client
class Exercise4TestCase(TestCase):
    def test_template_content(self):
        """Test that the index view returns the set names from the paramaters, or defaults to 'world'"""
        c = Client()
        # (url, expected body) pairs: explicit name, repeated name (last wins),
        # empty name and no name (both default to 'world').
        cases = [
            ('/', b'Hello, world!'),
            ('/?name=Ben', b'Hello, Ben!'),
            ('/?name=Ben&name=John', b'Hello, John!'),
            ('/?name=', b'Hello, world!'),
        ]
        for url, expected in cases:
            response = c.get(url)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.content, expected)
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter12/Exercise12.03/bookr/reviews/serializers.py | <reponame>PacktPublishing/Web-Development-Projects-with-Django<gh_stars>10-100
from rest_framework import serializers
from .models import Book, Publisher
class PublisherSerializer(serializers.ModelSerializer):
    """Serialize a Publisher's public contact fields."""

    class Meta:
        model = Publisher
        fields = ['name', 'website', 'email']
class BookSerializer(serializers.ModelSerializer):
    """Serialize a Book together with its nested publisher record."""

    # Nested representation: embed the full publisher, not just its primary key.
    publisher = PublisherSerializer()

    class Meta:
        model = Book
        fields = ['title', 'publication_date', 'isbn', 'publisher']
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter14/final/bookr/reviews/tests/test_views.py | <filename>Chapter14/final/bookr/reviews/tests/test_views.py
from django.test import TestCase, RequestFactory
from reviews.views import index
class TestIndexView(TestCase):
    """Smoke-test the reviews index view via RequestFactory."""

    def setUp(self):
        self.factory = RequestFactory()

    def test_index_view(self):
        request = self.factory.get('/index')
        # RequestFactory does not attach a session; the view expects one.
        request.session = {}
        self.assertEquals(index(request).status_code, 200)
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter11/Exercise11.03/filter_demo/templatetags/simple_tag.py | from django import template
register = template.Library()
@register.simple_tag
def greet_user(message, username):
    """Template tag: render "<message>, <username>!!!" as one string."""
    return f"{message}, {username}!!!"
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter14/Exercise14.01/reviews/tests.py | from django.test import TestCase
class TestSimpleComponent(TestCase):
    """Sanity check that the test runner itself works."""

    def test_basic_sum(self):
        self.assertEqual(1 + 1, 2)
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter08/final/media_project/media_example/forms.py | from django import forms
from .models import ExampleModel
class UploadForm(forms.ModelForm):
    """ModelForm exposing every field of ExampleModel, including its upload field."""

    class Meta:
        model = ExampleModel
        fields = "__all__"
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter13/Exercise13.06/bookr/bookr/utils.py | <filename>Chapter13/Exercise13.06/bookr/bookr/utils.py
import datetime
from django.db.models import Count
from reviews.models import Review
def get_books_read_by_month(username):
    """Get the books read by the user on per month basis.
    :param: str The username for which the books needs to be returned
    :return: dict of month wise books read
    """
    # NOTE(review): `creator__username__contains=username` is a substring
    # match, so "ben" also matches "benjamin" — confirm whether an exact
    # lookup (`creator__username=username`) was intended.
    current_year = datetime.datetime.now().year
    # Group this year's reviews by creation month and count book titles per month.
    books = Review.objects.filter(creator__username__contains=username,date_created__year=current_year).values('date_created__month').annotate(book_count=Count('book__title'))
    return books
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter14/Exercise14.05/bookr_test/tests.py | <gh_stars>10-100
from django.test import TestCase, Client, RequestFactory
from django.contrib.auth.models import AnonymousUser, User
from .models import Publisher
from .views import greeting_view_user
class TestPublisherModel(TestCase):
    """Test the publisher model."""

    def setUp(self):
        # Instantiated in memory only; no .save(), so no DB row is created.
        self.p = Publisher(name='Packt', website='www.packt.com', email='<EMAIL>')

    def test_create_publisher(self):
        self.assertIsInstance(self.p, Publisher)

    def test_str_representation(self):
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(str(self.p), "Packt")
class TestGreetingView(TestCase):
    """Test the greeting view."""

    def setUp(self):
        # Client issues requests through the full URL-routing stack.
        self.client = Client()

    def test_greeting_view(self):
        response = self.client.get('/test/greeting')
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(response.status_code, 200)
class TestLoggedInGreetingView(TestCase):
    """Test the greeting view for the authenticated users."""

    def setUp(self):
        self.test_user = User.objects.create_user(username='testuser', password='<PASSWORD>')
        self.test_user.save()
        # RequestFactory lets us call the view function directly with a
        # hand-built request, bypassing URL routing and middleware.
        self.factory = RequestFactory()

    def test_user_greeting_not_authenticated(self):
        request = self.factory.get('/test/greet_user')
        request.user = AnonymousUser()
        response = greeting_view_user(request)
        # Anonymous users are redirected (302).
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(response.status_code, 302)

    def test_user_authenticated(self):
        request = self.factory.get('/test/greet_user')
        request.user = self.test_user
        response = greeting_view_user(request)
        self.assertEqual(response.status_code, 200)
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter10/Exercise10.02/bookr_admin/admin.py | from django.contrib import admin
class BookrAdmin(admin.AdminSite):
    """Custom admin site carrying Bookr-specific branding."""

    # Text shown at the top of every admin page.
    site_header = "Bookr Administration"
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter10/final/bookr/reviews/admin.py | <gh_stars>10-100
from django.contrib import admin
from reviews.models import (Publisher, Contributor, Book,
BookContributor, Review)
class BookAdmin(admin.ModelAdmin):
    """Admin configuration for Book: list columns, search, publisher display."""

    model = Book
    list_display = ('title', 'isbn', 'get_publisher', 'publication_date')
    # search_fields supports related-model lookups via double-underscore syntax.
    search_fields = ['title', 'publisher__name']

    def get_publisher(self, obj):
        """Return the publisher's name for the list_display column."""
        return obj.publisher.name
    # Give the computed column a readable header instead of the
    # auto-generated "GET PUBLISHER".
    get_publisher.short_description = 'Publisher'
def initialled_name(obj):
    """Format a contributor as '<last names>, <initials of first names>'.

    e.g. obj.first_names='<NAME>', obj.last_names='Salinger'
    => 'Salinger, JD'
    """
    first_initials = ''.join(part[0] for part in obj.first_names.split(' '))
    return f"{obj.last_names}, {first_initials}"
class ContributorAdmin(admin.ModelAdmin):
    """Admin options for Contributor: list columns, filtering, and search."""

    list_display = ('last_names', 'first_names')
    list_filter = ('last_names',)
    # '__startswith' narrows the search to prefix matches on last names.
    search_fields = ('last_names__startswith', 'first_names')
# Register your models here.
# Models with a custom ModelAdmin pass it as the second argument;
# the rest use the default admin options.
admin.site.register(Publisher)
admin.site.register(Contributor, ContributorAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(BookContributor)
admin.site.register(Review)
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter13/final/bookr/reviews/tests.py | <filename>Chapter13/final/bookr/reviews/tests.py
import os
from django.conf import settings
from django.test import TestCase, Client
from django.utils import timezone
from reviews.models import Book, Publisher
class Activity2Test(TestCase):
    """Upload cover/sample media for a book and check the detail page shows them."""

    @classmethod
    def setUpTestData(cls):
        # One publisher and one book shared (read-only) by all tests in this class.
        p = Publisher.objects.create(name='Test Publisher')
        Book.objects.create(title='Test Book', publication_date=timezone.now(), publisher=p)

    def test_book_detail_media_display(self):
        """
        When we first view a book we should not see a cover image or link to sample. But if we upload these, they should
        then be displayed on the book detail page.
        """
        cover_filename = 'machine-learning-for-algorithmic-trading.png'
        cover_save_path = os.path.join(settings.MEDIA_ROOT, 'book_covers', cover_filename)
        sample_filename = 'machine-learning-for-trading.pdf'
        sample_save_path = os.path.join(settings.MEDIA_ROOT, 'book_samples', sample_filename)
        # Exact HTML fragments expected once the media has been uploaded.
        cover_img = b'<img src="/media/book_covers/machine-learning-for-algorithmic-trading.png">'
        sample_link = b'<a href="/media/book_samples/machine-learning-for-trading.pdf">Download</a>'
        c = Client()
        resp = c.get('/books/1/')
        self.assertIn(b'<a class="btn btn-primary" href="/books/1/media/">Media</a>', resp.content)
        # check the cover image and sample link aren't in the initial HTML
        self.assertNotIn(cover_img, resp.content)
        self.assertNotIn(sample_link, resp.content)
        try:
            with open(os.path.join(settings.BASE_DIR, 'fixtures', cover_filename), 'rb') as cover_fp:
                with open(os.path.join(settings.BASE_DIR, 'fixtures', sample_filename), 'rb') as sample_fp:
                    c.post('/books/1/media/', {'cover': cover_fp, 'sample': sample_fp})
        finally:
            # Always remove the uploaded copies from MEDIA_ROOT so reruns
            # start from a clean slate.
            if os.path.exists(cover_save_path):
                os.unlink(cover_save_path)
            if os.path.exists(sample_save_path):
                os.unlink(sample_save_path)
        resp = c.get('/books/1/')
        # check the cover image and sample link are in the HTML after uploading the media
        self.assertIn(cover_img, resp.content)
        self.assertIn(sample_link, resp.content)
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter12/Exercise12.01/bookr/reviews/api_views.py | <filename>Chapter12/Exercise12.01/bookr/reviews/api_views.py<gh_stars>10-100
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .models import Book
@api_view()
def first_api_view(request):
    """DRF function-based view returning the total number of books as JSON."""
    # api_view() with no arguments allows GET requests only (DRF default).
    num_books = Book.objects.count()
    return Response({"num_books": num_books})
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter02/Activity2.01/juggler/projectm/apps.py | <filename>Chapter02/Activity2.01/juggler/projectm/apps.py
from django.apps import AppConfig
class ProjectmConfig(AppConfig):
    """Django app configuration for the projectm app."""

    # Dotted path Django uses to identify this app in INSTALLED_APPS.
    name = 'projectm'
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter01/Activity1.01/reviews/tests.py | <filename>Chapter01/Activity1.01/reviews/tests.py
from django.test import TestCase
from django.test import Client
class Activity1Test(TestCase):
    """Check that the Bookr welcome page renders the expected title and heading."""

    def test_index_page(self):
        """Test the Bookr welcome screen"""
        client = Client()
        response = client.get('/')
        # Both the <title> and the <h1> must appear in the rendered page.
        for expected_fragment in (b'<title>Welcome to Bookr</title>',
                                  b'<h1>Welcome to Bookr</h1>'):
            self.assertIn(expected_fragment, response.content)
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter10/Exercise10.03/bookr_admin/admin.py | from django.contrib import admin
class BookrAdmin(admin.AdminSite):
    """Custom admin site with Bookr branding and a custom logout page."""

    # Text shown at the top of every admin page.
    site_header = "Bookr Administration"
    # Template rendered after the user logs out of the admin.
    logout_template = "admin/logout.html"
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter16/Exercise16.01/bookr/reviews/tests.py | <reponame>PacktPublishing/Web-Development-Projects-with-Django
import os
from django.conf import settings
from django.test import TestCase, Client
class Exercise1Test(TestCase):
    """Check the React example page markup and the static JS file contents."""

    def test_view_and_template(self):
        """Test that the view, URLs and template are set up properly by checking the contents of the response."""
        c = Client()
        resp = c.get('/react-example/')
        # The container div that React renders into must be present.
        self.assertIn(b'<div id="react_container"></div>', resp.content)
        self.assertIn(b'<script crossorigin src="https://unpkg.com/react@16/umd/react.development.js"></script>',
                      resp.content)
        self.assertIn(
            b'<script crossorigin src="https://unpkg.com/react-dom@16/umd/react-dom.development.js"></script>',
            resp.content)
        self.assertIn(b'<script src="/static/react-example.js"></script>', resp.content)

    def test_js_content(self):
        """Test that some expected things are in the JS file."""
        with open(os.path.join(settings.BASE_DIR, 'static', 'react-example.js')) as f:
            static_content = f.read()
        self.assertIn('class ClickCounter extends React.Component {', static_content)
        self.assertIn('this.state = { clickCount: 0 };', static_content)
        self.assertIn('{ onClick: () => this.setState({ clickCount: this.state.clickCount + 1 }) },', static_content)
self.assertIn('ReactDOM.render(e(ClickCounter), document.getElementById(\'react_container\'));', static_content) |
PacktPublishing/Web-Development-Projects-with-Django | Chapter15/Exercise15.02/bookr/reviews/tests.py | from configurations import values
from django.test import TestCase
from bookr import settings as direct_settings
class Exercise2Test(TestCase):
    """Check that the production configuration reads DATABASES from a URL value."""

    def test_dj_database_url(self):
        """Test that the DATABASES is being set with a URL."""
        # since we are running in DEBUG mode we can check how the values were set on the non-active conf
        self.assertFalse(direct_settings.Prod.DEBUG)
        self.assertIsInstance(direct_settings.Prod.DATABASES, values.DatabaseURLValue)
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(direct_settings.Prod.DATABASES.environ_prefix, 'DJANGO')
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter18/final/bookr/reviews/templatetags/profile_tags.py | <filename>Chapter18/final/bookr/reviews/templatetags/profile_tags.py<gh_stars>10-100
from django import template
from reviews.models import Review
register = template.Library()
@register.inclusion_tag('book_list.html')
def book_list(username):
    """Render the list of books read by a user.

    :param: str username The username for whom the books should be fetched
    :return: dict of books read by user
    """
    reviews = Review.objects.filter(creator__username__contains=username)
    # Renamed local from 'book_list', which shadowed this function's own name.
    books_read = [review.book.title for review in reviews]
    return {'books_read': books_read}
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter07/Exercise7.03/bookr/reviews/tests.py | <filename>Chapter07/Exercise7.03/bookr/reviews/tests.py
import re
from django.test import Client
from django.test import TestCase
from reviews.models import Publisher
class Exercise3Test(TestCase):
    """Exercise the publisher form views: rendering, creation, validation, editing."""

    def test_fields_in_view(self):
        """"
        Test that fields exist in the rendered template.
        """
        c = Client()
        response = c.get('/publishers/new/')
        # The CSRF token value is random per request, so match it with a
        # regex rather than an exact byte string.
        self.assertIsNotNone(re.search(r'<input type="hidden" name="csrfmiddlewaretoken" value="\w+">',
                                       response.content.decode('ascii')))
        self.assertIn(
            b'<label for="id_name">Name:</label> <input type="text" name="name" maxlength="50" required id="id_name"> '
            b'<span class="helptext">The name of the Publisher.</span></p>',
            response.content)
        self.assertIn(
            b'<label for="id_website">Website:</label> <input type="url" name="website" maxlength="200" '
            b'required id="id_website"> <span class="helptext">The Publisher\'s website.</span></p>',
            response.content)
        self.assertIn(
            b'<label for="id_email">Email:</label> <input type="email" name="email" maxlength="254" '
            b'required id="id_email"> <span class="helptext">The Publisher\'s email address.</span>',
            response.content)
        self.assertIn(b'<input type="submit" value="Submit">', response.content)

    def test_publisher_create(self):
        """Test the creation of a new Publisher"""
        self.assertEqual(Publisher.objects.all().count(), 0)
        c = Client()
        publisher_name = '<NAME> Publisher'
        publisher_website = 'http://www.example.com/test-publisher/'
        publisher_email = '<EMAIL>'
        response = c.post('/publishers/new/', {
            'name': publisher_name,
            'website': publisher_website,
            'email': publisher_email
        })
        # A successful create redirects (302) to the publisher detail page.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Publisher.objects.all().count(), 1)
        publisher = Publisher.objects.first()
        self.assertEqual(publisher.name, publisher_name)
        self.assertEqual(publisher.website, publisher_website)
        self.assertEqual(publisher.email, publisher_email)
        self.assertEqual(response['Location'], '/publishers/{}/'.format(publisher.pk))

    def test_publisher_no_create(self):
        """Test that no Publisher is created if the form is invalid."""
        self.assertEqual(Publisher.objects.all().count(), 0)
        c = Client()
        response = c.post('/publishers/new/', {
            'name': '',
            'website': 'not a url',
            'email': 'not an email'
        })
        # An invalid form re-renders the page (200) instead of redirecting.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Publisher.objects.all().count(), 0)

    def test_publisher_edit(self):
        """
        Test editing a publisher, the initial GET should have a form with values and then the post should update the
        Publisher rather than creating a new one.
        """
        publisher_name = '<NAME>'
        publisher_website = 'http://www.example.com/edit-publisher/'
        publisher_email = '<EMAIL>'
        publisher = Publisher(name=publisher_name, website=publisher_website, email=publisher_email)
        publisher.save()
        self.assertEqual(Publisher.objects.all().count(), 1)
        c = Client()
        response = c.get('/publishers/{}/'.format(publisher.pk))
        # The edit form should be pre-populated with the existing values.
        self.assertIn(b'value="Test Edit Publisher"', response.content)
        self.assertIn(b'value="http://www.example.com/edit-publisher/"', response.content)
        self.assertIn(b'value="<EMAIL>"', response.content)
        response = c.post('/publishers/{}/'.format(publisher.pk), {
            'name': '<NAME>',
            'website': 'https://www.example.com/updated/',
            'email': '<EMAIL>'
        })
        self.assertEqual(response.status_code, 302)
        # Still exactly one Publisher: the existing record was updated in place.
        self.assertEqual(Publisher.objects.all().count(), 1)
        publisher2 = Publisher.objects.first()
        self.assertEqual(publisher2.pk, publisher.pk)
        self.assertEqual(publisher2.name, '<NAME>')
        self.assertEqual(publisher2.website, 'https://www.example.com/updated/')
        self.assertEqual(publisher2.email, '<EMAIL>')
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter01/Exercise1.08/bookr/reviews/tests.py | from django.test import TestCase
from django.test import Client
class Exercise8TestCase(TestCase):
    """Verify that requesting the root URL propagates an unhandled NameError."""

    def test_exception(self):
        """The view should raise a NameError exception"""
        client = Client()
        # The exception must escape the view; assertRaises catches it here.
        with self.assertRaises(NameError):
            client.get('/')
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter11/Exercise11.01/filter_demo/templatetags/explode_filter.py | from django import template
register = template.Library()
@register.filter
def explode(value, separator):
    """Template filter: split *value* into a list on *separator*."""
    return value.split(separator)
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter14/final/bookr/reviews/tests/test_models.py | <filename>Chapter14/final/bookr/reviews/tests/test_models.py
from django.test import TestCase
from reviews.models import Book, Publisher, Contributor
class TestPublisherModel(TestCase):
    """Test creating a Publisher row through the ORM."""

    def test_create_publisher(self):
        publisher = Publisher.objects.create(name='Packt', website='www.packt.com', email='<EMAIL>')
        self.assertIsInstance(publisher, Publisher)
class TestContributorModel(TestCase):
    """Test creating a Contributor row through the ORM."""

    def test_create_contributor(self):
        contributor = Contributor.objects.create(first_names='Joe', last_names='Duck', email='<EMAIL>')
        self.assertIsInstance(contributor, Contributor)
class TestBookModel(TestCase):
    """Test creating a Book along with its required related objects."""

    def setUp(self):
        # A Book requires a Publisher FK; the Contributor is attached via M2M.
        self.publisher = Publisher.objects.create(name='Packt', website='www.packt.com', email='<EMAIL>')
        self.contributor = Contributor.objects.create(first_names='Joe', last_names='Duck', email='<EMAIL>')

    def test_create_book(self):
        book = Book.objects.create(title='The Testing Book', isbn='0000-00-000', publication_date='2020-08-01', publisher=self.publisher)
        # M2M relations can only be set once the book has a primary key.
        book.contributors.set([self.contributor])
        self.assertIsInstance(book, Book)
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter09/Exercise09.02/bookr/bookr/views.py | <filename>Chapter09/Exercise09.02/bookr/bookr/views.py
from django.shortcuts import render
def profile(request):
    """Render the profile page template."""
    return render(request, 'profile.html')
|
PacktPublishing/Web-Development-Projects-with-Django | Chapter08/Exercise8.06/media_project/media_example/forms.py | from django import forms
class UploadForm(forms.Form):
    """Plain form with one image upload field and one generic file upload field."""

    # ImageField validates that the upload is a readable image;
    # FileField accepts any file.
    image_upload = forms.ImageField()
    file_upload = forms.FileField()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.