gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from __future__ import absolute_import, print_function, division
import copy
import logging
import time
from theano.compat import izip
from nose.plugins.skip import SkipTest
import numpy
from six.moves import xrange
import theano
from theano.compile.sharedvalue import shared
from theano.compile.pfunc import pfunc
from theano import tensor
from theano import config
import theano.tensor.nnet.conv as conv
import theano.tensor.signal.pool as pool
import theano.sandbox.cuda as tcn
import theano.tests.unittest_tools as utt
# Skip the whole module unless Theano runs with normal optimizations:
# these training loops are far too slow under other modes.
if theano.config.mode not in ['FAST_RUN', 'Mode', 'ProfileMode']:
    raise SkipTest('Skip test_mlp when not in normal optimization mode as '
                   'otherwise it is too slow!')
# Skip test if cuda_ndarray is not available.
if tcn.cuda_available is False:
    raise SkipTest('Optional package cuda disabled')
# Make the CUDA test logger verbose enough to see INFO-level messages.
logging.getLogger('theano.sandbox.cuda.tests.test_nnet').setLevel(logging.INFO)
def my_rand(*shape):
    """Return a float32 array of the given shape, uniform on [0, 1)."""
    values = numpy.random.rand(*shape)
    return theano._asarray(values, dtype='float32')
def my_randn(*shape):
    """Return a float32 array of the given shape, standard-normal samples."""
    values = numpy.random.randn(*shape)
    return theano._asarray(values, dtype='float32')
def my_zeros(*shape):
    """Return a float32 array of zeros with the given shape."""
    values = numpy.zeros(*shape)
    return theano._asarray(values, dtype='float32')
def get_mode(use_gpu, check_isfinite=True):
    """Return a compilation mode including or excluding the 'gpu' optimizations.

    check_isfinite is forwarded to DebugMode only; other modes ignore it.
    """
    if theano.config.mode == 'FAST_COMPILE':
        mode = theano.compile.mode.get_mode('FAST_RUN')
    else:
        mode = theano.compile.get_default_mode()
    # Copy before mutating so the process-wide default mode stays untouched.
    if isinstance(mode, theano.compile.ProfileMode):
        mode = copy.copy(mode)
    if isinstance(mode, theano.compile.DebugMode):
        mode = copy.copy(mode)
        mode.check_isfinite = check_isfinite
    return mode.including('gpu') if use_gpu else mode.excluding('gpu')
def print_mode(mode):
    """Print the profiling summary when *mode* is a ProfileMode; else no-op."""
    if mode is None:
        return
    if isinstance(mode, theano.compile.ProfileMode):
        mode.print_summary()
def print_diff_mode(a, b):
    """Print the profile difference a-vs-b when both are ProfileMode."""
    if a is None:
        return
    if not isinstance(a, theano.compile.ProfileMode):
        return
    if isinstance(b, theano.compile.ProfileMode):
        a.print_diff_summary(b)
def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10,
             n_train=100):
    """Train a one-hidden-layer tanh MLP with plain SGD for n_train steps.

    Parameters are layer sizes; use_gpu selects GPU shared variables and the
    'gpu'-including compilation mode.  Returns (losses, elapsed_seconds)
    where losses is a numpy array with one entry per training step.
    """
    if config.mode == 'DEBUG_MODE':
        n_train = 1

    if use_gpu:
        w = tcn.shared_constructor(0.01 * (my_rand(n_in, n_hid) - 0.5), 'w')
        b = tcn.shared_constructor(my_zeros(n_hid), 'b')
        # Bugfix: `v` was previously given the debug name 'c', colliding
        # with the output bias below.
        v = tcn.shared_constructor(my_zeros((n_hid, n_out)), 'v')
        c = tcn.shared_constructor(my_zeros(n_out), 'c')
    else:
        w = shared(0.01 * (my_rand(n_in, n_hid) - 0.5), 'w')
        b = shared(my_zeros(n_hid), 'b')
        v = shared(my_zeros((n_hid, n_out)), 'v')
        c = shared(my_zeros(n_out), 'c')

    x = tensor.fmatrix('x')
    y = tensor.fmatrix('y')
    lr = tensor.fscalar('lr')

    hid = tensor.tanh(tensor.dot(x, w) + b)
    out = tensor.tanh(tensor.dot(hid, v) + c)
    # Half squared error; the learning rate is folded into the loss so the
    # plain update p - g applies lr implicitly.
    loss = tensor.sum(0.5 * (out - y) ** 2 * lr)

    params = [w, b, v, c]
    gparams = tensor.grad(loss, params)

    mode = get_mode(use_gpu)
    train = pfunc([x, y, lr], [loss], mode=mode,
                  updates=[(p, p - g) for p, g in izip(params, gparams)])

    xval = my_rand(n_batch, n_in)
    yval = my_rand(n_batch, n_out)
    lr = theano._asarray(0.01, dtype='float32')

    t0 = time.time()
    rval = []
    for i in xrange(n_train):
        rval.append(train(xval, yval, lr))
    dt = time.time() - t0

    print_mode(mode)
    return numpy.asarray(rval), dt
@utt.AttemptManyTimes(n_attempts=3, n_req_successes=1)
def test_run_nnet():
    """Compare run_nnet losses between CPU and GPU over several layer sizes."""
    for n_in in 1024, 2048, 4096:
        for n_hid in 1024, 2048, 4096:
            utt.seed_rng()  # Seeds numpy rng with utt.fetch_seed()
            rval_cpu, tc = run_nnet(False, n_in=n_in, n_hid=n_hid)
            utt.seed_rng()  # Re-seed so both devices see identical data.
            rval_gpu, tg = run_nnet(True, n_in=n_in, n_hid=n_hid)

            abs_diff, rel_diff = \
                theano.gradient.numeric_grad.abs_rel_err(rval_gpu,
                                                         rval_cpu)
            max_abs_diff = abs_diff.max()

            # Bigger layers accumulate more float32 rounding error, so the
            # relative tolerance is relaxed for the largest configuration.
            rtol = 1e-4
            if n_in * n_hid >= 2048 * 4096:
                rtol = 7e-4
            assert numpy.allclose(
                rval_cpu, rval_gpu, rtol=rtol, atol=1e-6), \
                ("max_abs_diff, max_rel_diff, n_in, n_hid", max_abs_diff,
                 rel_diff.max(), n_in, n_hid)
def test_run_nnet_med():
    """CPU-only smoke test of run_nnet with a medium configuration."""
    utt.seed_rng()
    run_nnet(False, n_batch=10, n_in=128, n_hid=50, n_out=4, n_train=10000)
def test_run_nnet_small():
    """CPU-only smoke test of run_nnet with a tiny configuration."""
    utt.seed_rng()
    run_nnet(False, n_batch=10, n_in=10, n_hid=4, n_out=4, n_train=100000)
def run_conv_nnet1(use_gpu):
    """Train a 1-conv-layer + tanh output network for a few SGD steps.

    Returns the loss value of the last training step.
    """
    if use_gpu:
        shared_fn = tcn.shared_constructor
    else:
        shared_fn = shared

    n_batch = 16
    n_kern = 20
    shape_img = (n_batch, 1, 32, 32)
    shape_kern = (n_kern, 1, 5, 5)
    n_train = 10
    if config.mode == 'DEBUG_MODE':
        n_train = 1

    # Size of the flattened hidden layer after the 'valid' convolution.
    logical_hid_shape = tcn.blas.GpuConv.logical_output_shape_2d(
        shape_img[2:], shape_kern[2:], 'valid')
    n_hid = n_kern * logical_hid_shape[0] * logical_hid_shape[1]
    n_out = 10

    w = shared_fn(0.01 * (my_rand(*shape_kern) - 0.5), 'w')
    b = shared_fn(my_zeros((n_kern,)), 'b')
    # Bugfix: `v` was previously given the debug name 'c', colliding with
    # the output bias below.
    v = shared_fn(my_zeros((n_hid, n_out)), 'v')
    c = shared_fn(my_zeros(n_out), 'c')

    x = tensor.Tensor(dtype='float32', broadcastable=(0, 1, 0, 0))('x')
    y = tensor.fmatrix('y')
    lr = tensor.fscalar('lr')

    conv_op = conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern, n_batch, 1, 1)
    hid = tensor.tanh(conv_op(x, w) + b.dimshuffle((0, 'x', 'x')))
    hid_flat = hid.reshape((n_batch, n_hid))
    out = tensor.tanh(tensor.dot(hid_flat, v) + c)
    # Half squared error scaled by the learning rate (lr folded into loss).
    loss = tensor.sum(0.5 * (out - y) ** 2 * lr)

    params = [w, b, v, c]
    gparams = tensor.grad(loss, params)
    mode = get_mode(use_gpu)
    train = pfunc(
        [x, y, lr],
        [loss],
        mode=mode,
        updates=[(p, p - g) for p, g in zip(params, gparams)])

    xval = my_rand(*shape_img)
    yval = my_rand(n_batch, n_out)
    lr = theano._asarray(0.01, dtype='float32')
    for i in xrange(n_train):
        rval = train(xval, yval, lr)
    print_mode(mode)
    return rval
def test_conv_nnet1():
    """Check that run_conv_nnet1 gives matching losses on CPU and GPU."""
    utt.seed_rng()
    cpu_loss = run_conv_nnet1(False)
    utt.seed_rng()
    gpu_loss = run_conv_nnet1(True)
    utt.assert_allclose(cpu_loss, gpu_loss, rtol=1e-4, atol=1e-6)
def run_conv_nnet2(use_gpu):  # pretend we are training LeNet for MNIST
    """Train a 2-conv-layer network for a few steps; return the last loss."""
    if use_gpu:
        shared_fn = tcn.shared_constructor
    else:
        shared_fn = shared

    # Cumulative rounding error affects the CPU/GPU comparison of results,
    # so the caller lowers the tolerance.
    # TODO: why do the last two examples see a lower error? Are we converging?
    # n_train=10, n_batch=3, n_kern=1, n_kern1=1, error see of 1e-9
    # n_train=10, n_batch=3, n_kern=10, n_kern1=1, error see of -1.27777e-06
    # n_train=10, n_batch=3, n_kern=10, n_kern1=10, error see of -6.91377e-05
    # n_train=10, n_batch=30, n_kern=10, n_kern1=10, error see of -0.00185963
    # n_train=10, n_batch=60, n_kern=10, n_kern1=10, error see of -5.26905e-05
    # n_train=30, n_batch=60, n_kern=10, n_kern1=10, error see of -3.8147e-06
    # n_train=30, n_batch=60, n_kern=20, n_kern1=10, error see of 6.82771e-05
    # n_train=30, n_batch=60, n_kern=20, n_kern1=30, error see of 0.000231534
    n_batch = 60
    shape_img = (n_batch, 1, 32, 32)
    n_kern = 20
    shape_kern = (n_kern, 1, 5, 5)
    n_kern1 = 10
    shape_kern1 = (n_kern1, n_kern, 5, 5)
    n_train = 30
    if config.mode == 'DEBUG_MODE':
        n_train = 1

    # Output shapes of the two 'valid' convolutions; layer 1 sees the 2x2
    # downsampled output of layer 0.
    logical_hid_shape = tcn.blas.GpuConv.logical_output_shape_2d(tuple(
        shape_img[2:]), tuple(shape_kern[2:]), 'valid')
    logical_hid_shape1 = tcn.blas.GpuConv.logical_output_shape_2d(
        (logical_hid_shape[0] // 2, logical_hid_shape[1] // 2),
        tuple(shape_kern1[2:]), 'valid')
    n_hid = n_kern1 * logical_hid_shape1[0] * logical_hid_shape1[1]
    n_out = 10

    w0 = shared_fn(0.01 * (my_rand(*shape_kern) - 0.5), 'w0')
    b0 = shared_fn(my_zeros((n_kern,)), 'b0')
    w1 = shared_fn(0.01 * (my_rand(*shape_kern1) - 0.5), 'w1')
    b1 = shared_fn(my_zeros((n_kern1,)), 'b1')
    # NOTE(review): `v` is (mis)named 'c' here, the same debug name as the
    # output bias below; names are only used for debug output.
    v = shared_fn(my_zeros((n_hid, n_out)), 'c')
    c = shared_fn(my_zeros(n_out), 'c')

    x = tensor.Tensor(dtype='float32', broadcastable=(0, 1, 0, 0))('x')
    y = tensor.fmatrix('y')
    lr = tensor.fscalar('lr')

    conv_op = conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern, n_batch, 1, 1)
    conv_op1 = conv.ConvOp((n_kern, logical_hid_shape[0] // 2,
                            logical_hid_shape[1] // 2),
                           shape_kern1[2:],
                           n_kern1, n_batch, 1, 1)

    hid = tensor.tanh(conv_op(x, w0) + b0.dimshuffle((0, 'x', 'x')))
    # 2x2 downsampling implemented with strided slicing between the layers.
    hid1 = tensor.tanh(conv_op1(hid[:, :, ::2, ::2], w1) + b1.dimshuffle((
        0, 'x', 'x')))
    hid_flat = hid1.reshape((n_batch, n_hid))
    out = tensor.tanh(tensor.dot(hid_flat, v) + c)
    loss = tensor.sum(0.5 * (out - y) ** 2 * lr)

    params = [w0, b0, w1, b1, v, c]
    gparams = tensor.grad(loss, params)
    mode = get_mode(use_gpu)
    train = pfunc(
        [x, y, lr],
        [loss],
        mode=mode,
        updates=[(p, p - g) for p, g in zip(params, gparams)])

    xval = my_rand(*shape_img)
    yval = my_rand(n_batch, n_out)  # int32 make all 0...
    lr = theano._asarray(0.01, dtype='float32')
    for i in xrange(n_train):
        rval = train(xval, yval, lr)
    print_mode(mode)
    return rval
def test_conv_nnet2():
    """Check that run_conv_nnet2 gives matching losses on GPU and CPU."""
    utt.seed_rng()
    gpu_loss = run_conv_nnet2(True)
    utt.seed_rng()
    cpu_loss = run_conv_nnet2(False)
    utt.assert_allclose(cpu_loss, gpu_loss, rtol=1e-4, atol=1e-4)
def build_conv_nnet2_classif(use_gpu, isize, ksize, n_batch,
                             downsample_ops=True, verbose=0, version=-1,
                             check_isfinite=True):
    """Build a LeNet-like 2-conv-layer softmax classifier and its SGD trainer.

    isize may be an int (square image) or an (isize1, isize2) tuple.
    Returns (train_function, params, x_shape, y_shape, mode).
    """
    if use_gpu:
        shared_fn = tcn.shared_constructor
    else:
        shared_fn = shared

    isize1 = isize
    isize2 = isize
    if isinstance(isize, (tuple, )):
        isize1 = isize[0]
        isize2 = isize[1]

    shape_img = (n_batch, 1, isize1, isize2)
    n_kern = 20  # 6 were used in LeNet5
    shape_kern = (n_kern, 1, ksize, ksize)
    n_kern1 = 30  # 16 were used in LeNet5
    shape_kern1 = (n_kern1, n_kern, ksize, ksize)

    # Output shapes of the two 'valid' convolutions; layer 1 operates on the
    # 2x2-downsampled output of layer 0.
    logical_hid_shape = tcn.blas.GpuConv.logical_output_shape_2d(
        (isize1, isize2), (ksize, ksize), 'valid')
    logical_hid_shape1 = tcn.blas.GpuConv.logical_output_shape_2d(
        (logical_hid_shape[0] // 2, logical_hid_shape[1] // 2),
        (ksize, ksize), 'valid')
    n_hid = n_kern1 * logical_hid_shape1[0] * logical_hid_shape1[1]
    n_out = 10

    w0 = shared_fn(0.01 * (my_rand(*shape_kern) - 0.5), 'w0')
    b0 = shared_fn(my_zeros((n_kern,)), 'b0')
    w1 = shared_fn(0.01 * (my_rand(*shape_kern1) - 0.5), 'w1')
    b1 = shared_fn(my_zeros((n_kern1,)), 'b1')
    v = shared_fn(0.01 * my_randn(n_hid, n_out), 'v')
    c = shared_fn(my_zeros(n_out), 'c')

    x = tensor.Tensor(dtype='float32', broadcastable=(0, 1, 0, 0))('x')
    y = tensor.fmatrix('y')
    lr = tensor.fscalar('lr')

    conv_op = conv.ConvOp(shape_img[2:], shape_kern[2:], n_kern,
                          n_batch, 1, 1, verbose=verbose, version=version)
    conv_op1 = conv.ConvOp(
        (n_kern, logical_hid_shape[0] // 2, logical_hid_shape[1] // 2),
        shape_kern1[2:], n_kern1, n_batch, 1, 1, verbose=verbose, version=version)

    ds_op = pool.Pool((2, 2), ignore_border=False)
    if downsample_ops:
        # Downsample with the Pool op ...
        hid = tensor.tanh(ds_op(conv_op(x, w0) + b0.dimshuffle((0, 'x', 'x'))))
    else:
        # ... or emulate it with strided slicing.
        hid = tensor.tanh(
            (conv_op(x, w0) + b0.dimshuffle(
                (0, 'x', 'x')))[:, :, ::2, ::2])
    hid1 = tensor.tanh(conv_op1(hid, w1) + b1.dimshuffle((0, 'x', 'x')))
    hid_flat = hid1.reshape((n_batch, n_hid))
    out = tensor.nnet.softmax(tensor.dot(hid_flat, v) + c)
    loss = tensor.sum(tensor.nnet.crossentropy_categorical_1hot(
        out, tensor.argmax(y, axis=1)) * lr)

    params = [w0, b0, w1, b1, v, c]
    gparams = tensor.grad(loss, params)
    mode = get_mode(use_gpu, check_isfinite)
    train = pfunc(
        [x, y, lr],
        [loss],
        mode=mode,
        updates=[(p, p - g) for p, g in zip(params, gparams)])
    if verbose:
        theano.printing.debugprint(train)
    if use_gpu:
        # Check that a GPU convolution implementation was actually used.
        topo = train.maker.fgraph.toposort()
        conv_ops = (tcn.blas.GpuConv,
                    tcn.dnn.GpuDnnConv,
                    tcn.dnn.GpuDnnConvGradI,
                    tcn.dnn.GpuDnnConvGradW,
                    tcn.blas.BaseGpuCorrMM)
        assert len([n for n in topo if isinstance(n.op, conv_ops)]) > 0

    shape_target = (n_batch, n_out)
    return train, params, shape_img, shape_target, mode
def run_conv_nnet2_classif(use_gpu, seed, isize, ksize, bsize,
                           n_train=10,
                           check_isfinite=True,
                           pickle=False,
                           verbose=0,
                           version=-1):
    """Run the train function returned by build_conv_nnet2_classif on one device.

    Returns the per-step losses as a float32 array.  When `pickle` is true
    and the mode is a ProfileMode, the profile is dumped to stdout.
    """
    utt.seed_rng(seed)  # Seeds numpy.random with seed
    train, params, x_shape, y_shape, mode = build_conv_nnet2_classif(
        use_gpu=use_gpu,
        isize=isize,
        ksize=ksize,
        n_batch=bsize,
        verbose=verbose,
        version=version,
        check_isfinite=check_isfinite)

    if use_gpu:
        device = 'GPU'
    else:
        device = 'CPU'

    xval = my_rand(*x_shape)
    yval = my_rand(*y_shape)
    lr = theano._asarray(0.01, dtype='float32')

    rvals = my_zeros(n_train)
    for i in xrange(n_train):
        rvals[i] = train(xval, yval, lr)[0]
    print_mode(mode)

    if pickle and isinstance(mode, theano.compile.ProfileMode):
        # Bugfix: `import pickle` would shadow the boolean `pickle`
        # parameter above, so import the module under an alias.
        import pickle as pickle_module
        print("BEGIN %s profile mode dump" % device)
        print(pickle_module.dumps(mode))
        print("END %s profile mode dump" % device)
    return rvals
def cmp_run_conv_nnet2_classif(seed, isize, ksize, bsize,
                               ignore_error=False,
                               n_train=10,
                               gpu_only=False,
                               cpu_only=False,
                               float_atol=1e-06,
                               check_isfinite=True,
                               pickle=False,
                               verbose=0,
                               version=-1):
    """Run the nnet2 function on 1 or 2 devices, and compares the results.

    float_atol: None mean use the default value.
    check_isfinite: the debug mode option. We forward this value to debug
    mode. For some parameter CrossentropyCategorical1Hot op generate inf
    when not optimized.
    """
    if config.mode == 'DEBUG_MODE':
        n_train = 1

    # Change global tolerance, used in DebugMode for instance; restored in
    # the finally block below.
    orig_float32_atol = theano.tensor.basic.float32_atol
    try:
        if float_atol:
            theano.tensor.basic.float32_atol = float_atol

        if gpu_only and cpu_only:
            raise ValueError("Please use only one of cpu_only and gpu_only")
        elif cpu_only:
            use_gpu = False
            compare = False
        elif gpu_only:
            use_gpu = True
            compare = False
        else:
            compare = True

        if not compare:
            # Single-device run: nothing to compare against.
            return run_conv_nnet2_classif(
                use_gpu=use_gpu,
                seed=seed, isize=isize, ksize=ksize, bsize=bsize,
                n_train=n_train,
                check_isfinite=check_isfinite,
                pickle=pickle,
                verbose=verbose,
                version=version)

        utt.seed_rng(seed)  # Seeds numpy.random with seed
        train_cpu, params_cpu, x_shape, y_shape, mode_cpu = \
            build_conv_nnet2_classif(
                use_gpu=False,
                isize=isize,
                ksize=ksize,
                n_batch=bsize,
                verbose=verbose,
                version=version,
                check_isfinite=check_isfinite)
        utt.seed_rng(seed)  # Seeds numpy.random with seed
        train_gpu, params_gpu, x_shape_gpu, y_shape_gpu, mode_gpu = \
            build_conv_nnet2_classif(
                use_gpu=True,
                isize=isize,
                ksize=ksize,
                n_batch=bsize,
                verbose=verbose,
                version=version,
                check_isfinite=check_isfinite)

        assert x_shape == x_shape_gpu
        assert y_shape == y_shape_gpu

        xval = my_rand(*x_shape)
        yval = my_rand(*y_shape)
        lr = theano._asarray(0.01, dtype='float32')

        time_cpu = 0
        time_gpu = 0

        for i in range(n_train):
            # Train one batch on CPU
            t0 = time.time()
            rval_cpu = train_cpu(xval, yval, lr)[0]
            t1 = time.time()
            time_cpu += (t1 - t0)

            # Train one batch on GPU
            t0 = time.time()
            rval_gpu = train_gpu(xval, yval, lr)[0]
            t1 = time.time()
            time_gpu += (t1 - t0)

            # Compare results; dump diagnostics on mismatch or when verbose.
            if (verbose or not
                    numpy.allclose(rval_cpu, rval_gpu, rtol=1e-5, atol=float_atol)):
                print("At batch:", i + 1)
                print("CPU:", rval_cpu)
                print("GPU:", rval_gpu)
                print("abs diff:", numpy.absolute(rval_gpu - rval_cpu))
                print("rel diff:", numpy.absolute((
                    rval_gpu - rval_cpu) / rval_gpu))
                if not ignore_error:
                    utt.assert_allclose(rval_cpu, rval_gpu,
                                        rtol=1e-5, atol=float_atol)

            # Synchronize parameters to start from the same point next time
            if i < n_train - 1:
                for cpu_p, gpu_p in zip(params_cpu, params_gpu):
                    cpu_p.set_value(gpu_p.get_value(borrow=False), borrow=True)
    finally:
        theano.tensor.basic.float32_atol = orig_float32_atol
# Default parameters for all subsequent tests; module-level so they can be
# tweaked in one place (or monkeypatched) for a whole test run.
gpu_only = False
cpu_only = False
ignore_error = False
verbose = 0
version = -1
seed = utt.fetch_seed()
def test_lenet_28():  # MNIST
    """LeNet-style comparison on 28x28 inputs with 5x5 kernels, batch 60."""
    flags = dict(ignore_error=ignore_error, gpu_only=gpu_only,
                 cpu_only=cpu_only, verbose=verbose, version=version)
    cmp_run_conv_nnet2_classif(seed, 28, 5, 60, n_train=10, **flags)
def test_lenet_32():  # CIFAR10 / Shapeset
    """LeNet-style comparison on 32x32 inputs with 5x5 kernels, batch 60.

    Consistency fix: forward cpu_only like every sibling test; previously it
    was omitted so the module-level cpu_only flag was silently ignored here.
    """
    cmp_run_conv_nnet2_classif(seed, 32, 5, 60, n_train=8,
                               ignore_error=ignore_error, gpu_only=gpu_only,
                               cpu_only=cpu_only,
                               verbose=verbose, version=version)
def test_lenet_32_long():  # CIFAR10 / Shapeset
    """Longer 32x32 run; exercises the gradient of pool on the GPU, which
    does not receive specific testing elsewhere."""
    flags = dict(ignore_error=ignore_error, gpu_only=gpu_only,
                 cpu_only=cpu_only, verbose=verbose, version=version)
    cmp_run_conv_nnet2_classif(seed, 32, 5, 30, n_train=50, **flags)
def test_lenet_64():  # ???
    """64x64 inputs with 7x7 kernels, batch 10.

    check_isfinite is forwarded because the CPU uses extended precision
    while the GPU does not.
    """
    flags = dict(ignore_error=ignore_error, gpu_only=gpu_only,
                 cpu_only=cpu_only, verbose=verbose,
                 check_isfinite=True, version=version)
    cmp_run_conv_nnet2_classif(seed, 64, 7, 10, n_train=10, **flags)
def test_lenet_108():  # NORB
    """108x108 inputs with 7x7 kernels, batch 5."""
    flags = dict(ignore_error=ignore_error, gpu_only=gpu_only,
                 cpu_only=cpu_only, verbose=verbose,
                 check_isfinite=True, version=version)
    cmp_run_conv_nnet2_classif(seed, 108, 7, 5, n_train=4, **flags)
def test_lenet_256():  # ImageNet
    """256x256 inputs with 9x9 kernels, batch 2; looser float_atol."""
    flags = dict(ignore_error=ignore_error, gpu_only=gpu_only,
                 cpu_only=cpu_only, verbose=verbose,
                 check_isfinite=True, version=version, float_atol=5e-5)
    cmp_run_conv_nnet2_classif(seed, 256, 9, 2, n_train=5, **flags)
# The name is deliberately misspelled ("tes_" instead of "test_") so the
# test runner does not collect it automatically: it does not work yet.
def tes_lenet_hd():  # HD 720p: 1280(wid)x720(len)
    cmp_run_conv_nnet2_classif(seed, (720, 1280), 9, 2, n_train=3,
                               ignore_error=ignore_error, gpu_only=gpu_only,
                               cpu_only=cpu_only, verbose=verbose,
                               check_isfinite=True, version=version)
# The name is deliberately misspelled ("tes_" instead of "test_") so the
# test runner does not collect it automatically: it does not work yet.
def tes_lenet_full_hd():  # HD 1080p: 1920(wid)x1080(len)
    cmp_run_conv_nnet2_classif(seed, (1080, 1920), 9, 2, n_train=3,
                               ignore_error=ignore_error, gpu_only=gpu_only,
                               cpu_only=cpu_only, verbose=verbose,
                               check_isfinite=True, version=version)
| |
'''
Multipoint Application Sharing protocol (T.128)
'''
import functools, itertools, types, builtins, operator, sys, six
import ptypes, protocol.gcc as gcc
from ptypes import *
ptypes.setbyteorder(ptypes.config.byteorder.littleendian)
### atomic definitions
# Fixed-width integer primitives used throughout the T.128 definitions
# (byte order is set to little-endian at module level above).
class Integer8(pint.uint8_t): pass
class Integer16(pint.uint16_t): pass
class Integer32(pint.uint32_t): pass

# 8-bit boolean with named TRUE/FALSE values.
class Boolean8(pint.enum, Integer8):
    _values_ = [
        ('FALSE', 0x00),
        ('TRUE', 0x01),
    ]

# 16-bit boolean with named TRUE/FALSE values.
class Boolean16(pint.enum, Integer16):
    _values_ = [
        ('FALSE', 0x0000),
        ('TRUE', 0x0001),
    ]
class ShareId(pstruct.type):
    """Share identifier: originating user id plus a per-share counter."""
    class UserId(pint.enum, pint.littleendian(gcc.ChannelId)):
        '''default value is set to server channel id (1002)'''
        _values_ = [
            ('serverChannelId', 1002),
        ]
        def properties(self):
            # Expose a 'valid' property when the instance holds data.
            res = super(ShareId.UserId, self).properties()
            if self.initializedQ():
                res['valid'] = self.valid()
            return res
        def default(self):
            # 0x3ea == 1002 == serverChannelId.
            return self.set(0x3ea)
        def valid(self):
            return self.copy().default().int() == self.int()
        def alloc(self, **attrs):
            # Newly allocated instances start at the server channel id.
            return super(ShareId.UserId, self).alloc(**attrs).default()
    _fields_ = [
        (UserId, 'userId'),
        (pint.uint16_t, 'counter'),
    ]
    def alloc(self, **fields):
        res = super(ShareId, self).alloc(**fields)
        # NOTE(review): this tests membership in `res` (the struct), not in
        # `fields` (the caller-supplied values) -- if pstruct.__contains__
        # checks declared field names, 'userId' is always present and the
        # default is never applied.  Likely meant `'userId' not in fields`;
        # confirm against ptypes semantics.
        if 'userId' not in res:
            res['userId'].default()
        return res
    def summary(self):
        return "userId={:d} counter={:d}".format(self['userId'].int(), self['counter'].int())
# A single 16-bit coordinate value.
class Coordinate16(Integer16): pass

### Container types
# Rectangle described by four 16-bit edge values.
class Rectangle16(pstruct.type):
    _fields_ = [
        (Integer16, 'left'),
        (Integer16, 'top'),
        (Integer16, 'right'),
        (Integer16, 'bottom'),
    ]
### Lookup types
# Registry mapping the 4-bit share-control pduType value to the structure
# implementing that PDU (populated below via @PDUType.define).
class PDUType(ptype.definition):
    cache = {}
    class type(pbinary.enum):
        # 4-bit enumeration of the share-control PDU types.
        length, _values_ = 4, [
            ('confirmActive', 3),
            ('data', 7),
            ('deactivateAll', 6),
            ('deactivateOther', 4),
            ('deactivateSelf', 5),
            ('demandActive', 1),
            ('requestActive', 2),
            ('serverRedirect', 10),
        ]
# Registry mapping the 16-bit capabilitySetType value to the structure
# implementing that capability set (populated via @CapabilitySetType.define).
class CapabilitySetType(ptype.definition):
    cache = {}
    class Choice(pint.enum, pint.uint16_t):
        _values_ = [
            ('bitmapCacheCapabilitySet', 4),
            ('bitmapCapabilitySet', 2),
            ('colorCacheCapabilitySet', 10),
            ('controlCapabilitySet', 5),
            ('generalCapabilitySet', 1),
            ('orderCapabilitySet', 3),
            ('pointerCapabilitySet', 8),
            ('activationCapabilitySet', 7),
            ('shareCapabilitySet', 9),
        ]
# Registry for the secondary (data) PDU types, keyed by an 8-bit value.
class PDUType2(ptype.definition):
    cache = {}
    class type(pint.enum, pint.uint8_t):
        _values_ = [
            ('application', 25),
            ('control', 20),
            ('font', 11),
            ('flowResponse', 66),
            ('flowStop', 67),
            ('flowTest', 65),
            ('input', 28),
            ('mediatedControl', 29),
            ('pointer', 27),
            ('remoteShare', 30),
            ('synchronize', 31),
            ('update', 2),
            ('updateCapability', 32),
            ('windowActivation', 23),
            ('windowList', 24),
        ]
# Registry for the flow-control PDU types (test/response/stop), 16-bit key.
class PDUTypeFlow(ptype.definition):
    cache = {}
    class type(pint.enum, Integer16):
        _values_ = [
            ('response', 66),
            ('stop', 67),
            ('test', 65),
        ]
### capability set definitions
# Operating-system family reported in the general capability set.
class OSMajorType(pint.enum, pint.uint16_t):
    _values_ = [
        ('unspecified', 0),
        ('windows', 1),
        ('os2', 2),
        ('macintosh', 3),
        ('unix', 4),
        ('ios', 5),
        ('osx', 6),
        ('android', 7),
        ('chromeOS', 8),
    ]
# Operating-system variant reported in the general capability set.
class OSMinorType(pint.enum, pint.uint16_t):
    _values_ = [
        ('unspecified', 0),
        ('windows-31x', 1),
        ('windows-95', 2),
        ('windows-NT', 3),
        ('oOS2-V21', 4),
        ('power-pc', 5),
        ('macintosh', 6),
        ('native-XServer', 7),
        ('pseudo-XServer', 8),
        ('windows-RT', 9),
    ]
@CapabilitySetType.define
class GeneralCapabilitySet(pstruct.type):
    # Registered as capabilitySetType = 1 (generalCapabilitySet).
    type = 1
    class _protocolVersion(pint.enum, Integer16):
        _values_ = [
            ('TS_CAPS_PROTOCOLVERSION', 0x0200),
        ]
        def default(self):
            # 0x0200 is the only defined protocol version.
            return self.set(0x0200)
        def valid(self):
            return self.copy().default().int() == self.int()
        def properties(self):
            res = super(GeneralCapabilitySet._protocolVersion, self).properties()
            res['valid'] = self.valid()
            return res
    @pbinary.littleendian
    class _extraFlags(pbinary.flags):
        _fields_ = [
            (5, 'unused0'),
            (1, 'NO_BITMAP_COMPRESSION_HDR'),
            (5, 'unused6'),
            (1, 'ENC_SALTED_CHECKSUM'),
            (1, 'AUTORECONNECT_SUPPORTED'),
            (1, 'LONG_CREDENTIALS_SUPPORTED'),
            (1, 'reserved'),
            (1, 'FASTPATH_OUTPUT_SUPPORTED'),
        ]
    _fields_ = [
        (OSMajorType, 'osMajorType'),
        (OSMinorType, 'osMinorType'),
        (_protocolVersion, 'protocolVersion'),
        (Integer16, 'pad2octetsA'),
        (Integer16, 'generalCompressionTypes'),
        (_extraFlags, 'extraFlags'),
        (Integer16, 'updatecapabilityFlag'),
        (Integer16, 'remoteUnshareFlag'),
        (Integer16, 'generalCompressionLevel'),
        (Integer16, 'pad2octetsC'),
        (Boolean16, 'refreshRectSupport'),
        (Boolean16, 'suppressOutputSupport'),
    ]
# 16-bit boolean flag field used for the bitmapCompressionType member.
@pbinary.littleendian
class BitmapCompressionCapabilityFlags(pbinary.enum):
    length, _values_ = 16, [
        ('FALSE', 0x0000),
        ('TRUE', 0x0001),
    ]

# Drawing flags for the bitmap capability set.
@pbinary.littleendian
class DRAW_(pbinary.flags):
    _fields_ = [
        (3, 'unused'),
        (1, 'UNUSED_FLAG'),
        (1, 'ALLOW_SKIP_ALPHA'),
        (1, 'ALLOW_COLOR_SUBSAMPLING'),
        (1, 'ALLOW_DYNAMIC_COLOR_FIDELITY'),
        (1, 'RESERVED'),
    ]
@CapabilitySetType.define
class BitmapCapabilitySet(pstruct.type):
    # Registered as capabilitySetType = 2 (bitmapCapabilitySet).
    type = 2
    _fields_ = [
        (Integer16, 'preferredBitsPerPixel'),
        (Boolean16, 'receive1BitPerPixelFlag'),
        (Boolean16, 'receive4BitsPerPixelFlag'),
        (Boolean16, 'receive8BitsPerPixelFlag'),
        (Integer16, 'desktopWidth'),
        (Integer16, 'desktopHeight'),
        (Integer16, 'pad2octetsA'),
        (Boolean16, 'desktopResizeFlag'),
        (BitmapCompressionCapabilityFlags, 'bitmapCompressionType'),
        (Integer8, 'highColorFlags'),
        (DRAW_, 'drawingFlags'),
        (Boolean16, 'multipleRectangleSupport'),
        (Integer16, 'pad2octetsB'),
    ]
# Flag bits of the orderFlags member of the order capability set.
@pbinary.littleendian
class OrderCapabilityFlags(pbinary.flags):
    _fields_ = [
        (8, 'unused0'),
        (1, 'orderFlagsExtraFlags'),
        (1, 'solidPatternBrushOnly'),
        (1, 'colorIndexSupport'),
        (1, 'unused11'),
        (1, 'zeroBoundsDeltasSupport'),
        (1, 'cannotReceiveOrders'),
        (1, 'negotiateOrderSupport'),
        (1, 'unused15'),
    ]

# Flag bits of the textFlags member of the order capability set.
@pbinary.littleendian
class TextCapabilityFlags(pbinary.flags):
    _fields_ = [
        (5, 'unused0'),
        (1, 'allowCellHeight'),
        (1, 'useBaselineStart'),
        (1, 'unused7'),
        (1, 'checkFontSignatures'),
        (1, 'unused9'),
        (1, 'allowDeltaXSimulation'),
        (4, 'unused11'),
        (1, 'checkFontAspect'),
    ]
# Extended order-support flags (orderSupportExFlags member).
@pbinary.littleendian
class ORDERFLAGS_EX_(pbinary.flags):
    _fields_ = [
        (13, 'unused0'),
        (1, 'ALTSEC_FRAME_MARKER_SUPPORT'),
        (1, 'CACHE_BITMAP_REV3_SUPPORT'),
        (1, 'unused15'),
    ]

# Order level; LEVEL_1_ORDERS is the only defined value.
class ORD_LEVEL_(pint.enum, Integer16):
    _values_ = [
        ('LEVEL_1_ORDERS', 1),
    ]
    def default(self):
        return self.set('LEVEL_1_ORDERS')
    def valid(self):
        return self.copy().default().int() == self.int()
    def properties(self):
        res = super(ORD_LEVEL_, self).properties()
        res['valid'] = self.valid()
        return res
@CapabilitySetType.define
class OrderCapabilitySet(pstruct.type):
    # Registered as capabilitySetType = 3 (orderCapabilitySet).
    type = 3
    class _orderSupport(parray.type):
        # Array of 32 Boolean8 entries; indices are given names by the
        # _values_ table below, remaining indices are "undefinedOrderNN".
        length, _object_ = 32, Boolean8
        def __init__(self, **attrs):
            super(OrderCapabilitySet._orderSupport, self).__init__(**attrs)
            # Precompute both directions of the index<->name mapping.
            self.__nameByIndex__ = { key : name for name, key in self._values_ }
            self.__indexByName__ = { name : key for name, key in self._values_ }
        def __getindex__(self, index):
            # Accept a known name, an "undefinedOrderNN" string, or a raw index.
            undefined = 'undefinedOrder'
            return self.__indexByName__.get(index, int(index[len(undefined):]) if isinstance(index, six.string_types) and index.startswith(undefined) else index)
        def summary(self):
            # Show only the entries that are enabled (non-zero).
            res = [ index for index, item in enumerate(self) if item.int() > 0 ]
            res = [ "{:s}({:d})".format(self.__nameByIndex__.get(index, "undefinedOrder"), index) for index in res ]
            return "{{{:s}}}".format(', '.join(res))
        def details(self):
            # One line per entry, named where possible.
            res = []
            for index, item in enumerate(self):
                res.append("[{:x}] <instance {:s} '{:s}'> (index {:d}) {:s}".format(item.getoffset(), item.classname(), self.__nameByIndex__.get(index, "undefinedOrder{:d}".format(index)), index, item.summary()))
            return '\n'.join(res)
        def repr(self):
            return self.details()
        _values_ = [
            ('destinationBltSupport', 0),
            ('patternBltSupport', 1),
            ('screenBltSupport', 2),
            ('memoryBltSupport', 3),
            ('memoryThreeWayBltSupport', 4),
            ('textSupport', 5),
            ('extendedTextSupport', 6),
            ('rectangleSupport', 7),
            ('lineSupport', 8),
            ('frameSupport', 9),
            ('opaqueRectangleSupport', 10),
            ('desktopSaveSupport', 11),
            ('multipleDestinationBltSupport', 15),
            ('multiplePatternBltSupport', 16),
            ('multipleScreenBltSupport', 17),
            ('multipleOpaqueRectangleSupport', 18),
            ('fastIndexSupport', 19),
            ('polygonSCSupport', 20),
            ('polygonCBSupport', 21),
            ('polylineSupport', 22),
            ('fastGlyphSupport', 24),
            ('ellipseSCSupport', 25),
            ('ellipseCBSupport', 26),
            ('glyphIndexSupport', 27),
        ]
    _fields_ = [
        (dyn.array(pint.uint8_t, 16), 'terminalDescriptor'),
        (Integer32, 'pad4octetsA'),
        (Integer16, 'desktopXGranularity'),
        (Integer16, 'desktopYGranularity'),
        (Integer16, 'pad2octetsA'),
        (Integer16, 'maximumOrderLevel'),
        (Integer16, 'numberFonts'),
        (OrderCapabilityFlags, 'orderFlags'),
        (_orderSupport, 'orderSupport'),
        (TextCapabilityFlags, 'textFlags'),
        (ORDERFLAGS_EX_, 'orderSupportExFlags'),
        (Integer32, 'pad4octetsB'),
        (Integer32, 'desktopSaveSize'),
        (Integer16, 'pad2octetsC'),
        (Integer16, 'pad2octetsD'),
        (Integer16, 'textANSICodePage'),
        (Integer16, 'pad2octetsE'),
    ]
# Flag bits of the controlFlags member of the control capability set.
@pbinary.littleendian
class ControlCapabilityFlags(pbinary.flags):
    _fields_ = [
        (15, 'unused'),
        (1, 'allowMediateControl'),
    ]

# Interest level used for controlInterest/detachInterest.
class ControlPriority(pint.enum, Integer16):
    _values_ = [
        ('always', 1),
        ('never', 2),
        ('confirm', 3),
    ]
@CapabilitySetType.define
class ControlCapabilitySet(pstruct.type):
    # Registered as capabilitySetType = 5 (controlCapabilitySet).
    type = 5
    _fields_ = [
        (ControlCapabilityFlags, 'controlFlags'),
        (Boolean16, 'remoteDetachFlag'),
        (ControlPriority, 'controlInterest'),
        (ControlPriority, 'detachInterest'),
    ]
@CapabilitySetType.define
class ActivationCapabilitySet(pstruct.type):
    # Registered as capabilitySetType = 7 (activationCapabilitySet).
    type = 7
    _fields_ = [
        (Boolean16, 'helpKeyFlag'),
        (Boolean16, 'helpIndexKeyFlag'),
        (Boolean16, 'helpExtendedKeyFlag'),
        (Boolean16, 'windowActivateFlag'),
    ]

@CapabilitySetType.define
class PointerCapabilitySet(pstruct.type):
    # Registered as capabilitySetType = 8 (pointerCapabilitySet).
    type = 8
    _fields_ = [
        (Boolean16, 'colorPointerFlag'),
        (Integer16, 'colorPointerCacheSize'),
        (Integer16, 'pointerCacheSize'),
    ]

@CapabilitySetType.define
class ShareCapabilitySet(pstruct.type):
    # Registered as capabilitySetType = 9 (shareCapabilitySet).
    type = 9
    _fields_ = [
        (ShareId.UserId, 'nodeID'),
        (Integer16, 'pad2octets'),
    ]

@CapabilitySetType.define
class ColorCacheCapabilitySet(pstruct.type):
    # Registered as capabilitySetType = 10 (colorCacheCapabilitySet).
    type = 10
    _fields_ = [
        (Integer16, 'colorTableCacheSize'),
        (Integer16, 'pad2octetsA'),
    ]
class CapabilitySet(pstruct.type):
    """Type/length-prefixed capability set; body chosen by capabilitySetType."""
    def __capabilityParameters(self):
        # Look up the body type from capabilitySetType and clamp its
        # blocksize to lengthCapability minus the 4-byte header.
        type, capacity = (self[fld].li for fld in ['capabilitySetType','lengthCapability'])
        total = type.size() + capacity.size()
        return CapabilitySetType.get(type.int(), blocksize=lambda self, cb=max(0, capacity.int() - total): cb)
    _fields_ = [
        (CapabilitySetType.Choice, 'capabilitySetType'),
        (Integer16, 'lengthCapability'),
        (__capabilityParameters, 'capabilityData'),
    ]
    def alloc(self, **fields):
        # Fill in the type and length from the allocated body when the
        # caller did not supply them explicitly.
        res = super(CapabilitySet, self).alloc(**fields)
        if 'capabilitySetType' not in fields:
            res.set(capabilitySetType=res['capabilityData'].type)
        # NOTE(review): the length computation reads via `self` while the
        # other branch uses `res`; this is only equivalent if alloc()
        # returns `self` -- confirm against ptypes' pstruct.alloc semantics.
        if 'lengthCapability' not in fields:
            res.set(lengthCapability=self['capabilityData'].size() + sum(self[fld].size() for fld in ['capabilitySetType','lengthCapability']))
        return res
class CombinedCapabilities(pstruct.type):
    """Counted array of CapabilitySet entries."""
    _fields_ = [
        (Integer16, 'numberCapabilities'),
        (Integer16, 'pad2octets'),
        (lambda self: dyn.array(CapabilitySet, self['numberCapabilities'].li.int()), 'capabilitySets'),
    ]
    def alloc(self, **fields):
        # Derive numberCapabilities from the allocated array unless given.
        res = super(CombinedCapabilities, self).alloc(**fields)
        return res if 'numberCapabilities' in fields else res.set(numberCapabilities=len(res['capabilitySets']))
### PDUType definitions
# 16-bit field combining a 12-bit protocol version with the 4-bit PDU type.
@pbinary.littleendian
class PDUShareType(pbinary.struct):
    _fields_ = [
        (12, 'protocolVersion'),
        (PDUType.type, 'type'),
    ]
class ShareControlHeader(pstruct.type):
    """Header common to all share-control PDUs: pduType plus pduSource."""
    def __pduSource(self):
        # In some cases (DeactivateAllPDU), the blocksize() of this PDU might be
        # 4 which requires discarding the pduSource. To fix this, we check for
        # the size explicitly and only include it if the size is correct.
        # NOTE(review): SharePDU is defined elsewhere in this module.
        try:
            res = self.getparent(SharePDU)
            return pint.uint_t if res['totalLength'].li.int() == 4 else ShareId.UserId
        except ptypes.error.ItemNotFoundError: pass
        return ShareId.UserId
    _fields_ = [
        (PDUShareType, 'pduType'),
        (__pduSource, 'pduSource'),
    ]
    def summary(self):
        res = []
        res.append("pduSource={:s}".format(self['pduSource'].summary()))
        res.append("pduType={:s}({:d}) protocolVersion={:d}".format(self['pduType'].item('type').str(), self['pduType']['type'], self['pduType']['protocolVersion']))
        return ' '.join(res)
class ShareControlPDU(pstruct.type):
    """Share-control header followed by the packet selected by its pduType."""
    def __shareControlPacket(self):
        try:
            parent = self.getparent(SharePDU)
            length = parent['totalLength'].li
        # If we don't have a parent, we can't determine the totalLength and
        # so we don't need to abate this field
        except ptypes.error.ItemNotFoundError:
            res = self['shareControlHeader'].li
            return PDUType.withdefault(res['pduType']['type'], ptype.undefined)
        # However, if we do have a length then use it to abate the structure
        # that gets chosen as shareControlPacket
        res = self['shareControlHeader'].li
        total = length.size() + res.size()
        return PDUType.get(res['pduType']['type'], ptype.block, blocksize=lambda self, cb=max(0, length.int() - total): cb)
    _fields_ = [
        (ShareControlHeader, 'shareControlHeader'),
        (__shareControlPacket, 'shareControlPacket'),
    ]
class SourceDescriptor(ptype.block):
    """Opaque source-descriptor bytes; summarized as an escaped string."""
    def summary(self):
        # Escape non-printable bytes so the summary is always displayable.
        data = self.serialize().decode('latin1')
        encoded = data.encode('unicode_escape')
        return "({:d}) \"{:s}\"".format(self.size(), encoded.decode(sys.getdefaultencoding()).replace('"', '\\"'))
@PDUType.define
class DemandActivePDU(pstruct.type):
    # Registered as share-control pduType = 1 (demandActive).
    type = 0x1
    _fields_ = [
        (ShareId, 'shareId'),
        (Integer16, 'lengthSourceDescriptor'),
        (Integer16, 'lengthCombinedCapabilities'),
        (lambda self: dyn.clone(SourceDescriptor, length=self['lengthSourceDescriptor'].li.int()), 'sourceDescriptor'),
        (CombinedCapabilities, 'combinedCapabilities'),
        (pint.uint32_t, 'sessionId'),
    ]
    def alloc(self, **fields):
        # Derive the two length fields from the allocated contents unless
        # the caller supplied them explicitly.
        res = super(DemandActivePDU, self).alloc(**fields)
        flds = {}
        if 'lengthSourceDescriptor' not in fields:
            flds['lengthSourceDescriptor'] = res['sourceDescriptor'].size()
        if 'lengthCombinedCapabilities' not in fields:
            flds['lengthCombinedCapabilities'] = res['combinedCapabilities'].size()
        return res.set(**flds) if flds else res
@PDUType.define
class RequestActivePDU(DemandActivePDU):
    # Same layout as DemandActivePDU minus shareId/sessionId; inherits the
    # length-defaulting alloc() from the base class.
    type = 2
    _fields_ = [
        (Integer16, 'lengthSourceDescriptor'),
        (Integer16, 'lengthCombinedCapabilities'),
        (lambda self: dyn.clone(SourceDescriptor, length=self['lengthSourceDescriptor'].li.int()), 'sourceDescriptor'),
        (CombinedCapabilities, 'combinedCapabilities'),
    ]
@PDUType.define
class ConfirmActivePDU(DemandActivePDU):
    # DemandActivePDU layout plus an originatorId and without sessionId;
    # inherits the length-defaulting alloc() from the base class.
    type = 3
    _fields_ = [
        (ShareId, 'shareId'),
        (ShareId.UserId, 'originatorId'),
        (Integer16, 'lengthSourceDescriptor'),
        (Integer16, 'lengthCombinedCapabilities'),
        (lambda self: dyn.clone(SourceDescriptor, length=self['lengthSourceDescriptor'].li.int()), 'sourceDescriptor'),
        (CombinedCapabilities, 'combinedCapabilities'),
    ]
class DeactivatePDU(pstruct.type):
    '''
    This definition is used only to consolidate all of the deactivation PDUs
    (DeactivateOtherPDU, DeactivateSelfPDU, and DeactivateAllPDU) under the
    same base type so that they can be tested against if necessary.
    '''
    # Intentionally declares no _fields_; each subclass supplies its own layout.
@PDUType.define
class DeactivateOtherPDU(DeactivatePDU):
    # Deactivates another party (identified by deactivateId) from the share.
    type = 4
    _fields_ = [
        (ShareId, 'shareId'),
        (ShareId.UserId, 'deactivateId'),
        (Integer16, 'lengthSourceDescriptor'),
        (lambda self: dyn.clone(SourceDescriptor, length=self['lengthSourceDescriptor'].li.int()), 'sourceDescriptor'),
    ]
    def alloc(self, **fields):
        """Allocate, defaulting lengthSourceDescriptor to the descriptor size."""
        res = super(DeactivateOtherPDU, self).alloc(**fields)
        flds = {}
        if 'lengthSourceDescriptor' not in fields:
            flds['lengthSourceDescriptor'] = res['sourceDescriptor'].size()
        return res.set(**flds) if flds else res
    def summary(self):
        # Escape-encode the descriptor bytes so control characters render safely.
        res = self['sourceDescriptor'].serialize().decode('latin1')
        encoded = res.encode('unicode_escape')
        return "shareId={:s} deactivateId={:s} sourceDescriptor=\"{:s}\"".format(self['shareId'].summary(), self['deactivateId'].summary(), encoded.decode(sys.getdefaultencoding()).replace('"', '\\"'))
@PDUType.define
class DeactivateSelfPDU(DeactivatePDU):
    # The sender deactivates itself; only the share identifier is carried.
    type = 5
    _fields_ = [
        (ShareId, 'shareId'),
    ]
    def summary(self):
        return "shareId={:s}".format(self['shareId'].summary())
@PDUType.define
class DeactivateAllPDU(DeactivatePDU):
    # Deactivates every party in the share.
    type = 6
    _fields_ = [
        (ShareId, 'shareId'),
        (Integer16, 'lengthSourceDescriptor'),
        (lambda self: dyn.clone(SourceDescriptor, length=self['lengthSourceDescriptor'].li.int()), 'sourceDescriptor'),
    ]
    def summary(self):
        res = self.properties()
        # This packet will sometimes be clamped and so to deal with this case,
        # we'll explicitly check for the abated property before dumping it out
        # regularly.
        if not res.get('abated', False):
            # NOTE: `res` is rebound here from the properties dict to the raw
            # descriptor string before being escape-encoded.
            res = self['sourceDescriptor'].serialize().decode('latin1')
            encoded = res.encode('unicode_escape')
            return "shareId={:s} sourceDescriptor=\"{:s}\"".format(self['shareId'].summary(), encoded.decode(sys.getdefaultencoding()).replace('"', '\\"'))
        # Otherwise, the structure is abated so we'll emit the parent's summary
        return super(DeactivateAllPDU, self).summary()
class StreamId(pint.enum, Integer8):
    # Priority stream identifier carried in a ShareDataHeader.
    _values_ = [
        ('streamUndefined', 0x0),
        ('streamLowPriority', 0x1),
        ('streamMediumPriority', 0x2),
        ('streamHighPriority', 0x4),
    ]
class ShareDataHeader(pstruct.type):
    # Common header preceding every share data packet.
    _fields_ = [
        (ShareId, 'shareId'),
        (Integer8, 'pad1octet'),
        (StreamId, 'streamId'),
    ]
    def summary(self):
        stream, share = self['streamId'], self['shareId']
        parts = [
            "streamId={:s}({:d})".format(stream.str(), stream.int()),
            "shareId=({:d},{:d})".format(share['userId'].int(), share['counter'].int()),
        ]
        return ' '.join(parts)
class ShareDataPacket(pstruct.type):
    '''
    pduType2-discriminated data packet: a discriminator, compression info, and
    the PDUType2-specific body, padded out to the blocksize.
    '''
    # PACKET_COMPRESSED bit of the compressedType octet (MS-RDPBCGR 2.2.8.1.1.1.2).
    _COMPRESSED = 0x20
    def __data(self):
        # FIX: the original summed/looked-up 'compressedType'/'compressedLength',
        # but the fields are declared below as 'generalCompressedType' and
        # 'generalCompressedLength', so decoding raised a missing-field error.
        res = sum(self[fld].li.size() for fld in ['pduType2','generalCompressedType','generalCompressedLength'])
        if self['generalCompressedType'].li.int() & self._COMPRESSED:
            # Compressed payloads cannot be decoded structurally; keep raw bytes.
            return dyn.block(max(0, self.blocksize() - res))
        return PDUType2.withdefault(self['pduType2'].li.int(), ptype.block, length=max(0, self.blocksize() - res))
    def __unparsed(self):
        # Whatever the chosen data type did not consume, up to the blocksize.
        res = sum(self[fld].li.size() for fld in ['pduType2','generalCompressedType','generalCompressedLength','data'])
        return dyn.block(max(0, self.blocksize() - res))
    _fields_ = [
        (PDUType2.type, 'pduType2'),
        (Integer8, 'generalCompressedType'),
        (Integer16, 'generalCompressedLength'),
        (__data, 'data'),
        (__unparsed, 'undefined'), # FIXME: this padding is based on the blocksize because I can't really figure out how the PDUType2's are supposed to be sized
    ]
    def summary(self):
        return "pduType2={:s}({:d}) data={:s}".format(self['pduType2'].str(), self['pduType2'].int(), self['data'].instance())
    def alloc(self, **fields):
        """Allocate, defaulting pduType2 from the data field's type attribute."""
        res = super(ShareDataPacket, self).alloc(**fields)
        return res.set(pduType2=res['data'].type) if 'pduType2' not in fields and hasattr(res['data'], 'type') else res
@PDUType.define
class DataPDU(pstruct.type):
    # T.128 DataPDU: a ShareDataHeader followed by a bounded ShareDataPacket.
    type = 7
    def __shareDataPacket(self):
        # NOTE(review): this branch is deliberately disabled (`if False:`);
        # the uncompressedLength-based sizing below is never used -- confirm
        # whether it should be re-enabled or removed.
        if False:
        # First check to see if the uncompressed length was specified as then we
        # can use this to calculate the actual blocksize of the ShareDataPacket
            res = self['uncompressedLength'].li
            if res.int():
                return dyn.clone(ShareDataPacket, blocksize=(lambda self, cb=res.int() - 0xe: cb)) if 0xe <= res.int() else ShareDataPacket
        # Otherwise, we'll need to traverse to the parent to grab the packet
        # size. We might not be able to be too sure about this since the field
        # name is "totalLength" which could imply that the length is the full
        # size of the entire SharePDU.
        try:
            parent = self.getparent(SharePDU)
            # Bytes consumed so far relative to the parent, including our own
            # leading fields.
            total = self.getoffset() - parent.getoffset()
            total += sum(self[fld].li.size() for fld in ['shareDataHeader','uncompressedLength'])
        # Nothing was found, so don't bother trying to adjust the ShareDataPacket
        # since this might be being constructed by the user.
        except ptypes.error.ItemNotFoundError:
            return ShareDataPacket
        # Now that we've figured out the total size and our parent that the size
        # is relative to, we should be able to bound the ShareDataPacket according
        # to what's left.
        res = parent['totalLength'].li.int()
        return dyn.clone(ShareDataPacket, blocksize=(lambda self, cb=res - total: cb)) if total <= res else ShareDataPacket
    _fields_ = [
        (ShareDataHeader, 'shareDataHeader'),
        (Integer16, 'uncompressedLength'),
        (__shareDataPacket, 'shareDataPacket'),
    ]
    #def alloc(self, **fields):
    #    fields.setdefault('uncompressedLength', 4)
    #    res = super(DataPDU, self).alloc(**fields)
    #    return res if 'uncompressedLength' in fields else res.set(uncompressedLength=res['shareDataPacket'].size())
    #def summary(self):
    #    res = self['shareDataHeader'].li
    #    return "shareDataHeader.streamId={:s}({:d}) shareDataHeader.pduType2={:s}({:d}) shareDataPacket={:s}".format(res['streamId'].str(), res['streamId'].int(), res['pduType2'].str(), res['pduType2'].int(), self['shareDataPacket'].instance())
### PDUType2 definitions
class UpdateType(ptype.definition):
    # Registry mapping updateType codes to their Update*PDU structures.
    cache = {}
    class type(pint.enum, Integer16):
        _values_ = [
            ('orders', 0),
            ('bitmap', 1),
            ('palette', 2),
            ('synchronize', 3),
        ]
@PDUType2.define
class UpdatePDU(pstruct.type):
    # Screen-update PDU whose body is selected by the updateType discriminator.
    type = 2
    def __updateData(self):
        res = self['updateType'].li
        # Unknown update types fall back to an empty placeholder.
        return UpdateType.lookup(res.int(), ptype.undefined)
    _fields_ = [
        (UpdateType.type, 'updateType'),
        (__updateData, 'updateData'),
    ]
    def summary(self):
        return "updateType={:s} updateData={:s}".format(self['updateType'].summary(), self['updateData'].instance())
@UpdateType.define
class UpdateOrdersPDU(pstruct.type):
    # Drawing-orders update; the order entries themselves are not yet decoded.
    type = 0
    _fields_ = [
        (Integer16, 'pad2octetsA'),
        (Integer16, 'numberOrders'),
        (Integer16, 'pad2octetsB'),
        (lambda self: dyn.array(ptype.undefined, self['numberOrders'].li.int()), 'orderList'), # FIXME: not implemented
    ]
class CompressedBitmapData(pstruct.type):
    # Compressed bitmap stream header; compressedBitmap length is not sized here.
    _fields_ = [
        (Integer16, 'pad2octets'),
        (Integer16, 'mainBodySize'),
        (Integer16, 'rowSize'),
        (Integer16, 'uncompressedSize'),
        (ptype.block, 'compressedBitmap'),
    ]
class BitmapData(pstruct.type):
    # Container for raw or compressed bitmap payloads (sizes left undetermined).
    _fields_ = [
        (ptype.block, 'uncompressedBitmapData'),
        (CompressedBitmapData, 'compressedBitmapData'),
    ]
class TS_BITMAP_DATA(pstruct.type):
    # Per-rectangle bitmap data as used by UpdateBitmapPDU.
    class _flags(pbinary.flags):
        # NOTE(review): two fields share the name 'unused' (5- and 9-bit runs);
        # verify pbinary tolerates duplicate names or rename them distinctly.
        _fields_ = [
            (5, 'unused'),
            (1, 'NO_BITMAP_COMPRESSION_HDR'),
            (9, 'unused'),
            (1, 'BITMAP_COMPRESSION'),
        ]
    _fields_ = [
        (Coordinate16, 'destLeft'),
        (Coordinate16, 'destTop'),
        (Coordinate16, 'destRight'),
        (Coordinate16, 'destBottom'),
        (Integer16, 'width'),
        (Integer16, 'height'),
        (Integer16, 'bitsPerPixel'),
        (Boolean16, 'compressedFlag'),
        (Integer16, 'bitmapLength'),
        # Raw pixel/compressed bytes sized by bitmapLength.
        (lambda self: dyn.block(self['bitmapLength'].li.int()), 'bitmapData'), # XXX
    ]
@UpdateType.define
class UpdateBitmapPDU(pstruct.type):
    '''Microsoft's version of this structure is different from the T.128 specification'''
    type = 1
    _fields_ = [
        #(Integer16, 'pad2octetsA'),
        (Integer16, 'numberRectangles'),
        (lambda self: dyn.array(TS_BITMAP_DATA, self['numberRectangles'].li.int()), 'rectangles'),
    ]
class Color(pstruct.type):
    # One 8-bit-per-channel RGB palette entry.
    _fields_ = [
        (Integer8, 'red'),
        (Integer8, 'green'),
        (Integer8, 'blue'),
    ]
    def summary(self):
        r, g, b = (self[channel].li.int() for channel in ['red', 'green', 'blue'])
        return "red={:d} green={:d} blue={:d}".format(r, g, b)
@UpdateType.define
class UpdatePalettePDU(pstruct.type):
    # Palette update: a count followed by that many RGB entries.
    type = 2
    _fields_ = [
        (Integer16, 'pad2octets'),
        (Integer32, 'numberColors'),
        (lambda self: dyn.array(Color, self['numberColors'].li.int()), 'palette'),
    ]
@UpdateType.define
class UpdateSynchronizePDU(pstruct.type):
    # Synchronize update; carries only padding.
    type = 3
    _fields_ = [
        (Integer16, 'pad2octets'),
    ]
class SynchronizeMessageType(pint.enum, pint.uint16_t):
    # messageType for SynchronizePDU; only 'synchronize' is defined.
    _values_ = [
        ('synchronize', 1),
    ]
@PDUType2.define
class SynchronizePDU(pstruct.type):
    # Synchronize data PDU targeting a specific user.
    type = 31
    _fields_ = [
        (SynchronizeMessageType, 'messageType'),
        (ShareId.UserId, 'targetUser'),
    ]
    def summary(self):
        return "messageType={:s} targetUser={:s}".format(*(self[fld].summary() for fld in ['messageType','targetUser']))
class ControlAction(pint.enum, pint.uint16_t):
    # Action codes for ControlPDU.
    _values_ = [
        ('requestControl', 1),
        ('detach', 3),
        ('grantControl', 2),
        ('cooperate', 4),
    ]
@PDUType2.define
class ControlPDU(pstruct.type):
    # Control-arbitration PDU (request/grant/detach/cooperate).
    type = 20
    _fields_ = [
        (ControlAction, 'action'),
        (ShareId.UserId, 'grantId'),
        (Integer32, 'controlId'),
    ]
    def summary(self):
        return "action={:s} grantId={:s} controlId={:s}".format(*(self[fld].summary() for fld in ['action','grantId','controlId']))
class InputMessageType(pint.enum, Integer16):
    # messageType carried inside each input event.
    _values_ = [
        ('inputSynchronize', 0),
        ('inputCodePoint', 1),
        ('inputVirtualKey', 2),
        ('inputPointingDevice', 32769),
    ]
@pbinary.littleendian
class PointingDeviceFlags(pbinary.flags):
    # Mouse-event flags; bit positions annotated per original comments.
    _fields_ = [
        (1, 'down'), # 15
        (1, 'button3'), # 14
        (1, 'button2'), # 13
        (1, 'button1'), # 12
        (1, 'move'), # 11
        (11, 'unused'),
    ]
class PointingDeviceEvent(pstruct.type):
    # Pointing-device (mouse) input event.
    _fields_ = [
        (Integer32, 'eventTime'),
        (InputMessageType, 'messageType'),
        (PointingDeviceFlags, 'pointingDeviceFlags'),
        # FIX: both coordinate fields were named 'pointingDeviceY', which made
        # the X coordinate unaddressable; the first field is the X coordinate.
        (Coordinate16, 'pointingDeviceX'),
        (Coordinate16, 'pointingDeviceY'),
    ]
@pbinary.littleendian
class KeyboardFlags(pbinary.flags):
    # Keyboard-event flags; bit positions annotated per original comments.
    _fields_ = [
        (1, 'release'), # 15
        (1, 'down'), # 14
        (1, 'reserved'),
        (1, 'quiet'), # 12
        (11, 'unused'),
        (1, 'right'), # 0
    ]
class KeyboardEvent(pstruct.type):
    # Keyboard input event.
    _fields_ = [
        (Integer32, 'eventTime'),
        (InputMessageType, 'messageType'),
        (KeyboardFlags, 'keyboardFlags'),
        (Integer16, 'keyCode'),
    ]
class InputEvent(pstruct.type):
    # Choice-discriminated input event: a 3-bit selector followed by the
    # pointing-device, keyboard, or synchronize event it selects.
    class Choice(pbinary.enum):
        length, _values_ = 3, [
            ('pointingDevice', 0),
            ('keyboard', 1),
            ('synchronize', 2),
        ]
    def __event(self):
        selector = self['choice'].li.int()
        dispatch = {
            0: PointingDeviceEvent,
            1: KeyboardEvent,
            2: SynchronizeEvent,
        }
        if selector in dispatch:
            return dispatch[selector]
        raise NotImplementedError
    _fields_ = [
        (Choice, 'choice'),
        (__event, 'event'),
    ]
class SynchronizeEvent(pstruct.type):
    # FIXME: not sure how this is supposed to be defined
    _fields_ = [
        (Integer32, 'eventTime'),
        # Placeholder: layout of the non-standard parameters is unknown.
        (ptype.undefined, 'nonStandardParameters'),
    ]
@PDUType2.define
class InputPDU(pstruct.type):
    # Batch of input events: a count followed by that many InputEvent entries.
    type = 28
    _fields_ = [
        (Integer16, 'numberEvents'),
        (Integer16, 'pad2octets'),
        (lambda self: dyn.array(InputEvent, self['numberEvents'].li.int()), 'eventList'),
    ]
@PDUType2.define
@PDUTypeFlow.define
class FlowResponsePDU(pstruct.type):
    # Flow-control response; registered under both PDUType2 and PDUTypeFlow.
    type = 66
    _fields_ = [
        (Integer8, 'flowIdentifier'),
        (Integer8, 'flowNumber'),
        (ShareId.UserId, 'pduSource'),
    ]
@PDUType2.define
@PDUTypeFlow.define
class FlowStopPDU(pstruct.type):
    # Flow-control stop; registered under both PDUType2 and PDUTypeFlow.
    type = 67
    _fields_ = [
        (Integer8, 'flowIdentifier'),
        (ShareId.UserId, 'pduSource'),
    ]
@PDUType2.define
@PDUTypeFlow.define
class FlowTestPDU(pstruct.type):
    # Flow-control test; registered under both PDUType2 and PDUTypeFlow.
    type = 65
    _fields_ = [
        (Integer8, 'flowIdentifier'),
        (Integer8, 'flowNumber'),
        (ShareId.UserId, 'pduSource'),
    ]
### Share packets
class FlowPDU(pstruct.type):
    # Flow PDU: a pduTypeFlow discriminator followed by the matching packet.
    def __flowPacket(self):
        res = self['pduTypeFlow'].li
        # Unknown flow types fall back to an empty placeholder.
        return PDUTypeFlow.lookup(res.int(), ptype.undefined)
    _fields_ = [
        (PDUTypeFlow.type, 'pduTypeFlow'),
        (__flowPacket, 'flowPacket'),
    ]
class FlowMarker(pint.enum, Integer16):
    # First 16 bits of a SharePDU: either the totalLength of the PDU, or the
    # special value 0x8000 marking that a FlowPDU follows instead.
    _values_ = [
        ('FlowMarker', 0x8000)
    ]
    def FlowMarkerQ(self):
        # True when the value is the 0x8000 flow marker.
        return self.str() == 'FlowMarker'
    def properties(self):
        res = super(FlowMarker, self).properties()
        res['FlowMarkerQ'] = self.FlowMarkerQ()
        return res
class SharePDU(pstruct.type):
    # Top-level share packet: a FlowPDU when the leading word is the flow
    # marker, otherwise a length-bounded ShareControlPDU.
    def __sharePdu(self):
        res = self['totalLength'].li
        if res.FlowMarkerQ():
            return FlowPDU
        return ShareControlPDU
    _fields_ = [
        (FlowMarker, 'totalLength'),
        (__sharePdu, 'sharePdu'),
    ]
    def alloc(self, **fields):
        """Allocate; when totalLength was not given, set it to the marker for
        flow PDUs or to the full packet size (length word + body) otherwise."""
        res = super(SharePDU, self).alloc(**fields)
        return res if 'totalLength' in fields else res.set(totalLength='FlowMarker') if isinstance(res['sharePdu'], FlowPDU) else res.set(totalLength=res['totalLength'].size() + res['sharePdu'].size())
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import datetime
import locale
import os
import re
import sys
import pytz
import when
# This is a hack for Python 3. Python 3 has no type called basestring.
try:
basestring
except NameError:
basestring = str
sys.path.insert(0, os.path.abspath('..'))
sys.path.append('.')
class WhenTest(unittest.TestCase):
    def setUp(self):
        # Reset any forced-UTC state, then capture the time baselines that the
        # assertions below compare against.
        when.unset_utc()
        self.one_day = datetime.timedelta(days=1)
        self.one_second = datetime.timedelta(seconds=1)
        self.today = datetime.date.today()
        self.now = datetime.datetime.now()
        self.utc = datetime.datetime.utcnow()
        # Determine the local timezone: explicit TIMEZONE override first, then
        # the symlink target of /etc/localtime, then a fixed fallback.
        env_timezone = os.getenv('TIMEZONE')
        if env_timezone:
            self.timezone = env_timezone
        elif os.path.exists('/etc/localtime'):
            localtime_path = os.path.realpath('/etc/localtime')
            self.timezone = re.findall('([^/]*/[^/]*)$', localtime_path)[0]
        else:
            self.timezone = 'America/New_York'
    def test__add_time(self):
        """Test when._add_time()"""
        # Test change between months with different number of days
        test_value = datetime.datetime(2012, 3, 31)
        expected_value = datetime.datetime(2012, 5, 1)
        result = when._add_time(test_value, months=1)
        self.assertEqual(result, expected_value)
        # Test values going back into February of a leap year
        expected_value = datetime.datetime(2012, 3, 2)
        result = when._add_time(test_value, months=-1)
        self.assertEqual(result, expected_value)
        test_value = datetime.datetime(2012, 3, 30)
        expected_value = datetime.datetime(2012, 3, 1)
        result = when._add_time(test_value, months=-1)
        self.assertEqual(result, expected_value)
        # Non-leap year: Feb 31 overflows by 3 days instead of 2
        test_value = datetime.datetime(2011, 3, 31)
        expected_value = datetime.datetime(2011, 3, 3)
        result = when._add_time(test_value, months=-1)
        self.assertEqual(result, expected_value)
        # Test leap day specifically
        test_value = datetime.datetime(2012, 2, 29)
        expected_value = datetime.datetime(2013, 3, 1)
        result = when._add_time(test_value, years=1)
        self.assertEqual(result, expected_value)
        expected_value = datetime.datetime(2011, 3, 1)
        result = when._add_time(test_value, years=-1)
        self.assertEqual(result, expected_value)
    def test__add_time_typeerror(self):
        """Test TypeError raised by when._add_time()"""
        # A non-date/datetime/time value must be rejected.
        self.assertRaises(TypeError, when._add_time, 'a')
    def test__is_date_type(self):
        """Test when._is_date_type()"""
        # Non-temporal values are rejected; date, datetime, and time pass.
        self.assertFalse(when._is_date_type('a'))
        self.assertFalse(when._is_date_type(1))
        self.assertFalse(when._is_date_type(['a']))
        self.assertTrue(when._is_date_type(self.today))
        self.assertTrue(when._is_date_type(self.now))
        self.assertTrue(when._is_date_type(self.now.time()))
def test_all_timezones(self):
"""Test when.all_timezones()"""
# Make sure all_timezones() matches pytz's version
all_timezones = when.all_timezones()
self.assertEqual(all_timezones, pytz.all_timezones)
def test_all_timezones_set(self):
"""Test when.all_timezones_set()"""
# Make sure all_timezones_set() matches pytz's version
all_timezones_set = when.all_timezones_set()
self.assertEqual(all_timezones_set, pytz.all_timezones_set)
def test_common_timezones(self):
"""Test when.common_timezones()"""
# Make sure common_timezones() matches pytz's version
common_timezones = when.common_timezones()
self.assertEqual(common_timezones, pytz.common_timezones)
def test_common_timezones_set(self):
"""Test when.common_timezones_set()"""
# Make sure common_timezones_set() matches pytz's version
common_timezones_set = when.common_timezones_set()
self.assertEqual(common_timezones_set, pytz.common_timezones_set)
    def test_ever(self):
        """Test when.ever()"""
        # Repeated calls should return distinct datetimes; 50 draws makes an
        # accidental repeat effectively impossible.
        old_result = None
        for i in range(50):
            result = when.ever()
            self.assertTrue(isinstance(result, datetime.datetime))
            self.assertNotEqual(result, old_result)
            old_result = result
    def test_format(self):
        """Test when.format()"""
        now = when.now()
        today = when.today()
        current_time = now.time()
        for format_string in ('%a', '%A', '%b', '%B', '%c', '%d', '%f', '%H',
                              '%I', '%j', '%m', '%M', '%p', '%S', '%U', '%w',
                              '%W', '%x', '%X', '%y', '%Y', '%z', '%Z',
                              '%A, %B %d, %Y %I:%M %p'):
            # Test datetime objects (`now` is a datetime.datetime)
            builtin_date = now.strftime(format_string)
            result_date = when.format(now, format_string)
            self.assertEqual(builtin_date, result_date)
            # Test date objects (`today` is a datetime.date)
            builtin_datetime = today.strftime(format_string)
            result_datetime = when.format(today, format_string)
            self.assertEqual(builtin_datetime, result_datetime)
            # Test time objects
            builtin_time = current_time.strftime(format_string)
            result_time = when.format(current_time, format_string)
            self.assertEqual(builtin_time, result_time)
    def test_format_typeerror(self):
        """Test TypeError raised by when.format()"""
        # Non-temporal first argument must be rejected.
        self.assertRaises(TypeError, when.format, 'a', '%a')
    def test_formats(self):
        """Test the iteration of the formats class"""
        # Every attribute name yielded must resolve to a locale constant (int).
        for k in when.formats:
            self.assertTrue(isinstance(k, basestring))
            value = getattr(when.formats, k)
            locale_value = getattr(locale, value)
            self.assertTrue(isinstance(locale_value, int))
    def test_formats_metaclass(self):
        """Test the metaclass of the formats class"""
        self.assertTrue(isinstance(when.formats, when._FormatsMetaClass))
        # Attribute access and the metaclass __dict__ must agree.
        for k in when.formats:
            value = getattr(when.formats, k)
            self.assertEqual(value, getattr(when._FormatsMetaClass, k))
            self.assertEqual(value, when._FormatsMetaClass.__dict__[k])
    def test_how_many_leap_days(self):
        """Test when.how_many_leap_days()"""
        # Tests with just years
        self.assertEqual(when.how_many_leap_days(2012, 2012), 0)
        self.assertEqual(when.how_many_leap_days(2012, 2013), 1)
        self.assertEqual(when.how_many_leap_days(2012, 2017), 2)
        # Simple tests using `datetime.date`
        d1 = datetime.date(2012, 1, 1)
        d2 = datetime.date(2012, 2, 1)
        self.assertEqual(when.how_many_leap_days(d1, d2), 0)
        d1 = datetime.date(2012, 1, 1)
        d2 = datetime.date(2012, 3, 1)
        self.assertEqual(when.how_many_leap_days(d1, d2), 1)
        d1 = datetime.date(2012, 3, 1)
        d2 = datetime.date(2012, 4, 1)
        self.assertEqual(when.how_many_leap_days(d1, d2), 0)
        d1 = datetime.date(2012, 3, 1)
        d2 = datetime.date(2016, 2, 1)
        self.assertEqual(when.how_many_leap_days(d1, d2), 0)
        d1 = datetime.date(2012, 3, 1)
        d2 = datetime.date(2017, 2, 1)
        self.assertEqual(when.how_many_leap_days(d1, d2), 1)
        # Simple tests using `datetime.datetime`
        dt1 = datetime.datetime(2012, 2, 28)
        dt2 = datetime.datetime(2012, 2, 29)
        self.assertEqual(when.how_many_leap_days(dt1, dt2), 1)
        dt1 = datetime.datetime(2012, 2, 28)
        dt2 = datetime.datetime(2016, 2, 28)
        self.assertEqual(when.how_many_leap_days(dt1, dt2), 1)
        dt1 = datetime.datetime(2012, 2, 28)
        dt2 = datetime.datetime(2020, 2, 28)
        self.assertEqual(when.how_many_leap_days(dt1, dt2), 2)
        dt1 = datetime.datetime(2012, 2, 28)
        dt2 = datetime.datetime(2020, 2, 29)
        self.assertEqual(when.how_many_leap_days(dt1, dt2), 3)
        dt1 = datetime.datetime(2011, 2, 28)
        dt2 = datetime.datetime(2011, 3, 22)
        self.assertEqual(when.how_many_leap_days(dt1, dt2), 0)
        dt1 = datetime.datetime(2012, 2, 28)
        dt2 = datetime.datetime(2026, 2, 28)
        self.assertEqual(when.how_many_leap_days(dt1, dt2), 4)
        # And a few using mixed types
        # NOTE(review): the names below are misleading -- `d1` holds a date and
        # `dt2` a datetime in the first pair, but the prefixes are swapped in
        # the later pairs; the values, not the names, are what matter here.
        d1 = datetime.date(1970, 1, 1)
        dt2 = datetime.datetime(1980, 1, 1)
        self.assertEqual(when.how_many_leap_days(d1, dt2), 2)
        dt1 = datetime.date(1970, 1, 1)
        d2 = datetime.datetime(1990, 1, 1)
        self.assertEqual(when.how_many_leap_days(dt1, d2), 5)
        dt1 = datetime.date(2000, 1, 1)
        d2 = datetime.datetime(3000, 1, 1)
        # At first glance it would appear this should be 250, except that
        # years divisible by 100 are not leap years, of which there are 10,
        # unless they are also divisible by 400. The years 2000, 2400,
        # and 2800 need to be added back in. 250 - (10 - 3) = 243
        self.assertEqual(when.how_many_leap_days(dt1, d2), 243)
    def test_how_many_leap_days_typeerror(self):
        """Test TypeError raised by when.how_many_leap_days()"""
        d1 = when.today()
        d2 = when.yesterday()
        # from_date must be valid
        self.assertRaises(TypeError, when.how_many_leap_days, 'a', d2)
        # to_date must be valid
        self.assertRaises(TypeError, when.how_many_leap_days, d1, 'b')
    def test_how_many_leap_days_valueerror(self):
        """Test ValueError raised by when.how_many_leap_days()"""
        d1 = when.today()
        d2 = when.yesterday()
        # from_date must be before to_date
        self.assertRaises(ValueError, when.how_many_leap_days, d1, d2)
    def test_is_timezone_aware(self):
        """Test when.is_timezone_aware()"""
        # Check both datetime and time objects, with and without tzinfo.
        naive = when.now()
        aware = naive.replace(tzinfo=pytz.UTC)
        self.assertTrue(when.is_timezone_aware(aware))
        self.assertFalse(when.is_timezone_aware(naive))
        naive = naive.time()
        aware = naive.replace(tzinfo=pytz.UTC)
        self.assertTrue(when.is_timezone_aware(aware))
        self.assertFalse(when.is_timezone_aware(naive))
    def test_is_timezone_aware_typeerror(self):
        """Test TypeError raised by when.is_timezone_aware()"""
        # A plain date has no tzinfo concept at all, so it must be rejected.
        today = when.today()
        self.assertRaises(TypeError, when.is_timezone_aware, today)
    def test_is_timezone_naive(self):
        """Test when.is_timezone_naive()"""
        # Check both datetime and time objects, with and without tzinfo.
        naive = when.now()
        aware = naive.replace(tzinfo=pytz.UTC)
        self.assertTrue(when.is_timezone_naive(naive))
        self.assertFalse(when.is_timezone_naive(aware))
        naive = naive.time()
        aware = naive.replace(tzinfo=pytz.UTC)
        self.assertTrue(when.is_timezone_naive(naive))
        self.assertFalse(when.is_timezone_naive(aware))
def test_is_timezone_naive_typeerror(self):
"""Test TypeError raised by when.is_timezone_naive()"""
today = when.today()
self.assertRaises(TypeError, when.is_timezone_aware, today)
    def test_now(self):
        """Test when.now()"""
        now = when.now()
        utc = when.now(True)
        # Unfortunately the clock keeps ticking each time we capture a value
        # for now so we can't do a direct comparison with assertEqual.
        # It's probably safe to assume the now function is working as long as
        # the difference is less than a second. There's probably a better way
        # to test this, but for now it's sufficient.
        self.assertTrue(now - self.now < self.one_second)
        self.assertTrue(utc - self.utc < self.one_second)
    def test_set_utc(self):
        """Test when.set_utc()"""
        # setUp calls unset_utc(), so this flag is reset before every test.
        when.set_utc()
        self.assertEqual(when._FORCE_UTC, True)
    def test_shift(self):
        """Test when.shift()"""
        # Shifting there and back again must round-trip to the original value.
        first = when.shift(self.utc, from_tz='UTC', to_tz='America/New_York')
        second = when.shift(first, from_tz='America/New_York', to_tz='UTC')
        self.assertNotEqual(first, second)
        self.assertNotEqual(first, self.utc)
        self.assertEqual(second, self.utc)
        # Local time
        if self.timezone in ('UTC', 'Etc/UTC'):
            # This block is needed for tests run in an environment set to UTC.
            first = when.shift(self.now, to_tz='America/New_York')
            second = when.shift(first, from_tz='America/New_York')
        else:
            first = when.shift(self.now, to_tz='UTC')
            second = when.shift(first, from_tz='UTC')
        self.assertNotEqual(first, second)
        self.assertNotEqual(first, self.now)
        self.assertEqual(second, self.now)
        # Set utc parameter to true
        first = when.shift(self.utc, to_tz='America/New_York', utc=True)
        second = when.shift(first, from_tz='America/New_York', utc=True)
        self.assertNotEqual(first, second)
        self.assertNotEqual(first, self.utc)
        self.assertEqual(second, self.utc)
        # Force UTC
        when.set_utc()
        first = when.shift(self.utc, to_tz='America/New_York')
        second = when.shift(first, from_tz='America/New_York')
        self.assertNotEqual(first, second)
        self.assertNotEqual(first, self.utc)
        self.assertEqual(second, self.utc)
    def test_shift_typeerror(self):
        """Test TypeError raised by when.shift()"""
        # Strings and plain dates (no time component) must both be rejected.
        self.assertRaises(TypeError, when.shift, 'a')
        self.assertRaises(TypeError, when.shift, when.today())
    def test_shift_aware(self):
        """Test when.shift() for time zone aware datetimes"""
        central = pytz.timezone('America/Chicago')
        now_aware = central.localize(self.now)
        # Make sure the datetime's time zone is the respected
        first = when.shift(now_aware, to_tz='America/New_York')
        second = when.shift(self.now, from_tz='America/Chicago', to_tz='America/New_York')
        self.assertEqual(first, second)
        # Also make sure the from_tz parameter is ignored
        first = when.shift(now_aware, from_tz='UTC', to_tz='America/New_York')
        self.assertEqual(first, second)
        # Also make sure the utc parameter is ignored
        first = when.shift(now_aware, to_tz='America/New_York', utc=True)
        self.assertEqual(first, second)
    def test_timezone(self):
        """Test when.timezone()"""
        # Must report the same zone setUp derived from the environment.
        self.assertEqual(when.timezone(), self.timezone)
    def test_timezone_object(self):
        """Test when.timezone_object()"""
        # Must return the pytz tzinfo for the locally-detected zone.
        local_timezone = pytz.timezone(self.timezone)
        self.assertEqual(when.timezone_object(), local_timezone)
    def test_today(self):
        """Test when.today()"""
        # Compared against the date captured in setUp.
        self.assertEqual(when.today(), self.today)
    def test_tomorrow(self):
        """Test when.tomorrow()"""
        # Tomorrow is exactly one day after the date captured in setUp.
        self.assertEqual(when.tomorrow(), self.today + self.one_day)
    def test_unset_utc(self):
        """Test when.unset_utc()"""
        # Clearing the flag must leave _FORCE_UTC False.
        when.unset_utc()
        self.assertEqual(when._FORCE_UTC, False)
    def test_yesterday(self):
        """Test when.yesterday()"""
        # Yesterday is exactly one day before the date captured in setUp.
        self.assertEqual(when.yesterday(), self.today - self.one_day)
if __name__ == '__main__':
unittest.main()
| |
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from unittest.mock import Mock
from twisted.internet import defer
from synapse.appservice import ApplicationService, Namespace
from tests import unittest
from tests.test_utils import simple_async_mock
def _regex(regex: str, exclusive: bool = True) -> Namespace:
    """Build a Namespace wrapping the compiled *regex*, exclusive by default."""
    compiled = re.compile(regex)
    return Namespace(exclusive, None, compiled)
class ApplicationServiceTestCase(unittest.TestCase):
    def setUp(self):
        # Minimal appservice plus a mocked datastore; individual tests mutate
        # self.event and self.service.namespaces to exercise each check.
        self.service = ApplicationService(
            id="unique_identifier",
            sender="@as:test",
            url="some_url",
            token="some_token",
            hostname="matrix.org",  # only used by get_groups_for_user
        )
        self.event = Mock(
            event_id="$abc:xyz",
            type="m.something",
            room_id="!foo:bar",
            sender="@someone:somewhere",
        )
        self.store = Mock()
        self.store.get_aliases_for_room = simple_async_mock([])
        self.store.get_users_in_room = simple_async_mock([])
@defer.inlineCallbacks
def test_regex_user_id_prefix_match(self):
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
self.event.sender = "@irc_foobar:matrix.org"
self.assertTrue(
(
yield defer.ensureDeferred(
self.service.is_interested_in_event(
self.event.event_id, self.event, self.store
)
)
)
)
@defer.inlineCallbacks
def test_regex_user_id_prefix_no_match(self):
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
self.event.sender = "@someone_else:matrix.org"
self.assertFalse(
(
yield defer.ensureDeferred(
self.service.is_interested_in_event(
self.event.event_id, self.event, self.store
)
)
)
)
@defer.inlineCallbacks
def test_regex_room_member_is_checked(self):
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
self.event.sender = "@someone_else:matrix.org"
self.event.type = "m.room.member"
self.event.state_key = "@irc_foobar:matrix.org"
self.assertTrue(
(
yield defer.ensureDeferred(
self.service.is_interested_in_event(
self.event.event_id, self.event, self.store
)
)
)
)
@defer.inlineCallbacks
def test_regex_room_id_match(self):
self.service.namespaces[ApplicationService.NS_ROOMS].append(
_regex("!some_prefix.*some_suffix:matrix.org")
)
self.event.room_id = "!some_prefixs0m3th1nGsome_suffix:matrix.org"
self.assertTrue(
(
yield defer.ensureDeferred(
self.service.is_interested_in_event(
self.event.event_id, self.event, self.store
)
)
)
)
@defer.inlineCallbacks
def test_regex_room_id_no_match(self):
self.service.namespaces[ApplicationService.NS_ROOMS].append(
_regex("!some_prefix.*some_suffix:matrix.org")
)
self.event.room_id = "!XqBunHwQIXUiqCaoxq:matrix.org"
self.assertFalse(
(
yield defer.ensureDeferred(
self.service.is_interested_in_event(
self.event.event_id, self.event, self.store
)
)
)
)
@defer.inlineCallbacks
def test_regex_alias_match(self):
self.service.namespaces[ApplicationService.NS_ALIASES].append(
_regex("#irc_.*:matrix.org")
)
self.store.get_aliases_for_room = simple_async_mock(
["#irc_foobar:matrix.org", "#athing:matrix.org"]
)
self.store.get_users_in_room = simple_async_mock([])
self.assertTrue(
(
yield defer.ensureDeferred(
self.service.is_interested_in_event(
self.event.event_id, self.event, self.store
)
)
)
)
    def test_non_exclusive_alias(self):
        # A matching namespace registered with exclusive=False must not claim
        # the alias exclusively.
        self.service.namespaces[ApplicationService.NS_ALIASES].append(
            _regex("#irc_.*:matrix.org", exclusive=False)
        )
        self.assertFalse(self.service.is_exclusive_alias("#irc_foobar:matrix.org"))
    def test_non_exclusive_room(self):
        # A matching namespace registered with exclusive=False must not claim
        # the room exclusively.
        self.service.namespaces[ApplicationService.NS_ROOMS].append(
            _regex("!irc_.*:matrix.org", exclusive=False)
        )
        self.assertFalse(self.service.is_exclusive_room("!irc_foobar:matrix.org"))
    def test_non_exclusive_user(self):
        # A matching namespace registered with exclusive=False must not claim
        # the user exclusively.
        self.service.namespaces[ApplicationService.NS_USERS].append(
            _regex("@irc_.*:matrix.org", exclusive=False)
        )
        self.assertFalse(self.service.is_exclusive_user("@irc_foobar:matrix.org"))
    def test_exclusive_alias(self):
        # A matching namespace registered with exclusive=True claims the alias.
        self.service.namespaces[ApplicationService.NS_ALIASES].append(
            _regex("#irc_.*:matrix.org", exclusive=True)
        )
        self.assertTrue(self.service.is_exclusive_alias("#irc_foobar:matrix.org"))
    def test_exclusive_user(self):
        # A matching namespace registered with exclusive=True claims the user.
        self.service.namespaces[ApplicationService.NS_USERS].append(
            _regex("@irc_.*:matrix.org", exclusive=True)
        )
        self.assertTrue(self.service.is_exclusive_user("@irc_foobar:matrix.org"))
    def test_exclusive_room(self):
        # A matching namespace registered with exclusive=True claims the room.
        self.service.namespaces[ApplicationService.NS_ROOMS].append(
            _regex("!irc_.*:matrix.org", exclusive=True)
        )
        self.assertTrue(self.service.is_exclusive_room("!irc_foobar:matrix.org"))
@defer.inlineCallbacks
def test_regex_alias_no_match(self):
self.service.namespaces[ApplicationService.NS_ALIASES].append(
_regex("#irc_.*:matrix.org")
)
self.store.get_aliases_for_room = simple_async_mock(
["#xmpp_foobar:matrix.org", "#athing:matrix.org"]
)
self.store.get_users_in_room = simple_async_mock([])
self.assertFalse(
(
yield defer.ensureDeferred(
self.service.is_interested_in_event(
self.event.event_id, self.event, self.store
)
)
)
)
@defer.inlineCallbacks
def test_regex_multiple_matches(self):
self.service.namespaces[ApplicationService.NS_ALIASES].append(
_regex("#irc_.*:matrix.org")
)
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
self.event.sender = "@irc_foobar:matrix.org"
self.store.get_aliases_for_room = simple_async_mock(["#irc_barfoo:matrix.org"])
self.store.get_users_in_room = simple_async_mock([])
self.assertTrue(
(
yield defer.ensureDeferred(
self.service.is_interested_in_event(
self.event.event_id, self.event, self.store
)
)
)
)
@defer.inlineCallbacks
def test_interested_in_self(self):
# make sure invites get through
self.service.sender = "@appservice:name"
self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
self.event.type = "m.room.member"
self.event.content = {"membership": "invite"}
self.event.state_key = self.service.sender
self.assertTrue(
(
yield defer.ensureDeferred(
self.service.is_interested_in_event(
self.event.event_id, self.event, self.store
)
)
)
)
    @defer.inlineCallbacks
    def test_member_list_match(self):
        """The service is interested when any member of the room matches
        its user namespace, even if the event's sender does not."""
        self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*"))
        # Note that @irc_fo:here is the AS user.
        self.store.get_users_in_room = simple_async_mock(
            ["@alice:here", "@irc_fo:here", "@bob:here"]
        )
        self.store.get_aliases_for_room = simple_async_mock([])
        # Sender itself does not match the namespace.
        self.event.sender = "@xmpp_foobar:matrix.org"
        self.assertTrue(
            (
                yield defer.ensureDeferred(
                    self.service.is_interested_in_event(
                        self.event.event_id, self.event, self.store
                    )
                )
            )
        )
| |
# Natural Language Toolkit: Glue Semantics
#
# Author: Dan Garrette <dhgarrette@gmail.com>
#
# Copyright (C) 2001-2015 NLTK Project
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, division, unicode_literals
import os
import nltk
from nltk.internals import Counter
from nltk.compat import string_types
from nltk.tag import UnigramTagger, BigramTagger, TrigramTagger, RegexpTagger
from nltk.sem.logic import (Expression, Variable, VariableExpression,
LambdaExpression, AbstractVariableExpression)
from nltk.compat import python_2_unicode_compatible
from nltk.sem import drt
from nltk.sem import linearlogic
# Semantic types assigned to determiner words when a node's dependency
# relation is 'spec'; 'default' is the fallback for unlisted determiners
# (see GlueDict.get_semtypes).
SPEC_SEMTYPES = {'a' : 'ex_quant',
                 'an' : 'ex_quant',
                 'every' : 'univ_quant',
                 'the' : 'def_art',
                 'no' : 'no_quant',
                 'default' : 'ex_quant'}
# Dependency relations ignored when matching a glue entry's relationship
# set against a node's actual dependents (see _lookup_semtype_option).
OPTIONAL_RELATIONSHIPS = ['nmod', 'vmod', 'punct']
@python_2_unicode_compatible
class GlueFormula(object):
    """A glue-semantics premise: a meaning term (an NLTK logic
    ``Expression``) paired with a linear logic glue term, plus the set of
    indices of the compiled premises consumed to build it.
    """
    def __init__(self, meaning, glue, indices=None):
        """
        :param meaning: a logic ``Expression``, or a string parseable as one
        :param glue: a ``linearlogic.Expression``, or a string parseable as one
        :param indices: set of int indices of consumed compiled premises
        :raises RuntimeError: if either term is neither string nor expression
        """
        if not indices:
            indices = set()
        if isinstance(meaning, string_types):
            self.meaning = Expression.fromstring(meaning)
        elif isinstance(meaning, Expression):
            self.meaning = meaning
        else:
            raise RuntimeError('Meaning term neither string or expression: %s, %s' % (meaning, meaning.__class__))
        if isinstance(glue, string_types):
            self.glue = linearlogic.LinearLogicParser().parse(glue)
        elif isinstance(glue, linearlogic.Expression):
            self.glue = glue
        else:
            raise RuntimeError('Glue term neither string or expression: %s, %s' % (glue, glue.__class__))
        self.indices = indices
    def applyto(self, arg):
        """ self = (\\x.(walk x), (subj -o f))
        arg = (john , subj)
        returns ((walk john), f)
        """
        if self.indices & arg.indices: # if the sets are NOT disjoint
            raise linearlogic.LinearLogicApplicationException("'%s' applied to '%s'. Indices are not disjoint." % (self, arg))
        else: # if the sets ARE disjoint
            return_indices = (self.indices | arg.indices)
        try:
            return_glue = linearlogic.ApplicationExpression(self.glue, arg.glue, arg.indices)
        except linearlogic.LinearLogicApplicationException:
            raise linearlogic.LinearLogicApplicationException("'%s' applied to '%s'" % (self.simplify(), arg.simplify()))
        # Lambda-abstract the argument's meaning over the dependencies of
        # the glue antecedent before applying, so compiled-out dependents
        # are reintroduced as bound variables (v<dep>).
        arg_meaning_abstracted = arg.meaning
        if return_indices:
            for dep in self.glue.simplify().antecedent.dependencies[::-1]: # if self.glue is (A -o B), dep is in A.dependencies
                arg_meaning_abstracted = self.make_LambdaExpression(Variable('v%s' % dep),
                                                                    arg_meaning_abstracted)
        return_meaning = self.meaning.applyto(arg_meaning_abstracted)
        return self.__class__(return_meaning, return_glue, return_indices)
    def make_VariableExpression(self, name):
        # Factory hook: DRT subclasses override to build DRT expressions.
        return VariableExpression(name)
    def make_LambdaExpression(self, variable, term):
        # Factory hook: DRT subclasses override to build DRT expressions.
        return LambdaExpression(variable, term)
    def lambda_abstract(self, other):
        """Return a copy of self whose meaning is abstracted over *other*'s
        variable and whose glue is (other.glue -o self.glue)."""
        assert isinstance(other, GlueFormula)
        assert isinstance(other.meaning, AbstractVariableExpression)
        return self.__class__(self.make_LambdaExpression(other.meaning.variable,
                                                         self.meaning),
                              linearlogic.ImpExpression(other.glue, self.glue))
    def compile(self, counter=None):
        """From Iddo Lev's PhD Dissertation p108-109"""
        if not counter:
            counter = Counter()
        (compiled_glue, new_forms) = self.glue.simplify().compile_pos(counter, self.__class__)
        return new_forms + [self.__class__(self.meaning, compiled_glue, set([counter.get()]))]
    def simplify(self):
        """Return a new formula with both meaning and glue simplified."""
        return self.__class__(self.meaning.simplify(), self.glue.simplify(), self.indices)
    def __eq__(self, other):
        # NOTE(review): indices are deliberately excluded from equality —
        # presumably two formulas with the same meaning/glue are the same
        # premise regardless of provenance; confirm.
        return self.__class__ == other.__class__ and self.meaning == other.meaning and self.glue == other.glue
    def __ne__(self, other):
        return not self == other
    # sorting for use in doctests which must be deterministic
    def __lt__(self, other):
        return str(self) < str(other)
    def __str__(self):
        assert isinstance(self.indices, set)
        accum = '%s : %s' % (self.meaning, self.glue)
        if self.indices:
            accum += ' : {' + ', '.join(str(index) for index in self.indices) + '}'
        return accum
    def __repr__(self):
        return "%s" % self
@python_2_unicode_compatible
class GlueDict(dict):
    """Maps semantic types (e.g. POS tags) to glue-formula templates read
    from a semtype file.  Each value is itself a dict keyed by a frozenset
    of dependency relations (or None for the unrestricted entry), whose
    values are lists of [meaning, glue] template string pairs.
    """
    def __init__(self, filename, encoding=None):
        """
        :param filename: path (or nltk.data resource name) of the semtype file
        :param encoding: text encoding passed to nltk.data.load
        """
        self.filename = filename
        self.file_encoding = encoding
        self.read_file()
    def read_file(self, empty_first=True):
        """Parse the semtype file and populate this dict.

        :param empty_first: if True, clear existing entries first
        :raises RuntimeError: if a formula line is syntactically malformed
        """
        if empty_first:
            self.clear()
        try:
            contents = nltk.data.load(self.filename, format='text', encoding=self.file_encoding)
            # TODO: the above can't handle zip files, but this should anyway be fixed in nltk.data.load()
        except LookupError as e:
            # Retry as an explicit file path before giving up.
            try:
                contents = nltk.data.load('file:' + self.filename, format='text', encoding=self.file_encoding)
            except LookupError:
                raise e
        lines = contents.splitlines()
        for line in lines: # example: 'n : (\\x.(<word> x), (v-or))'
                           # lambdacalc -^ linear logic -^
            line = line.strip() # remove trailing newline
            if not len(line): continue # skip empty lines
            if line[0] == '#': continue # skip commented out lines
            parts = line.split(' : ', 2) # ['verb', '(\\x.(<word> x), ( subj -o f ))', '[subj]']
            glue_formulas = []
            paren_count = 0
            tuple_start = 0
            tuple_comma = 0
            relationships = None
            if len(parts) > 1:
                # Hand-rolled scan of the '(meaning, glue)' tuples: track
                # parenthesis depth and the comma separating the pair.
                for (i, c) in enumerate(parts[1]):
                    if c == '(':
                        if paren_count == 0: # if it's the first '(' of a tuple
                            tuple_start = i+1 # then save the index
                        paren_count += 1
                    elif c == ')':
                        paren_count -= 1
                        if paren_count == 0: # if it's the last ')' of a tuple
                            meaning_term = parts[1][tuple_start:tuple_comma] # '\\x.(<word> x)'
                            glue_term = parts[1][tuple_comma+1:i] # '(v-r)'
                            glue_formulas.append([meaning_term, glue_term]) # add the GlueFormula to the list
                    elif c == ',':
                        if paren_count == 1: # if it's a comma separating the parts of the tuple
                            tuple_comma = i # then save the index
                    elif c == '#': # skip comments at the ends of lines
                        if paren_count != 0: # if the line hasn't parsed correctly so far
                            raise RuntimeError('Formula syntax is incorrect for entry ' + line)
                        break # break to the next line
            if len(parts) > 2: #if there is a relationship entry at the end
                rel_start = parts[2].index('[')+1
                rel_end = parts[2].index(']')
                if rel_start == rel_end:
                    relationships = frozenset()
                else:
                    relationships = frozenset(r.strip() for r in parts[2][rel_start:rel_end].split(','))
            try:
                start_inheritance = parts[0].index('(')
                end_inheritance = parts[0].index(')')
                sem = parts[0][:start_inheritance].strip()
                supertype = parts[0][start_inheritance+1:end_inheritance]
            except ValueError:
                # str.index raises ValueError when there are no inheritance
                # parentheses in the entry name; the entry has no supertype.
                # (Was a bare 'except:', which would also have hidden real
                # programming errors.)
                sem = parts[0].strip()
                supertype = None
            if sem not in self:
                self[sem] = {}
            if relationships is None: #if not specified for a specific relationship set
                #add all relationship entries for parents
                if supertype:
                    for rels in self[supertype]:
                        if rels not in self[sem]:
                            self[sem][rels] = []
                        glue = self[supertype][rels]
                        self[sem][rels].extend(glue)
                        self[sem][rels].extend(glue_formulas) # add the glue formulas to every rel entry
                else:
                    if None not in self[sem]:
                        self[sem][None] = []
                    self[sem][None].extend(glue_formulas) # add the glue formulas to every rel entry
            else:
                if relationships not in self[sem]:
                    self[sem][relationships] = []
                if supertype:
                    self[sem][relationships].extend(self[supertype][relationships])
                self[sem][relationships].extend(glue_formulas) # add the glue entry to the dictionary
    def __str__(self):
        """Render every entry, one glue formula per line, aligned under
        its semantic type and suffixed with its relationship set."""
        accum = ''
        for pos in self:
            str_pos = "%s" % pos
            for relset in self[pos]:
                i = 1
                for gf in self[pos][relset]:
                    if i == 1:
                        accum += str_pos + ': '
                    else:
                        accum += ' '*(len(str_pos)+2)
                    accum += "%s" % gf
                    if relset and i == len(self[pos][relset]):
                        accum += ' : %s' % relset
                    accum += '\n'
                    i += 1
        return accum
    def to_glueformula_list(self, depgraph, node=None, counter=None, verbose=False):
        """Recursively collect the glue formulas for *node* and all of its
        dependents.  With node=None, start from the graph's root."""
        if node is None:
            # TODO: should it be depgraph.root? Is this code tested?
            top = depgraph.nodes[0]
            depList = sum(list(top['deps'].values()), [])
            root = depgraph.nodes[depList[0]]
            return self.to_glueformula_list(depgraph, root, Counter(), verbose)
        glueformulas = self.lookup(node, depgraph, counter)
        for dep_idx in sum(list(node['deps'].values()), []):
            dep = depgraph.nodes[dep_idx]
            glueformulas.extend(self.to_glueformula_list(depgraph, dep, counter, verbose))
        return glueformulas
    def lookup(self, node, depgraph, counter):
        """Return the instantiated glue formulas for a single node, or []
        if no semtype entry exists for it.

        :raises KeyError: if the semtype exists but no entry matches the
            node's relationship set
        """
        semtype_names = self.get_semtypes(node)
        semtype = None
        for name in semtype_names:
            if name in self:
                semtype = self[name]
                break
        if semtype is None:
            # raise KeyError, "There is no GlueDict entry for sem type '%s' (for '%s')" % (sem, word)
            return []
        self.add_missing_dependencies(node, depgraph)
        lookup = self._lookup_semtype_option(semtype, node, depgraph)
        if not len(lookup):
            raise KeyError(
                "There is no GlueDict entry for sem type of '%s' "
                "with tag '%s', and rel '%s'" %
                (node['word'], node['tag'], node['rel'])
            )
        return self.get_glueformulas_from_semtype_entry(lookup, node['word'], node, depgraph, counter)
    def add_missing_dependencies(self, node, depgraph):
        """For a 'main' node, graft the head's subject onto this node's
        dependents so the subject is visible during lookup."""
        rel = node['rel'].lower()
        if rel == 'main':
            headnode = depgraph.nodes[node['head']]
            subj = self.lookup_unique('subj', headnode, depgraph)
            relation = subj['rel']
            node['deps'].setdefault(relation,[])
            node['deps'][relation].append(subj['address'])
            #node['deps'].append(subj['address'])
    def _lookup_semtype_option(self, semtype, node, depgraph):
        """Pick the semtype entry whose relationship set best matches the
        node's (non-optional) dependents."""
        relationships = frozenset(
            depgraph.nodes[dep]['rel'].lower()
            for dep in sum(list(node['deps'].values()), [])
            if depgraph.nodes[dep]['rel'].lower() not in OPTIONAL_RELATIONSHIPS
        )
        try:
            lookup = semtype[relationships]
        except KeyError:
            # An exact match is not found, so find the best match where
            # 'best' is defined as the glue entry whose relationship set has the
            # most relations of any possible relationship set that is a subset
            # of the actual depgraph
            best_match = frozenset()
            for relset_option in set(semtype)-set([None]):
                if len(relset_option) > len(best_match) and \
                   relset_option < relationships:
                    best_match = relset_option
            if not best_match:
                if None in semtype:
                    best_match = None
                else:
                    return None
            lookup = semtype[best_match]
        return lookup
    def get_semtypes(self, node):
        """
        Based on the node, return a list of plausible semtypes in order of
        plausibility.
        """
        rel = node['rel'].lower()
        word = node['word'].lower()
        if rel == 'spec':
            if word in SPEC_SEMTYPES:
                return [SPEC_SEMTYPES[word]]
            else:
                return [SPEC_SEMTYPES['default']]
        elif rel in ['nmod', 'vmod']:
            return [node['tag'], rel]
        else:
            return [node['tag']]
    def get_glueformulas_from_semtype_entry(self, lookup, word, node, depgraph, counter):
        """Instantiate the [meaning, glue] templates of a matched entry
        for the given word and node, labeling the glue constants."""
        glueformulas = []
        glueFormulaFactory = self.get_GlueFormula_factory()
        for meaning, glue in lookup:
            gf = glueFormulaFactory(self.get_meaning_formula(meaning, word), glue)
            # The first formula is tagged with the bare word; later ones get
            # a numeric suffix so they remain distinguishable.
            if not len(glueformulas):
                gf.word = word
            else:
                gf.word = '%s%s' % (word, len(glueformulas)+1)
            gf.glue = self.initialize_labels(gf.glue, node, depgraph, counter.get())
            glueformulas.append(gf)
        return glueformulas
    def get_meaning_formula(self, generic, word):
        """
        :param generic: A meaning formula string containing the
        parameter "<word>"
        :param word: The actual word to be replace "<word>"
        """
        word = word.replace('.', '')
        return generic.replace('<word>', word)
    def initialize_labels(self, expr, node, depgraph, unique_index):
        """Recursively replace abstract labels in a glue term with concrete
        names derived from the node's position in the dependency graph.
        Capitalized names become linear logic variables; others constants."""
        if isinstance(expr, linearlogic.AtomicExpression):
            name = self.find_label_name(expr.name.lower(), node, depgraph, unique_index)
            if name[0].isupper():
                return linearlogic.VariableExpression(name)
            else:
                return linearlogic.ConstantExpression(name)
        else:
            return linearlogic.ImpExpression(
                self.initialize_labels(expr.antecedent, node, depgraph, unique_index),
                self.initialize_labels(expr.consequent, node, depgraph, unique_index)
            )
    def find_label_name(self, name, node, depgraph, unique_index):
        """Resolve a (possibly dotted) abstract label like 'super.subj.f'
        to a concrete label by walking the dependency graph."""
        try:
            dot = name.index('.')
            before_dot = name[:dot]
            after_dot = name[dot+1:]
            if before_dot == 'super':
                return self.find_label_name(after_dot, depgraph.nodes[node['head']], depgraph, unique_index)
            else:
                return self.find_label_name(after_dot, self.lookup_unique(before_dot, node, depgraph), depgraph, unique_index)
        except ValueError:
            # No dot: 'name' is a terminal label keyword.
            lbl = self.get_label(node)
            if name == 'f':
                return lbl
            elif name == 'v':
                return '%sv' % lbl
            elif name == 'r':
                return '%sr' % lbl
            elif name == 'super':
                return self.get_label(depgraph.nodes[node['head']])
            elif name == 'var':
                return '%s%s' % (lbl.upper(), unique_index)
            elif name == 'a':
                return self.get_label(self.lookup_unique('conja', node, depgraph))
            elif name == 'b':
                return self.get_label(self.lookup_unique('conjb', node, depgraph))
            else:
                return self.get_label(self.lookup_unique(name, node, depgraph))
    def get_label(self, node):
        """
        Pick an alphabetic character as identifier for an entity in the model.

        :param node: dependency graph node whose 'address' selects the letter
        """
        value = node['address']
        # Wrap around the 26-letter alphabet; previously 'value - 1' was
        # used directly as the index, which raised IndexError for graphs
        # with more than 26 nodes.  The numeric suffix below already
        # distinguishes the wrapped labels.
        letter = ['f','g','h','i','j','k','l','m','n','o','p','q','r','s',
                  't','u','v','w','x','y','z','a','b','c','d','e'][(value-1) % 26]
        num = int(value) // 26
        if num > 0:
            return letter + str(num)
        else:
            return letter
    def lookup_unique(self, rel, node, depgraph):
        """
        Lookup 'key'. There should be exactly one item in the associated relation.
        """
        deps = [
            depgraph.nodes[dep]
            for dep in sum(list(node['deps'].values()), [])
            if depgraph.nodes[dep]['rel'].lower() == rel.lower()
        ]
        if len(deps) == 0:
            raise KeyError("'%s' doesn't contain a feature '%s'" % (node['word'], rel))
        elif len(deps) > 1:
            raise KeyError("'%s' should only have one feature '%s'" % (node['word'], rel))
        else:
            return deps[0]
    def get_GlueFormula_factory(self):
        """Factory hook; DRT subclasses override to build DrtGlueFormula."""
        return GlueFormula
class Glue(object):
    """End-to-end glue-semantics pipeline: dependency-parse a sentence,
    look up and compile glue premises for each node, then combine them
    into semantic readings.
    """
    def __init__(self, semtype_file=None, remove_duplicates=False,
                 depparser=None, verbose=False):
        """
        :param semtype_file: path of the semtype rules file; defaults to
            the sample 'glue.semtype' grammar
        :param remove_duplicates: if True, drop readings that the prover
            shows to be logically equivalent to one already collected
        :param depparser: dependency parser to use; lazily defaults to
            MaltParser (see dep_parse)
        :param verbose: if True, print progress information
        """
        self.verbose = verbose
        self.remove_duplicates = remove_duplicates
        self.depparser = depparser
        from nltk import Prover9
        self.prover = Prover9()
        if semtype_file:
            self.semtype_file = semtype_file
        else:
            self.semtype_file = os.path.join('grammars', 'sample_grammars','glue.semtype')
    def train_depparser(self, depgraphs=None):
        # Train on the given graphs, or fall back to the sample CoNLL
        # training data shipped with NLTK.
        if depgraphs:
            self.depparser.train(depgraphs)
        else:
            self.depparser.train_from_file(nltk.data.find(
                os.path.join('grammars', 'sample_grammars',
                             'glue_train.conll')))
    def parse_to_meaning(self, sentence):
        """Return every derivable meaning reading for *sentence* (a list
        of token strings — see dep_parse)."""
        readings = []
        for agenda in self.parse_to_compiled(sentence):
            readings.extend(self.get_readings(agenda))
        return readings
    def get_readings(self, agenda):
        """Combine a list of compiled glue premises into complete readings.

        Pops premises off the agenda one at a time, pairing each
        implicational (non-atomic) glue term with every unifiable atomic
        term already seen (and vice versa), pushing successful applications
        back onto the agenda.  A result counts as a reading when it has
        consumed all of the original premises (its index set covers the
        whole agenda).
        """
        readings = []
        agenda_length = len(agenda)
        atomics = dict()
        nonatomics = dict()
        while agenda: # is not empty
            cur = agenda.pop()
            glue_simp = cur.glue.simplify()
            if isinstance(glue_simp, linearlogic.ImpExpression): # if cur.glue is non-atomic
                for key in atomics:
                    try:
                        if isinstance(cur.glue, linearlogic.ApplicationExpression):
                            bindings = cur.glue.bindings
                        else:
                            bindings = linearlogic.BindingDict()
                        glue_simp.antecedent.unify(key, bindings)
                        for atomic in atomics[key]:
                            if not (cur.indices & atomic.indices): # if the sets of indices are disjoint
                                try:
                                    agenda.append(cur.applyto(atomic))
                                except linearlogic.LinearLogicApplicationException:
                                    pass
                    except linearlogic.UnificationException:
                        pass
                try:
                    nonatomics[glue_simp.antecedent].append(cur)
                except KeyError:
                    nonatomics[glue_simp.antecedent] = [cur]
            else: # else cur.glue is atomic
                for key in nonatomics:
                    for nonatomic in nonatomics[key]:
                        try:
                            if isinstance(nonatomic.glue, linearlogic.ApplicationExpression):
                                bindings = nonatomic.glue.bindings
                            else:
                                bindings = linearlogic.BindingDict()
                            glue_simp.unify(key, bindings)
                            if not (cur.indices & nonatomic.indices): # if the sets of indices are disjoint
                                try:
                                    agenda.append(nonatomic.applyto(cur))
                                except linearlogic.LinearLogicApplicationException:
                                    pass
                        except linearlogic.UnificationException:
                            pass
                try:
                    atomics[glue_simp].append(cur)
                except KeyError:
                    atomics[glue_simp] = [cur]
        # Only formulas that consumed every original premise are readings.
        for entry in atomics:
            for gf in atomics[entry]:
                if len(gf.indices) == agenda_length:
                    self._add_to_reading_list(gf, readings)
        for entry in nonatomics:
            for gf in nonatomics[entry]:
                if len(gf.indices) == agenda_length:
                    self._add_to_reading_list(gf, readings)
        return readings
    def _add_to_reading_list(self, glueformula, reading_list):
        """Append the formula's meaning, optionally skipping it if the
        prover finds it equivalent to a reading already in the list."""
        add_reading = True
        if self.remove_duplicates:
            for reading in reading_list:
                try:
                    if reading.equiv(glueformula.meaning, self.prover):
                        add_reading = False
                        break
                except Exception as e:
                    #if there is an exception, the syntax of the formula
                    #may not be understandable by the prover, so don't
                    #throw out the reading.
                    print('Error when checking logical equality of statements', e)
                    pass
        if add_reading:
            reading_list.append(glueformula.meaning)
    def parse_to_compiled(self, sentence):
        """Return one compiled premise list per dependency parse of the
        sentence."""
        gfls = [self.depgraph_to_glue(dg) for dg in self.dep_parse(sentence)]
        return [self.gfl_to_compiled(gfl) for gfl in gfls]
    def dep_parse(self, sentence):
        """
        Return a dependency graph for the sentence.

        :param sentence: the sentence to be parsed
        :type sentence: list(str)
        :rtype: DependencyGraph
        """
        #Lazy-initialize the depparser
        if self.depparser is None:
            from nltk.parse import MaltParser
            self.depparser = MaltParser(tagger=self.get_pos_tagger())
        if not self.depparser._trained:
            self.train_depparser()
        return self.depparser.parse(sentence, verbose=self.verbose)
    def depgraph_to_glue(self, depgraph):
        """Collect the glue formulas for every node of a dependency graph."""
        return self.get_glue_dict().to_glueformula_list(depgraph)
    def get_glue_dict(self):
        # Factory hook; DRT subclasses return a DrtGlueDict instead.
        return GlueDict(self.semtype_file)
    def gfl_to_compiled(self, gfl):
        """Compile a list of glue formulas, sharing one index counter so
        premise indices are globally unique within the list."""
        index_counter = Counter()
        return_list = []
        for gf in gfl:
            return_list.extend(gf.compile(index_counter))
        if self.verbose:
            print('Compiled Glue Premises:')
            for cgf in return_list:
                print(cgf)
        return return_list
    def get_pos_tagger(self):
        """Build a backoff POS-tagger chain (regexp -> unigram -> bigram ->
        trigram, trained on the Brown news corpus) with a final regexp
        override mapping determiners to quantifier tags."""
        from nltk.corpus import brown
        regexp_tagger = RegexpTagger(
            [(r'^-?[0-9]+(.[0-9]+)?$', 'CD'),   # cardinal numbers
             (r'(The|the|A|a|An|an)$', 'AT'),   # articles
             (r'.*able$', 'JJ'),                # adjectives
             (r'.*ness$', 'NN'),                # nouns formed from adjectives
             (r'.*ly$', 'RB'),                  # adverbs
             (r'.*s$', 'NNS'),                  # plural nouns
             (r'.*ing$', 'VBG'),                # gerunds
             (r'.*ed$', 'VBD'),                 # past tense verbs
             (r'.*', 'NN')                      # nouns (default)
             ])
        brown_train = brown.tagged_sents(categories='news')
        unigram_tagger = UnigramTagger(brown_train, backoff=regexp_tagger)
        bigram_tagger = BigramTagger(brown_train, backoff=unigram_tagger)
        trigram_tagger = TrigramTagger(brown_train, backoff=bigram_tagger)
        #Override particular words
        main_tagger = RegexpTagger(
            [(r'(A|a|An|an)$', 'ex_quant'),
             (r'(Every|every|All|all)$', 'univ_quant')
             ], backoff=trigram_tagger)
        return main_tagger
class DrtGlueFormula(GlueFormula):
    """A GlueFormula whose meaning side is a DRT expression."""
    def __init__(self, meaning, glue, indices=None):
        """
        :param meaning: a drt.DrtExpression, or a string parseable as one
        :param glue: a linearlogic.Expression, or a string parseable as one
        :param indices: set of int indices of consumed compiled premises
        :raises RuntimeError: if either term is neither string nor expression
        """
        if isinstance(meaning, string_types):
            parsed_meaning = drt.DrtExpression.fromstring(meaning)
        elif isinstance(meaning, drt.DrtExpression):
            parsed_meaning = meaning
        else:
            raise RuntimeError('Meaning term neither string or expression: %s, %s' % (meaning, meaning.__class__))
        if isinstance(glue, string_types):
            parsed_glue = linearlogic.LinearLogicParser().parse(glue)
        elif isinstance(glue, linearlogic.Expression):
            parsed_glue = glue
        else:
            raise RuntimeError('Glue term neither string or expression: %s, %s' % (glue, glue.__class__))
        self.meaning = parsed_meaning
        self.glue = parsed_glue
        self.indices = indices if indices else set()
    def make_VariableExpression(self, name):
        """Build a DRT variable expression (overrides the base factory)."""
        return drt.DrtVariableExpression(name)
    def make_LambdaExpression(self, variable, term):
        """Build a DRT lambda expression (overrides the base factory)."""
        return drt.DrtLambdaExpression(variable, term)
class DrtGlueDict(GlueDict):
    """A GlueDict whose entries are instantiated as DrtGlueFormula objects."""
    def get_GlueFormula_factory(self):
        # Produce DRT-based glue formulas instead of plain logic ones.
        return DrtGlueFormula
class DrtGlue(Glue):
    """Glue-semantics pipeline that produces DRT meaning representations."""
    def __init__(self, semtype_file=None, remove_duplicates=False,
                 depparser=None, verbose=False):
        # Fall back to the sample DRT semtype grammar shipped with NLTK.
        chosen_file = semtype_file or os.path.join(
            'grammars', 'sample_grammars', 'drt_glue.semtype')
        Glue.__init__(self, chosen_file, remove_duplicates, depparser, verbose)
    def get_glue_dict(self):
        """Return a DRT-aware glue dictionary for this semtype file."""
        return DrtGlueDict(self.semtype_file)
def demo(show_example=-1):
    """Run the example sentences through the glue-semantics pipeline and
    print each derived reading.

    :param show_example: index of the single example to run, or -1 for all
    """
    from nltk.parse import MaltParser
    examples = ['David sees Mary',
                'David eats a sandwich',
                'every man chases a dog',
                'every man believes a dog sleeps',
                'John gives David a sandwich',
                'John chases himself']
#                'John persuades David to order a pizza',
#                'John tries to go',
#                'John tries to find a unicorn',
#                'John seems to vanish',
#                'a unicorn seems to approach',
#                'every big cat leaves',
#                'every gray cat leaves',
#                'every big gray cat leaves',
#                'a former senator leaves',
    print('============== DEMO ==============')
    # Closed-vocabulary tagger covering exactly the demo sentences.
    tagger = RegexpTagger(
        [('^(David|Mary|John)$', 'NNP'),
         ('^(sees|eats|chases|believes|gives|sleeps|chases|persuades|tries|seems|leaves)$', 'VB'),
         ('^(go|order|vanish|find|approach)$', 'VB'),
         ('^(a)$', 'ex_quant'),
         ('^(every)$', 'univ_quant'),
         ('^(sandwich|man|dog|pizza|unicorn|cat|senator)$', 'NN'),
         ('^(big|gray|former)$', 'JJ'),
         ('^(him|himself)$', 'PRP')
         ])
    depparser = MaltParser(tagger=tagger)
    glue = Glue(depparser=depparser, verbose=False)
    for (i, sentence) in enumerate(examples):
        if i==show_example or show_example==-1:
            print('[[[Example %s]]] %s' % (i, sentence))
            for reading in glue.parse_to_meaning(sentence.split()):
                print(reading.simplify())
            print('')
# Run the demonstration when this module is executed as a script.
if __name__ == '__main__':
    demo()
| |
# Brain Tumor Classification
# Train 3D Multi-Scale CNN.
# Author: Qixun QU
# Copyleft: MIT Licence
# ,,, ,,,
# ;" '; ;' ",
# ; @.ss$$$$$$s.@ ;
# `s$$$$$$$$$$$$$$$'
# $$$$$$$$$$$$$$$$$$
# $$$$P""Y$$$Y""W$$$$$
# $$$$ p"$$$"q $$$$$
# $$$$ .$$$$$. $$$$'
# $$$DaU$$O$$DaU$$$'
# '$$$$'.^.'$$$$'
# '&$$$$$&'
from __future__ import print_function
import os
import json
import shutil
import argparse
from btc_models import BTCModels
from keras import backend as K
from keras.optimizers import Adam
from keras.callbacks import (CSVLogger,
TensorBoard,
ModelCheckpoint,
LearningRateScheduler)
class BTCTrain(object):
    # Trains the 3D Multi-Scale CNN defined in btc_models on a BTCDataset,
    # saving weights and learning curves/TensorBoard logs per parameter set.
    def __init__(self,
                 paras_name,
                 paras_json_path,
                 weights_save_dir,
                 logs_save_dir,
                 save_best_weights=True):
        '''__INIT__

        Initialization before training model.

        Inputs:
        -------
        - paras_name: string, name of hyperparameters set,
                      can be found in hyper_paras.json.
        - paras_json_path: string, path of file which provides
                           hyperparameters, "hyper_paras.json"
                           in this project.
        - weights_save_dir: string, directory path where saves
                            trained model.
        - logs_save_dir: string, directory path where saves
                         logs of training process.
        - save_best_weights: boolean, if save the model with best
                             validation accuracy. Default is True.

        '''
        # Dataset: training, validation and test
        self.data = None
        # If save the model which provides best validation accuracy
        self.save_best_weights = save_best_weights
        # Load hyperparameters
        self.paras = self.load_paras(paras_json_path, paras_name)
        self._load_paras()
        # Create folder for saving weights
        self.weights_dir = os.path.join(weights_save_dir, paras_name)
        self.create_dir(self.weights_dir)
        # Create folder for saving training logs
        self.logs_dir = os.path.join(logs_save_dir, paras_name)
        self.create_dir(self.logs_dir)
        # Initialize files' names for weights at last or best epoch
        self.last_weights_path = os.path.join(self.weights_dir, "last.h5")
        self.best_weights_path = os.path.join(self.weights_dir, "best.h5")
        # CSV file path for writing learning curves
        self.curves_path = os.path.join(self.logs_dir, "curves.csv")
        return
    def _load_paras(self):
        '''_LOAD_PARAS

        Load hyperparameters from hyper_paras.json.

        '''
        # Parameters to construct model
        self.model_name = self.paras["model_name"]
        self.input_shape = self.paras["input_shape"]
        self.pooling = self.paras["pooling"]
        self.l2_coeff = self.paras["l2_coeff"]
        self.drop_rate = self.paras["drop_rate"]
        self.bn_momentum = self.paras["bn_momentum"]
        self.initializer = self.paras["initializer"]
        # Parameters to train model
        self.optimizer = self.paras["optimizer"]
        self.lr_start = self.paras["lr_start"]
        self.epochs_num = self.paras["epochs_num"]
        self.batch_size = self.paras["batch_size"]
        return
    def _load_model(self):
        '''_LOAD_MODEL

        Create 3D Multi-Scale CNN.

        '''
        self.model = BTCModels(model_name=self.model_name,
                               input_shape=self.input_shape,
                               pooling=self.pooling,
                               l2_coeff=self.l2_coeff,
                               drop_rate=self.drop_rate,
                               bn_momentum=self.bn_momentum,
                               initializer=self.initializer).model
        return
    def _set_optimizer(self):
        '''_SET_OPTIMIZER

        Set optimizer according to the given parameter.
        Use "Adam" in this project.

        '''
        # NOTE(review): self.opt_fcn is only assigned when optimizer is
        # "adam"; any other value would cause an AttributeError later in
        # run() — confirm hyper_paras.json only ever uses "adam".
        if self.optimizer == "adam":
            self.opt_fcn = Adam(lr=self.lr_start)
        return
    def _set_lr_scheduler(self, epoch):
        '''_SET_LR_SCHEDULER

        Learning rate scheduler for training process.
        LR: [init] * 40 + [init * 0.1] * 30 + [init * 0.01] * 30

        Input:
        ------
        - epoch: int, nth training epoch.

        Output:
        -------
        - Learning rate, a float, for nth training epoch.

        '''
        # NOTE(review): the schedule only defines 100 per-epoch values;
        # epochs_num > 100 would raise IndexError here — confirm configs
        # keep epochs_num <= 100.
        lrs = [self.lr_start] * 40 + \
              [self.lr_start * 0.1] * 30 + \
              [self.lr_start * 0.01] * 30
        print("Learning rate:", lrs[epoch])
        return lrs[epoch]
    def _set_callbacks(self):
        '''_SET_CALLBACKS

        Set callback functions while training model.
        -1- Save learning curves while training.
        -2- Set learning rate scheduler.
        -3- Add support for TensorBoard.
        -4- Save best model while training. (optional)

        '''
        # Save learning curves in csv file while training
        csv_logger = CSVLogger(self.curves_path,
                               append=True, separator=",")
        # Set learning rate scheduler
        lr_scheduler = LearningRateScheduler(self._set_lr_scheduler)
        # Add support for TensorBoard
        tb = TensorBoard(log_dir=self.logs_dir,
                         batch_size=self.batch_size)
        self.callbacks = [csv_logger, lr_scheduler, tb]
        if self.save_best_weights:
            # Save best model while training
            # NOTE(review): "best" is selected by minimum val_loss here,
            # not by validation accuracy as the class docstring says —
            # confirm which criterion is intended.
            checkpoint = ModelCheckpoint(filepath=self.best_weights_path,
                                         monitor="val_loss",
                                         verbose=0,
                                         save_best_only=True)
            self.callbacks += [checkpoint]
        return
    def _print_score(self):
        '''_PRINT_SCORE

        Print out metrics (loss and accuracy) of
        training, validation and testing set.

        '''
        # Helper function to compute and print metrics
        def evaluate(x, y, data_str):
            score = self.model.evaluate(x, y, self.batch_size, 0)
            print(data_str + " Set: Loss: {0:.4f}, Accuracy: {1:.4f}".format(
                  score[0], score[1]))
            return
        evaluate(self.data.train_x, self.data.train_y, "Training")
        evaluate(self.data.valid_x, self.data.valid_y, "Validation")
        evaluate(self.data.test_x, self.data.test_y, "Testing")
        return
    def run(self, data):
        '''RUN

        Train model using given data.

        Input:
        ------
        - data: an BTCDataset instance, including features and
                labels of training, validation and testing set.

        '''
        print("\nTraining the model.\n")
        self.data = data
        # Configurations of model and optimizer
        self._load_model()
        self._set_optimizer()
        # Compile model and print its structure
        self.model.compile(loss="categorical_crossentropy",
                           optimizer=self.opt_fcn,
                           metrics=["accuracy"])
        self.model.summary()
        self._set_callbacks()
        # Train model
        self.model.fit(self.data.train_x, self.data.train_y,
                       batch_size=self.batch_size,
                       epochs=self.epochs_num,
                       validation_data=(self.data.valid_x,
                                        self.data.valid_y),
                       shuffle=True,
                       callbacks=self.callbacks)
        # Save model in last epoch
        self.model.save(self.last_weights_path)
        # Print metrics
        self._print_score()
        # Destroy the current TF graph
        K.clear_session()
        return
    @staticmethod
    def load_paras(paras_json_path, paras_name):
        '''LOAD_PARAS

        Load hyperparameters from json file.
        See hyper_paras.json.

        Inputs:
        -------
        - paras_name: string, name of hyperparameters set,
                      can be found in hyper_paras.json.
        - paras_json_path: string, path of file which provides
                           hyperparameters, "hyper_paras.json"
                           in this project.

        Output:
        -------
        - A dictionary of hyperparameters.

        '''
        paras = json.load(open(paras_json_path))
        return paras[paras_name]
    @staticmethod
    def create_dir(dir_path, rm=True):
        '''CREATE_DIR

        Create directory.

        Inputs:
        -------
        - dir_path: string, path of new directory.
        - rm: boolean, remove existing directory or not.

        '''
        if os.path.isdir(dir_path):
            if rm:
                shutil.rmtree(dir_path)
                os.makedirs(dir_path)
        else:
            os.makedirs(dir_path)
        return
def main(hyper_paras_name):
    '''MAIN

    Load the configuration, build and split the dataset, then train the
    3D Multi-Scale CNN with the selected hyperparameter set.

    Inputs:
    -------
    - hyper_paras_name: string, the name of hyperparameters set,
                        which can be found in hyper_paras.json.

    '''
    from btc_dataset import BTCDataset
    # pre_paras.json provides I/O directory paths and the information
    # needed to split the dataset.
    config = json.load(open("pre_paras.json"))
    # Root path of the input data
    parent_dir = os.path.dirname(os.getcwd())
    data_dir = os.path.join(parent_dir, config["data_dir"])
    # Directories of the preprocessed HGG and LGG images
    hgg_dir = os.path.join(data_dir, config["hgg_out"])
    lgg_dir = os.path.join(data_dir, config["lgg_out"])
    # Output directories for trained weights and training/validation logs
    weights_save_dir = os.path.join(parent_dir, config["weights_save_dir"])
    logs_save_dir = os.path.join(parent_dir, config["logs_save_dir"])
    # Build and partition the dataset
    dataset = BTCDataset(hgg_dir, lgg_dir,
                         volume_type=config["volume_type"],
                         pre_trainset_path=config["pre_trainset_path"],
                         pre_validset_path=config["pre_validset_path"],
                         pre_testset_path=config["pre_testset_path"],
                         data_format=config["data_format"])
    dataset.run(pre_split=config["pre_split"],
                save_split=config["save_split"],
                save_split_dir=config["save_split_dir"])
    # Configure the trainer and run training
    trainer = BTCTrain(paras_name=hyper_paras_name,
                       paras_json_path=config["paras_json_path"],
                       weights_save_dir=weights_save_dir,
                       logs_save_dir=logs_save_dir,
                       save_best_weights=config["save_best_weights"])
    trainer.run(dataset)
if __name__ == "__main__":
# Command line
# python btc_train.py --paras=paras-1
parser = argparse.ArgumentParser()
# Set json file path to extract hyperparameters
help_str = "Select a set of hyper-parameters in hyper_paras.json."
parser.add_argument("--paras", action="store", default="paras-1",
dest="hyper_paras_name", help=help_str)
args = parser.parse_args()
main(args.hyper_paras_name)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from tempfile import NamedTemporaryFile
from unittest import TestCase, mock
from airflow.providers.google.marketing_platform.operators.campaign_manager import (
GoogleCampaignManagerBatchInsertConversionsOperator,
GoogleCampaignManagerBatchUpdateConversionsOperator,
GoogleCampaignManagerDeleteReportOperator,
GoogleCampaignManagerDownloadReportOperator,
GoogleCampaignManagerInsertReportOperator,
GoogleCampaignManagerRunReportOperator,
)
API_VERSION = "api_version"
GCP_CONN_ID = "google_cloud_default"
CONVERSION = {
"kind": "dfareporting#conversion",
"floodlightActivityId": 1234,
"floodlightConfigurationId": 1234,
"gclid": "971nc2849184c1914019v1c34c14",
"ordinal": "0",
"customVariables": [
{
"kind": "dfareporting#customFloodlightVariable",
"type": "U10",
"value": "value",
}
],
}
class TestGoogleCampaignManagerDeleteReportOperator(TestCase):
    """Tests for GoogleCampaignManagerDeleteReportOperator.execute."""
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators.campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.BaseOperator")
    def test_execute(self, mock_base_op, hook_mock):
        """The operator should build a hook with the default connection and
        forward the profile/report ids to delete_report."""
        op = GoogleCampaignManagerDeleteReportOperator(
            profile_id="PROFILE_ID",
            report_id="REPORT_ID",
            api_version=API_VERSION,
            task_id="test_task",
        )
        op.execute(context=None)
        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            delegate_to=None,
            api_version=API_VERSION,
            impersonation_chain=None,
        )
        hook_mock.return_value.delete_report.assert_called_once_with(
            profile_id="PROFILE_ID", report_id="REPORT_ID"
        )
class TestGoogleCampaignManagerGetReportOperator(TestCase):
    # NOTE(review): despite the "Get" in the class name, this exercises
    # GoogleCampaignManagerDownloadReportOperator — consider renaming.
    @mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.http")
    @mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.tempfile")
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators.campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.GCSHook")
    @mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.BaseOperator")
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerDownloadReportOperator.xcom_push"
    )
    def test_execute(
        self,
        xcom_mock,
        mock_base_op,
        gcs_hook_mock,
        hook_mock,
        tempfile_mock,
        http_mock,
    ):
        """Downloading a report should fetch the report file via the CM hook,
        upload it gzipped to GCS, and push the object name to XCom."""
        profile_id = "PROFILE_ID"
        report_id = "REPORT_ID"
        file_id = "FILE_ID"
        bucket_name = "test_bucket"
        report_name = "test_report.csv"
        temp_file_name = "TEST"
        # The media download completes after a single chunk.
        http_mock.MediaIoBaseDownload.return_value.next_chunk.return_value = (
            None,
            True,
        )
        # The operator writes into a NamedTemporaryFile; fix its name so the
        # GCS upload call can be asserted against it.
        tempfile_mock.NamedTemporaryFile.return_value.__enter__.return_value.name = temp_file_name
        op = GoogleCampaignManagerDownloadReportOperator(
            profile_id=profile_id,
            report_id=report_id,
            file_id=file_id,
            bucket_name=bucket_name,
            report_name=report_name,
            api_version=API_VERSION,
            task_id="test_task",
        )
        op.execute(context=None)
        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            delegate_to=None,
            api_version=API_VERSION,
            impersonation_chain=None,
        )
        hook_mock.return_value.get_report_file.assert_called_once_with(
            profile_id=profile_id, report_id=report_id, file_id=file_id
        )
        gcs_hook_mock.assert_called_once_with(
            google_cloud_storage_conn_id=GCP_CONN_ID,
            delegate_to=None,
            impersonation_chain=None,
        )
        # The uploaded object is the report name with a ".gz" suffix.
        gcs_hook_mock.return_value.upload.assert_called_once_with(
            bucket_name=bucket_name,
            object_name=report_name + ".gz",
            gzip=True,
            filename=temp_file_name,
            mime_type="text/csv",
        )
        xcom_mock.assert_called_once_with(None, key="report_name", value=report_name + ".gz")
class TestGoogleCampaignManagerInsertReportOperator(TestCase):
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators.campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.BaseOperator")
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerInsertReportOperator.xcom_push"
    )
    def test_execute(self, xcom_mock, mock_base_op, hook_mock):
        """Inserting a report delegates to the hook and pushes the new id to XCom."""
        report = {"report": "test"}
        hook_mock.return_value.insert_report.return_value = {"id": "test"}

        operator = GoogleCampaignManagerInsertReportOperator(
            profile_id="PROFILE_ID",
            report=report,
            api_version=API_VERSION,
            task_id="test_task",
        )
        operator.execute(context=None)

        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            delegate_to=None,
            api_version=API_VERSION,
            impersonation_chain=None,
        )
        hook_mock.return_value.insert_report.assert_called_once_with(
            profile_id="PROFILE_ID", report=report
        )
        xcom_mock.assert_called_once_with(None, key="report_id", value="test")

    def test_prepare_template(self):
        """A JSON file path given as `report` is loaded into a dict by prepare_template()."""
        report = {"key": "value"}
        with NamedTemporaryFile("w+", suffix=".json") as tmp:
            json.dump(report, tmp)
            tmp.flush()

            operator = GoogleCampaignManagerInsertReportOperator(
                profile_id="PROFILE_ID",
                report=tmp.name,
                api_version=API_VERSION,
                task_id="test_task",
            )
            operator.prepare_template()

            assert isinstance(operator.report, dict)
            assert operator.report == report
class TestGoogleCampaignManagerRunReportOperator(TestCase):
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators.campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.BaseOperator")
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators."
        "campaign_manager.GoogleCampaignManagerRunReportOperator.xcom_push"
    )
    def test_execute(self, xcom_mock, mock_base_op, hook_mock):
        """Running a report delegates to the hook and pushes the file id to XCom."""
        hook_mock.return_value.run_report.return_value = {"id": "FILE_ID"}

        operator = GoogleCampaignManagerRunReportOperator(
            profile_id="PROFILE_ID",
            report_id="REPORT_ID",
            synchronous=True,
            api_version=API_VERSION,
            task_id="test_task",
        )
        operator.execute(context=None)

        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            delegate_to=None,
            api_version=API_VERSION,
            impersonation_chain=None,
        )
        hook_mock.return_value.run_report.assert_called_once_with(
            profile_id="PROFILE_ID", report_id="REPORT_ID", synchronous=True
        )
        xcom_mock.assert_called_once_with(None, key="file_id", value="FILE_ID")
class TestGoogleCampaignManagerBatchInsertConversionsOperator(TestCase):
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators.campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.BaseOperator")
    def test_execute(self, mock_base_op, hook_mock):
        """Batch-inserting conversions forwards the payload and every encryption
        setting (plus the default max_failed_inserts) to the hook."""
        GoogleCampaignManagerBatchInsertConversionsOperator(
            task_id="insert_conversion",
            profile_id="PROFILE_ID",
            conversions=[CONVERSION],
            encryption_source="AD_SERVING",
            encryption_entity_type="DCM_ADVERTISER",
            encryption_entity_id=123456789,
        ).execute(None)

        hook_mock.return_value.conversions_batch_insert.assert_called_once_with(
            profile_id="PROFILE_ID",
            conversions=[CONVERSION],
            encryption_source="AD_SERVING",
            encryption_entity_type="DCM_ADVERTISER",
            encryption_entity_id=123456789,
            max_failed_inserts=0,
        )
class TestGoogleCampaignManagerBatchUpdateConversionOperator(TestCase):
    @mock.patch(
        "airflow.providers.google.marketing_platform.operators.campaign_manager.GoogleCampaignManagerHook"
    )
    @mock.patch("airflow.providers.google.marketing_platform.operators.campaign_manager.BaseOperator")
    def test_execute(self, mock_base_op, hook_mock):
        """Batch-updating conversions forwards the payload and every encryption
        setting (plus the default max_failed_updates) to the hook."""
        GoogleCampaignManagerBatchUpdateConversionsOperator(
            task_id="update_conversion",
            profile_id="PROFILE_ID",
            conversions=[CONVERSION],
            encryption_source="AD_SERVING",
            encryption_entity_type="DCM_ADVERTISER",
            encryption_entity_id=123456789,
        ).execute(None)

        hook_mock.return_value.conversions_batch_update.assert_called_once_with(
            profile_id="PROFILE_ID",
            conversions=[CONVERSION],
            encryption_source="AD_SERVING",
            encryption_entity_type="DCM_ADVERTISER",
            encryption_entity_id=123456789,
            max_failed_updates=0,
        )
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 29 07:00:33 2017
@author: Bob Anderson
"""
MIN_SIGMA = 0.1  # noise floor substituted when a supplied sigma is exactly 0.0 (see subFrameAdjusted)
from math import exp
import numpy as np
from pyoteapp.likelihood_calculations import cum_loglikelihood, aicc, logLikelihoodLine
from pyoteapp.likelihood_calculations import loglikelihood
from numba import njit
def aicModelValue(*, obsValue=None, B=None, A=None, sigmaB=None, sigmaA=None):
    """Classify an observation as baseline (B), event (A), or a valid
    intermediate point, using the Akaike Information Criterion.

    Returns B, A, or obsValue itself; the latter when the more complex
    'intermediate point' model wins despite its AIC complexity penalty.
    """
    assert B >= A
    assert sigmaA > 0.0
    assert sigmaB > 0.0

    # Readings at or outside the B/A envelope classify immediately.
    if obsValue >= B:
        return B
    if obsValue <= A:
        return A
    if B == A:
        return B

    # Noise for the intermediate model: linear interpolation between
    # sigmaA (at level A) and sigmaB (at level B).
    fraction = (obsValue - A) / (B - A)
    sigmaM = sigmaA + (sigmaB - sigmaA) * fraction

    scoreB = loglikelihood(obsValue, B, sigmaB)
    # Peak log-likelihood of the intermediate model, minus the AIC model
    # complexity 'penalty' of 1 for the extra dimension.
    scoreM = loglikelihood(B, B, sigmaM) - 1.0
    scoreA = loglikelihood(obsValue, A, sigmaA)

    if scoreM > scoreA and scoreM > scoreB:
        return obsValue  # Valid intermediate value
    return B if scoreB > scoreA else A
@njit(cache=True)
def model(B=0.0, A=0.0,
          D=-1, R=-1,
          sigmaB=0.0, sigmaA=0.0, numPts=0):
    """Build the piecewise-constant light-curve model and its noise track.

    D and R are transition indices; -1 encodes 'not present'.  Returns
    (m, sigma): two numPts-long arrays of model values and per-point sigmas.
    """
    assert numPts > 0 and sigmaA >= 0 and sigmaB >= 0

    m = np.zeros(numPts)
    sigma = np.zeros(numPts)

    noD = D == -1
    noR = R == -1

    if noD and noR:
        # Flat baseline: no event at all.
        m[:] = B
        sigma[:] = sigmaB
    elif not noD and not noR:
        # Full event: drop at D, recovery at R.
        assert D >= 0 and R > D
        assert D < numPts and R < numPts
        m[:D] = B
        m[D:R] = A
        m[R:] = B
        sigma[:D] = sigmaB
        sigma[D:R] = sigmaA
        sigma[R:] = sigmaB
    elif not noD:
        # Disappearance only: baseline then event to the end.
        assert D >= 0 and D < numPts
        m[:D] = B
        m[D:] = A
        sigma[:D] = sigmaB
        sigma[D:] = sigmaA
    else:
        # Reappearance only: event then baseline to the end.
        assert R >= 1 and R < numPts
        m[:R] = A
        m[R:] = B
        sigma[:R] = sigmaA
        sigma[R:] = sigmaB
    return m, sigma
def candidatesFromEventSize(*, eventType='DandR',
                            left=None, right=None,
                            minSize=None, maxSize=None):
    """Yield (D, R) candidate pairs constrained by event size.

    Implemented as a generator: for a large numPts the candidate count can
    reach many millions, so candidates are produced one at a time.  For
    'Donly' candidates R is None; for 'Ronly' candidates D is None.
    """
    assert right > left
    assert 1 <= minSize <= maxSize
    assert maxSize < right - left

    if eventType == 'Donly':
        # D slides so that the implied event size stays within bounds.
        yield from ((D, None) for D in range(right - maxSize + 1, right - minSize + 2))
    elif eventType == 'Ronly':
        yield from ((None, R) for R in range(left + minSize, left + maxSize + 1))
    elif eventType == 'DandR':
        for size in range(minSize, maxSize + 1):
            for start in range(left + 1, right + 1 - size):
                yield start, start + size
    else:
        raise Exception("Unrecognized event type")
def candidatesFromDandRlimits(*, eventType='DandR',
                              dLimits=None, rLimits=None):
    """Yield (D, R) candidate pairs from explicit index limits.

    This is implemented as a generator because if large limits are given, it is
    easy to get a very large candidate list, so it's best to
    create the candidates one at a time.

    dLimits/rLimits are inclusive (start, end) index pairs, assumed valid and
    non-overlapping.  For 'Donly' candidates R is None; for 'Ronly'
    candidates D is None.

    Raises:
        Exception: if eventType is not 'Donly', 'Ronly' or 'DandR'.
    """
    # The D and R limits are assumed valid as input and are non-overlapping
    if eventType == 'Donly':
        for D in range(dLimits[0], dLimits[1] + 1):
            yield D, None
    elif eventType == 'Ronly':
        for R in range(rLimits[0], rLimits[1] + 1):
            yield None, R
    elif eventType == 'DandR':
        for D in range(dLimits[0], dLimits[1] + 1):
            for R in range(rLimits[0], rLimits[1] + 1):
                yield D, R
    else:
        # Fixed typo: message read "Unrecognized edvent type"; now consistent
        # with the sibling candidate generators.
        raise Exception("Unrecognized event type")
@njit(cache=True)
def calcNumCandidatesFromDandRlimits(eventType='DandR', d_start=-1, d_end=-1,
                                     r_start=-1, r_end=-1):
    """Count the candidates candidatesFromDandRlimits() would yield.

    The D and R limits are assumed valid and non-overlapping; limits are
    inclusive, hence the +1 terms.
    """
    nD = d_end - d_start + 1
    nR = r_end - r_start + 1
    if eventType == 'Donly':
        return nD
    elif eventType == 'Ronly':
        return nR
    elif eventType == 'DandR':
        return nD * nR
    else:
        raise Exception("Unrecognized event type")
@njit(cache=True)
def calcNumCandidatesFromEventSize(eventType='DandR',
                                   left=None, right=None,
                                   minSize=None, maxSize=None):
    """Count the candidates candidatesFromEventSize() would yield."""
    numPts = right - left + 1
    assert numPts >= 0
    assert minSize >= 1
    assert maxSize >= minSize
    assert maxSize < numPts - 1

    sizeCount = maxSize - minSize + 1
    if eventType == 'DandR':
        # Closed form of the arithmetic series summing per-size position
        # counts over all event sizes in [minSize, maxSize].
        c2 = 2 * numPts - 2 - minSize - maxSize
        return int(sizeCount * c2 / 2)
    elif eventType == 'Donly':
        return sizeCount
    elif eventType == 'Ronly':
        return sizeCount
    else:
        raise Exception("Unrecognized edge specifier")
def calcBandA(*, yValues=None, left=None, right=None, cand=None):
    """Estimate the baseline level B and event level A for candidate (D, R).

    The transition reading itself is never averaged into either level.
    If the fit is degenerate (A >= B), A is clamped just below B.
    Returns (B, A).
    """
    assert right > left
    D, R = cand  # Extract D and R from the tuple

    def clamped(B, A):
        # Guard against degenerate fits where the event level is not below
        # the baseline.
        return B, (B * 0.999 if A >= B else A)

    if R is None:
        # 'Donly' candidate: baseline from points left of D (D excluded).
        assert D >= left
        B = np.mean(yValues[left:D])
        # D at the right edge leaves nothing to average for A, so the
        # reading at D itself is the best available estimate of A.
        A = yValues[D] if D == right else np.mean(yValues[D + 1:right + 1])
        return clamped(B, A)

    if D is None:
        # 'Ronly' candidate.
        assert R <= right
        # R at the right edge leaves nothing to average for B, so the
        # reading at R itself is the best available estimate of B.
        B = yValues[R] if R == right else np.mean(yValues[R + 1:right + 1])
        A = np.mean(yValues[left:R])  # smallest R is left + 1
        return clamped(B, A)

    # 'DandR' candidate: baseline pooled from both sides of the event.
    assert D >= left and R <= right and R > D
    leftBvals = yValues[left:D]  # smallest D will be 1
    rightBvals = yValues[right] if R == right else yValues[R + 1:right + 1]
    B = (np.sum(leftBvals) + np.sum(rightBvals)) / (leftBvals.size + rightBvals.size)
    # An event of size 1 has no interior A points --- fall back to y[D].
    A = yValues[D] if R - D == 1 else np.mean(yValues[D + 1:R])
    return clamped(B, A)
def scoreCandidate(yValues, left, right, cand, sigmaB, sigmaA):
    """Score candidate (D, R) with the raw piecewise model.

    Returns (cumulative log-likelihood over [left, right], B, A).
    """
    B, A = calcBandA(yValues=yValues, left=left, right=right, cand=cand)
    D, R = cand
    m, sigma = model(B=B, A=A, D=D, R=R,
                     sigmaB=sigmaB, sigmaA=sigmaA, numPts=yValues.size)
    score = cum_loglikelihood(yValues, m, sigma, left, right)
    return score, B, A
def scoreSubFrame(yValues, left, right, cand, sigmaB, sigmaA):
    """Score candidate (D, R) allowing sub-frame transition readings.

    A reading at D or R that lies strictly between A and B is kept as its
    own model value instead of being forced to B or A.  Returns
    (cumulative log-likelihood over [left, right], B, A).
    """
    B, A = calcBandA(yValues=yValues, left=left, right=right, cand=cand)
    D, R = cand
    m, sigma = model(B=B, A=A, D=D, R=R,
                     sigmaB=sigmaB, sigmaA=sigmaA, numPts=yValues.size)
    for edge in (D, R):
        if edge is not None and A < yValues[edge] < B:
            m[edge] = yValues[edge]
    return cum_loglikelihood(yValues, m, sigma, left, right), B, A
def candidateCounter(*, eventType='DandR',
                     dLimits=None, rLimits=None,
                     left=None, right=None,
                     numPts=None, minSize=None, maxSize=None):
    """Count the candidates a solver run would examine.

    Returns a (mode, count) tuple where mode is 'usedLimits', 'usedSize',
    or 'error' (count -1).  Explicit D/R limits take precedence over the
    event-size specification.
    """

    def sizeParamsValid():
        # All three size parameters must be present and mutually consistent.
        if numPts is None or minSize is None or maxSize is None:
            return False
        return 1 <= minSize <= maxSize <= numPts - 2

    if eventType == 'Donly' and dLimits:
        count = calcNumCandidatesFromDandRlimits(
            eventType=eventType,
            d_start=dLimits[0], d_end=dLimits[1], r_start=-1, r_end=-1)
        return 'usedLimits', count
    if eventType == 'Ronly' and rLimits:
        count = calcNumCandidatesFromDandRlimits(
            eventType=eventType,
            d_start=-1, d_end=-1, r_start=rLimits[0], r_end=rLimits[1])
        return 'usedLimits', count
    if eventType == 'DandR' and dLimits and rLimits:
        count = calcNumCandidatesFromDandRlimits(
            eventType=eventType,
            d_start=dLimits[0], d_end=dLimits[1],
            r_start=rLimits[0], r_end=rLimits[1])
        return 'usedLimits', count

    if eventType not in ('Donly', 'Ronly', 'DandR'):
        raise Exception("Unrecognized event type")

    if sizeParamsValid():
        count = calcNumCandidatesFromEventSize(
            eventType=eventType, left=left, right=right,
            minSize=minSize, maxSize=maxSize)
        return 'usedSize', count
    return 'error', -1
def subFrameAdjusted(*, eventType=None, cand=None, B=None, A=None,
                     sigmaA=None, sigmaB=None,
                     yValues=None, left=None, right=None):
    """Refine the integer edge candidate (D, R) to fractional (sub-frame) timing.

    When the reading at an edge categorizes (via aicModelValue) as a valid
    intermediate point M, the edge is moved by that reading's fractional
    position between A and B.  An edge reading that categorizes as pure B
    or pure A may shift the integer edge by one sample, after which B and A
    are recomputed with calcBandA.

    Returns ([adjD, adjR], B, A), where adjD/adjR may be fractional.
    """
    # Fractional R: interpolate the reading's distance below B toward A.
    def adjustR():
        value = yValues[R]
        adj = (B - value) / (B - A)
        return R + adj
    # Fractional D: interpolate the reading's distance above A toward B.
    def adjustD():
        value = yValues[D]
        adj = (value - A) / (B - A)
        return D + adj
    D, R = cand
    adjD = D
    adjR = R
    # Here we add code so we can analyze light curves that may have sigmaB or
    # sigmaA values of zero. This happens when testing with artificial data
    # but can also result from real light curves that may be clipped so that
    # all B pixels have a constant value. Limovie can produce a sigmaA=0
    # when a rectangular aperture is in use as well
    if sigmaA == 0.0:
        sigmaA = MIN_SIGMA
    if sigmaB == 0.0:
        sigmaB = MIN_SIGMA
    if eventType == 'Donly':
        if aicModelValue(obsValue=yValues[D], B=B, A=A, sigmaB=sigmaB, sigmaA=sigmaA) == yValues[D]:
            # If point at D categorizes as M (valid mid-point), do sub-frame
            # adjustment and exit
            adjD = adjustD()
        elif aicModelValue(obsValue=yValues[D], B=B, A=A, sigmaB=sigmaB, sigmaA=sigmaA) == B:
            # else if point at D categorizes as B, set D to D+1 and recalculate B and A
            D = D + 1
            adjD = D
            B, A = calcBandA(yValues=yValues, left=left, right=right, cand=(D, R))
            # It's possible that this new point qualifies as M --- so we check:
            if aicModelValue(
                    obsValue=yValues[D], B=B, A=A, sigmaB=sigmaB,
                    sigmaA=sigmaA) == yValues[D]:
                adjD = adjustD()
        # else (point at D categorizes as A) --- nothing to do
        return [adjD, adjR], B, A
    elif eventType == 'Ronly':
        if aicModelValue(obsValue=yValues[R], B=B, A=A, sigmaB=sigmaB, sigmaA=sigmaA) == yValues[R]:
            # If point at R categorizes as M, do sub-frame adjustment
            adjR = adjustR()
        elif aicModelValue(obsValue=yValues[R], B=B, A=A, sigmaB=sigmaB, sigmaA=sigmaA) == A:
            # else if point at R categorizes as A, set R to R + 1 and recalculate B and A
            R = R + 1
            adjR = R
            B, A = calcBandA(yValues=yValues, left=left, right=right, cand=(D, R))
            # It's possible that this new point qualifies as M --- so we check
            if aicModelValue(
                    obsValue=yValues[R], B=B, A=A, sigmaB=sigmaB,
                    sigmaA=sigmaA) == yValues[R]:
                adjR = adjustR()
        # else (point at R categorizes as B) --- nothing to do
        return [adjD, adjR], B, A
    elif eventType == 'DandR':
        # Events of size 2 or less are returned unadjusted.
        if not R - D > 2:
            return [D, R], B, A
        if aicModelValue(
                obsValue=yValues[D], B=B, A=A, sigmaB=sigmaB, sigmaA=sigmaA) == yValues[D]:
            # The point at D categorizes as M, do sub-frame adjustment; this
            # (finishes D)
            adjD = adjustD()
        elif aicModelValue(obsValue=yValues[D], B=B, A=A, sigmaB=sigmaB, sigmaA=sigmaA) == B:
            # The point at D categorizes as B, set D to D+1 and recalculate B and A
            D = D + 1
            adjD = D
            # It's possible that this new point qualifies as M --- so we check
            # NOTE(review): unlike the 'Donly' branch, the M-check below uses
            # the OLD B and A; calcBandA runs only afterwards --- confirm this
            # ordering is intentional.
            if aicModelValue(
                    obsValue=yValues[D], B=B, A=A, sigmaB=sigmaB,
                    sigmaA=sigmaA) == yValues[D]:
                adjD = adjustD()
            B, A = calcBandA(yValues=yValues, left=left, right=right, cand=(D, R))
        elif aicModelValue(obsValue=yValues[D-1], B=B, A=A, sigmaB=sigmaB,
                           sigmaA=sigmaA) == yValues[D-1]:
            # The point at D categorizes as A, and we have found
            # that the point at D-1 categorizes as M, so set D to D-1 and
            # recalculate B and A
            D = D - 1
            adjD = adjustD()
            B, A = calcBandA(yValues=yValues, left=left, right=right,
                             cand=(D, R))
        if aicModelValue(
                obsValue=yValues[R], B=B, A=A, sigmaB=sigmaB, sigmaA=sigmaA) == yValues[R]:
            # The point at R categorizes as M, do sub-frame adjustment
            adjR = adjustR()
        elif aicModelValue(obsValue=yValues[R], B=B, A=A, sigmaB=sigmaB, sigmaA=sigmaA) == A:
            # The point at R categorizes as A, set R to R + 1 and recalculate B and A
            R = R + 1
            adjR = R
            B, A = calcBandA(yValues=yValues, left=left, right=right, cand=(D, R))
            # It's possible that this new point qualifies as M --- so we check
            if aicModelValue(
                    obsValue=yValues[R], B=B, A=A, sigmaB=sigmaB,
                    sigmaA=sigmaA) == yValues[R]:
                adjR = adjustR()
        elif aicModelValue(obsValue=yValues[R - 1], B=B, A=A, sigmaB=sigmaB,
                           sigmaA=sigmaA) == yValues[R - 1]:
            # The point at R categorizes as B, and we have found
            # that the point at R-1 categorizes as M, so set R to R-1 and
            # recalculate B and A
            R = R - 1
            adjR = adjustR()
            B, A = calcBandA(yValues=yValues, left=left, right=right,
                             cand=(D, R))
        return [adjD, adjR], B, A
    else:
        raise Exception('Unrecognized event type')
def solver(*, eventType=None, yValues=None,
           left=None, right=None,
           sigmaB=None, sigmaA=None,
           dLimits=None, rLimits=None,
           minSize=None, maxSize=None):
    """Exhaustive light-curve event solver, implemented as a generator.

    Scores every (D, R) candidate (from explicit limits if given, otherwise
    from event-size bounds) with scoreSubFrame, keeping the best.  While
    searching it yields ('fractionDone', fraction) progress tuples every
    1000 candidates; after the search it may yield
    ('no event present', fraction) when a straight-line model is not
    sufficiently improbable (pLine > 0.001); the final yield is
    subFrameAdjusted()'s result: ([adjD, adjR], B, A).
    """
    bestCand = None
    bestScore = float('-inf')
    bestB = None
    bestA = None
    mode, numCandidates = candidateCounter(eventType=eventType, dLimits=dLimits, rLimits=rLimits,
                                           left=left, right=right, numPts=yValues.size,
                                           minSize=minSize, maxSize=maxSize)
    if mode == 'error':
        # Invalid specification: terminates the generator (this return value
        # is only visible as StopIteration.value).
        return bestCand, bestB, bestA
    if mode == 'usedLimits':
        candGen = candidatesFromDandRlimits(eventType=eventType, dLimits=dLimits,
                                            rLimits=rLimits)
    elif mode == 'usedSize':
        candGen = candidatesFromEventSize(eventType=eventType, left=left, right=right,
                                          minSize=minSize, maxSize=maxSize)
    else:
        raise Exception('candidateCounter() returned unexpected "mode" ')
    counter = 0
    for cand in candGen:
        # score, B, A = scoreCandidate(yValues, left, right, cand, sigmaB, sigmaA)
        score, B, A = scoreSubFrame(yValues, left, right, cand, sigmaB, sigmaA)
        if score > bestScore:
            bestScore = score
            bestB = B
            bestA = A
            bestCand = cand
        counter += 1
        if counter % 1000 == 0:
            # Progress report for the caller (e.g. a GUI progress bar).
            yield 'fractionDone', counter/numCandidates
    # AICc model-selection check against a flat line: k is the number of
    # fitted parameters of the event model (B, A, edges).
    if eventType == 'DandR':
        k = 4
    else:
        k = 3
    # lineScore = logLikelihoodLine(yValues, sigmaB=sigmaB, left=left, right=right)
    # The line model's sigma is taken from the overall sample variance
    # rather than the supplied sigmaB (see commented-out alternative above).
    lineScore = logLikelihoodLine(yValues, sigmaB=np.sqrt(np.var(yValues)), left=left, right=right)
    aiccSol = aicc(bestScore, right-left+1, k)
    aiccLine = aicc(lineScore, right-left+1, 1)
    if aiccSol < aiccLine:
        # Relative likelihood of the straight-line model versus the best event.
        pLine = exp(-(aiccLine - aiccSol)/2)
    else:
        pLine = 1.00
    if pLine > 0.001:
        yield 'no event present', counter/numCandidates
    yield subFrameAdjusted(eventType=eventType, cand=bestCand,
                           B=bestB, A=bestA, sigmaB=sigmaB, sigmaA=sigmaA,
                           yValues=yValues, left=left, right=right)
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='AcademicYear',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=20, blank=True)),
],
options={
'db_table': 'tb_academic_year',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('address', models.CharField(max_length=1000, blank=True)),
('area', models.CharField(max_length=1000, blank=True)),
('pincode', models.CharField(max_length=20, blank=True)),
('landmark', models.CharField(max_length=1000, blank=True)),
('instidentification', models.CharField(max_length=1000, blank=True)),
('bus', models.CharField(max_length=1000, blank=True)),
('instidentification2', models.CharField(max_length=1000, blank=True)),
],
options={
'db_table': 'tb_address',
'managed': False,
'verbose_name_plural': 'Addresses',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AnganwadiDisplayMaster',
fields=[
('key', models.CharField(max_length=36, serialize=False, primary_key=True)),
('value', models.CharField(max_length=200, blank=True)),
],
options={
'db_table': 'mvw_ang_display_master',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AngDisplayMaster',
fields=[
('key', models.CharField(max_length=30, serialize=False, primary_key=True)),
('value', models.CharField(max_length=200)),
],
options={
'db_table': 'mvw_ang_display_master',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Assembly',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True, db_column='ac_id')),
('gid', models.IntegerField()),
('number', models.IntegerField(db_column='ac_no')),
('name', models.CharField(max_length=35, db_column='ac_name')),
('state_ut', models.CharField(max_length=35)),
('coord', django.contrib.gis.db.models.fields.GeometryField(srid=4326, db_column='the_geom')),
],
options={
'db_table': 'mvw_assembly',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Assessment',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=300)),
('start', models.DateField(null=True, blank=True)),
('end', models.DateField(null=True, blank=True)),
],
options={
'db_table': 'tb_assessment',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Boundary',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=300)),
],
options={
'db_table': 'tb_boundary',
'managed': False,
'verbose_name_plural': 'Boundaries',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BoundaryCoord',
fields=[
('boundary', models.OneToOneField(primary_key=True, db_column='id_bndry', serialize=False, to='schools.Boundary')),
('typ', models.CharField(max_length=20, db_column='type')),
('coord', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
],
options={
'db_table': 'mvw_boundary_coord',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BoundaryHierarchy',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=300)),
],
options={
'db_table': 'tb_bhierarchy',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BoundaryPrimarySchool',
fields=[
('cluster', models.ForeignKey(related_name='boundary_ps_cluster', primary_key=True, db_column='cluster_id', serialize=False, to='schools.Boundary')),
],
options={
'db_table': 'mvw_boundary_primary',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='BoundaryType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=300)),
],
options={
'db_table': 'tb_boundary_type',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Child',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=300, blank=True)),
('dob', models.DateField(null=True, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
],
options={
'db_table': 'tb_child',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DiseDisplayMaster',
fields=[
('key', models.CharField(max_length=36, serialize=False, primary_key=True)),
('value', models.CharField(max_length=200, blank=True)),
],
options={
'db_table': 'mvw_dise_display_master',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DiseInfo',
fields=[
('dise_code', models.CharField(max_length=32, serialize=False, primary_key=True)),
('classroom_count', models.IntegerField(null=True, blank=True)),
('teacher_count', models.IntegerField(null=True, blank=True)),
('boys_count', models.IntegerField(null=True, blank=True)),
('girls_count', models.IntegerField(null=True, blank=True)),
('lowest_class', models.IntegerField(null=True, blank=True)),
('highest_class', models.IntegerField(null=True, blank=True)),
('acyear', models.CharField(max_length=15, blank=True)),
('sg_recd', models.IntegerField(null=True, blank=True)),
('sg_expnd', models.IntegerField(null=True, blank=True)),
('tlm_recd', models.IntegerField(null=True, blank=True)),
('tlm_expnd', models.IntegerField(null=True, blank=True)),
('ffs_recd', models.IntegerField(null=True, blank=True)),
('ffs_expnd', models.IntegerField(null=True, blank=True)),
('books_in_library', models.IntegerField(null=True, blank=True)),
],
options={
'db_table': 'mvw_dise_info_olap',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DiseFacilityAgg',
fields=[
('dise_info', models.ForeignKey(primary_key=True, db_column='dise_code', serialize=False, to='schools.DiseInfo')),
('score', models.DecimalField(null=True, max_digits=5, decimal_places=0, blank=True)),
('df_group', models.CharField(max_length=30, blank=True)),
],
options={
'db_table': 'mvw_dise_facility_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DiseRteAgg',
fields=[
('dise_info', models.ForeignKey(primary_key=True, db_column='dise_code', serialize=False, to='schools.DiseInfo')),
('status', models.CharField(max_length=30, blank=True)),
('rte_group', models.CharField(max_length=32, blank=True)),
],
options={
'db_table': 'mvw_dise_rte_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ElectedrepMaster',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('elec_comm_code', models.IntegerField(null=True, blank=True)),
('const_ward_name', models.CharField(max_length=300, blank=True)),
('const_ward_type', models.TextField(blank=True)),
('neighbours', models.CharField(max_length=100, blank=True)),
('current_elected_rep', models.CharField(max_length=300, blank=True)),
('current_elected_party', models.CharField(max_length=300, blank=True)),
],
options={
'db_table': 'mvw_electedrep_master',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PaisaData',
fields=[
('grant_type', models.CharField(max_length=32, serialize=False, primary_key=True)),
('grant_amount', models.IntegerField(null=True, blank=True)),
('criteria', models.CharField(max_length=32, blank=True)),
('operator', models.CharField(max_length=3, blank=True)),
('factor', models.CharField(max_length=32, blank=True)),
],
options={
'db_table': 'mvw_paisa_data',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Parliament',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True, db_column='pc_id')),
('gid', models.IntegerField()),
('number', models.IntegerField(db_column='pc_no')),
('name', models.CharField(max_length=35, db_column='pc_name')),
('state_ut', models.CharField(max_length=35)),
('coord', django.contrib.gis.db.models.fields.GeometryField(srid=4326, db_column='the_geom')),
],
options={
'db_table': 'mvw_parliament',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Partner',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=300)),
('status', models.IntegerField()),
('info', models.CharField(max_length=500, blank=True)),
],
options={
'db_table': 'tb_partner',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Postal',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True, db_column='pin_id')),
('gid', models.IntegerField()),
('pincode', models.CharField(max_length=35)),
('coord', django.contrib.gis.db.models.fields.GeometryField(srid=4326, db_column='the_geom')),
],
options={
'db_table': 'mvw_postal',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Programme',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=300)),
('start', models.DateField(null=True, blank=True)),
('end', models.DateField(null=True, blank=True)),
],
options={
'db_table': 'tb_programme',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('desc', models.CharField(max_length=100)),
('qtype', models.IntegerField(null=True, blank=True)),
('maxmarks', models.DecimalField(null=True, max_digits=65535, decimal_places=65535, blank=True)),
('minmarks', models.DecimalField(null=True, max_digits=65535, decimal_places=65535, blank=True)),
('grade', models.CharField(max_length=100, blank=True)),
],
options={
'db_table': 'tb_question',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='School',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=300)),
('cat', models.CharField(max_length=128, choices=[('Model Primary', 'Model Primary'), ('Anganwadi', 'Anganwadi'), ('Lower Primary', 'Lower Primary'), ('Secondary', 'Secondary'), ('Akshara Balwadi', 'Akshara Balwadi'), ('Independent Balwadi', 'Independent Balwadi'), ('Upper Primary', 'Upper Primary')])),
('sex', models.CharField(max_length=128, choices=[('boys', 'Boys'), ('girls', 'Girls'), ('co-ed', 'Co-education')])),
('moi', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('mgmt', models.CharField(max_length=128, choices=[('p-a', 'Private Aided'), ('ed', 'Department of Education'), ('p-ua', 'Private Unaided')])),
('status', models.IntegerField()),
],
options={
'db_table': 'tb_school',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PreschoolBasicAssessmentInfo',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('agegroup', models.CharField(max_length=50, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('num', models.IntegerField(null=True, blank=True)),
],
options={
'db_table': 'tb_preschool_basic_assessment_info',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PreschoolAssessmentAgg',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('agegroup', models.CharField(max_length=50, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('aggtext', models.CharField(max_length=100)),
('aggval', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),
],
options={
'db_table': 'tb_preschool_assessment_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MdmAgg',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='klpid', serialize=False, to='schools.School')),
('mon', models.CharField(max_length=15, blank=True)),
('wk', models.IntegerField(null=True, blank=True)),
('indent', models.IntegerField(null=True, blank=True)),
('attend', models.IntegerField(null=True, blank=True)),
],
options={
'db_table': 'mvw_mdm_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LibLevelAgg',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='klp_school_id', serialize=False, to='schools.School')),
('class_name', models.IntegerField(null=True, db_column='class', blank=True)),
('month', models.CharField(max_length=10, blank=True)),
('year', models.CharField(max_length=10, blank=True)),
('book_level', models.CharField(max_length=50, blank=True)),
('child_count', models.IntegerField(null=True, blank=True)),
],
options={
'db_table': 'mvw_lib_level_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LibLangAgg',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='klp_school_id', serialize=False, to='schools.School')),
('class_name', models.IntegerField(null=True, db_column='class', blank=True)),
('month', models.CharField(max_length=10, blank=True)),
('year', models.CharField(max_length=10, blank=True)),
('book_lang', models.CharField(max_length=50, blank=True)),
('child_count', models.IntegerField(null=True, blank=True)),
],
options={
'db_table': 'mvw_lib_lang_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Libinfra',
fields=[
('school', models.OneToOneField(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('libstatus', models.CharField(max_length=300, blank=True)),
('handoveryear', models.IntegerField(null=True, blank=True)),
('libtype', models.CharField(max_length=300, blank=True)),
('numbooks', models.IntegerField(null=True, blank=True)),
('numracks', models.IntegerField(null=True, blank=True)),
('numtables', models.IntegerField(null=True, blank=True)),
('numchairs', models.IntegerField(null=True, blank=True)),
('numcomputers', models.IntegerField(null=True, blank=True)),
('numups', models.IntegerField(null=True, blank=True)),
],
options={
'db_table': 'mvw_libinfra',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LibBorrow',
fields=[
('trans_year', models.CharField(max_length=30, blank=True)),
('class_name', models.DecimalField(null=True, decimal_places=0, max_digits=3, db_column='class', blank=True)),
('issue_date', models.CharField(max_length=20, blank=True)),
('school', models.ForeignKey(primary_key=True, db_column='klp_school_id', serialize=False, to='schools.School')),
('school_name', models.CharField(max_length=50, blank=True)),
],
options={
'db_table': 'mvw_lib_borrow',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionBasicAssessmentInfoCohorts',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('studentgroup', models.CharField(max_length=50, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('cohortsnum', models.IntegerField(null=True, blank=True)),
],
options={
'verbose_name': 'InstBasicAssInfoCohorts',
'db_table': 'tb_institution_basic_assessment_info_cohorts',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionBasicAssessmentInfo',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('studentgroup', models.CharField(max_length=50, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('num', models.IntegerField(null=True, blank=True)),
],
options={
'verbose_name': 'InstBasicAssInfo',
'db_table': 'tb_institution_basic_assessment_info',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionAssessmentSinglescoreMt',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('asstext', models.CharField(max_length=50)),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('singlescore', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),
('order', models.IntegerField(null=True, blank=True)),
],
options={
'verbose_name': 'InstAssSingleScoreMt',
'db_table': 'tb_institution_assessment_singlescore_mt',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionAssessmentSinglescoreGender',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('asstext', models.CharField(max_length=50)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('singlescore', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),
('order', models.IntegerField(null=True, blank=True)),
],
options={
'verbose_name': 'InstAssSingleScoreGender',
'db_table': 'tb_institution_assessment_singlescore_gender',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionAssessmentSinglescore',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('asstext', models.CharField(max_length=50)),
('singlescore', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),
('order', models.IntegerField(null=True, blank=True)),
],
options={
'verbose_name': 'InstAssSingleScore',
'db_table': 'tb_institution_assessment_singlescore',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionAssessmentReadingAggCohorts',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('studentgroup', models.CharField(max_length=50, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('domain', models.CharField(max_length=100, blank=True)),
('domain_order', models.IntegerField(null=True, blank=True)),
('aggtext', models.CharField(max_length=100)),
('aggtext_order', models.IntegerField()),
('cohortsval', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),
],
options={
'verbose_name': 'InstAssReadingAggCohorts',
'db_table': 'tb_institution_assessment_reading_agg_cohorts',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionAssessmentMtSinglescore',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('singlescore', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),
],
options={
'verbose_name': 'InstAssMtSingleScore',
'db_table': 'tb_institution_assessment_mt_singlescore',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionAssessmentGenderSinglescore',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('singlescore', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),
],
options={
'verbose_name': 'InstAssGenderSingleScore',
'db_table': 'tb_institution_assessment_gender_singlescore',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionAssessmentAggCohorts',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('studentgroup', models.CharField(max_length=50, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('domain', models.CharField(max_length=100, blank=True)),
('domain_order', models.IntegerField(null=True, blank=True)),
('aggtext', models.CharField(max_length=100)),
('aggtext_order', models.IntegerField()),
('cohortsval', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),
],
options={
'verbose_name': 'InstAssAggCohorts',
'db_table': 'tb_institution_assessment_agg_cohorts',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionAssessmentAgg',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('studentgroup', models.CharField(max_length=50, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('domain', models.CharField(max_length=100, blank=True)),
('domain_order', models.IntegerField(null=True, blank=True)),
('aggtext', models.CharField(max_length=100)),
('aggtext_order', models.IntegerField()),
('aggval', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),
],
options={
'db_table': 'tb_institution_assessment_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstitutionAgg',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='id', serialize=False, to='schools.School')),
('name', models.CharField(max_length=300, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('num', models.IntegerField(null=True, blank=True)),
],
options={
'db_table': 'tb_institution_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='InstCoord',
fields=[
('school', models.OneToOneField(primary_key=True, db_column='instid', serialize=False, to='schools.School')),
('coord', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
],
options={
'db_table': 'mvw_inst_coord',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AngInfraAgg',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('ai_metric', models.CharField(max_length=30)),
('perc', models.IntegerField()),
('ai_group', models.CharField(max_length=30)),
],
options={
'db_table': 'mvw_anginfra_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='AnganwadiInfraAgg',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('perc_score', models.DecimalField(null=True, max_digits=5, decimal_places=0, blank=True)),
('ai_group', models.CharField(max_length=30, blank=True)),
],
options={
'db_table': 'mvw_ang_infra_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SchoolAgg',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='id', serialize=False, to='schools.School')),
('name', models.CharField(max_length=300, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('num', models.IntegerField(null=True, blank=True)),
],
options={
'db_table': 'tb_school_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SchoolAssessmentAgg',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('aggtext', models.CharField(max_length=100)),
('aggval', models.DecimalField(null=True, max_digits=6, decimal_places=2, blank=True)),
],
options={
'db_table': 'tb_school_assessment_agg',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SchoolBasicAssessmentInfo',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='sid', serialize=False, to='schools.School')),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('num', models.IntegerField(null=True, blank=True)),
],
options={
'db_table': 'tb_school_basic_assessment_info',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SchoolClassTotalYear',
fields=[
('school', models.ForeignKey(primary_key=True, db_column='schid', serialize=False, to='schools.School')),
('clas', models.IntegerField(db_column='clas')),
('total', models.IntegerField(db_column='total')),
],
options={
'db_table': 'mvw_school_class_total_year',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SchoolDetails',
fields=[
('school', models.OneToOneField(primary_key=True, db_column='id', serialize=False, to='schools.School')),
('num_boys', models.IntegerField(null=True, db_column='num_boys', blank=True)),
('num_girls', models.IntegerField(null=True, db_column='num_girls', blank=True)),
],
options={
'db_table': 'mvw_school_details',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SchoolElectedrep',
fields=[
('school', models.OneToOneField(related_name='electedrep', primary_key=True, db_column='sid', serialize=False, to='schools.School')),
],
options={
'db_table': 'mvw_school_electedrep',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SchoolEval',
fields=[
('school', models.IntegerField(serialize=False, primary_key=True, db_column='sid')),
('domain', models.CharField(max_length=100, blank=True)),
('value', models.CharField(max_length=50, blank=True)),
],
options={
'db_table': 'mvw_school_eval',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('otherstudentid', models.CharField(max_length=100, blank=True)),
('status', models.IntegerField()),
],
options={
'db_table': 'tb_student',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StudentEval',
fields=[
('question', models.ForeignKey(primary_key=True, db_column='qid', serialize=False, to='schools.Question')),
('mark', models.DecimalField(null=True, max_digits=5, decimal_places=2, blank=True)),
('grade', models.CharField(max_length=30, blank=True)),
],
options={
'db_table': 'tb_student_eval',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StudentGroup',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=50)),
('section', models.CharField(max_length=1, blank=True)),
],
options={
'db_table': 'tb_class',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='StudentStudentGroup',
fields=[
('student', models.ForeignKey(primary_key=True, db_column='stuid', serialize=False, to='schools.Student')),
('status', models.IntegerField()),
],
options={
'db_table': 'tb_student_class',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Teacher',
fields=[
('id', models.IntegerField(serialize=False, primary_key=True)),
('name', models.CharField(max_length=300, blank=True)),
('sex', models.CharField(max_length=128, choices=[('male', 'Male'), ('female', 'Female')])),
('status', models.IntegerField(null=True, blank=True)),
('mt', models.CharField(max_length=128, choices=[('bengali', 'Bengali'), ('english', 'English'), ('gujarathi', 'Gujarathi'), ('hindi', 'Hindi'), ('kannada', 'Kannada'), ('konkani', 'Konkani'), ('malayalam', 'Malayalam'), ('marathi', 'Marathi'), ('nepali', 'Nepali'), ('oriya', 'Oriya'), ('sanskrit', 'Sanskrit'), ('sindhi', 'Sindhi'), ('tamil', 'Tamil'), ('telugu', 'Telugu'), ('urdu', 'Urdu'), ('multi lng', 'Multi Lingual'), ('other', 'Other'), ('not known', 'Not known')])),
('dateofjoining', models.DateField(null=True, blank=True)),
('type', models.CharField(max_length=50, blank=True)),
],
options={
'db_table': 'tb_teacher',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TeacherQualification',
fields=[
('teacher', models.ForeignKey(primary_key=True, db_column='tid', serialize=False, to='schools.Teacher')),
('qualification', models.CharField(max_length=100)),
],
options={
'db_table': 'tb_teacher_qual',
'managed': False,
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TeacherStudentGroup',
fields=[
('teacher', models.ForeignKey(primary_key=True, db_column='teacherid', serialize=False, to='schools.Teacher')),
('status', models.IntegerField(null=True, blank=True)),
],
options={
'db_table': 'tb_teacher_class',
'managed': False,
},
bases=(models.Model,),
),
]
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds android browsers that can be controlled by telemetry."""
import logging
import os
import subprocess
import sys
from py_utils import dependency_util
from devil import base_error
from devil.android import apk_helper
from telemetry.core import exceptions
from telemetry.core import platform
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.backends import android_browser_backend_settings
from telemetry.internal.backends.chrome import android_browser_backend
from telemetry.internal.browser import browser
from telemetry.internal.browser import possible_browser
from telemetry.internal.platform import android_device
from telemetry.internal.util import binary_manager
# Maps a telemetry browser-type name to a 3-item list:
#   [0] the browser's Android package name,
#   [1] the backend-settings class used to drive that browser, and
#   [2] the APK file name to look for in a local Chromium build output
#       directory, or None when the browser cannot be picked up from a
#       local checkout (e.g. store/system builds).
CHROME_PACKAGE_NAMES = {
  'android-content-shell':
      ['org.chromium.content_shell_apk',
       android_browser_backend_settings.ContentShellBackendSettings,
       'ContentShell.apk'],
  'android-webview':
      ['org.chromium.webview_shell',
       android_browser_backend_settings.WebviewBackendSettings,
       None],
  'android-webview-shell':
      ['org.chromium.android_webview.shell',
       android_browser_backend_settings.WebviewShellBackendSettings,
       'AndroidWebView.apk'],
  'android-chromium':
      ['org.chromium.chrome',
       android_browser_backend_settings.ChromeBackendSettings,
       'ChromePublic.apk'],
  'android-chrome':
      ['com.google.android.apps.chrome',
       android_browser_backend_settings.ChromeBackendSettings,
       'Chrome.apk'],
  'android-chrome-work':
      ['com.chrome.work',
       android_browser_backend_settings.ChromeBackendSettings,
       None],
  'android-chrome-beta':
      ['com.chrome.beta',
       android_browser_backend_settings.ChromeBackendSettings,
       None],
  'android-chrome-dev':
      ['com.chrome.dev',
       android_browser_backend_settings.ChromeBackendSettings,
       None],
  'android-chrome-canary':
      ['com.chrome.canary',
       android_browser_backend_settings.ChromeBackendSettings,
       None],
  'android-system-chrome':
      ['com.android.chrome',
       android_browser_backend_settings.ChromeBackendSettings,
       None],
}
class PossibleAndroidBrowser(possible_browser.PossibleBrowser):
  """A launchable android browser instance."""

  def __init__(self, browser_type, finder_options, android_platform,
               backend_settings, apk_name):
    """Initializes the possible browser and resolves a local APK, if any.

    Args:
      browser_type: One of the keys of CHROME_PACKAGE_NAMES, or the special
          types 'exact' / 'reference'.
      finder_options: Browser finder options; finder_options.chrome_root is
          required when apk_name must be resolved against a local checkout.
      android_platform: The telemetry platform wrapping the target device.
      backend_settings: Backend settings class/factory for this browser.
      apk_name: For 'exact'/'reference', a full path to the APK; otherwise a
          bare APK file name to search for in build output directories, or
          None when there is no local APK to look for.

    Raises:
      exceptions.PathMissingError: If an 'exact' or 'reference' APK path
          does not exist on disk.
    """
    super(PossibleAndroidBrowser, self).__init__(
        browser_type, 'android', backend_settings.supports_tab_control)
    assert browser_type in FindAllBrowserTypes(finder_options), (
        'Please add %s to android_browser_finder.FindAllBrowserTypes' %
        browser_type)
    self._platform = android_platform
    self._platform_backend = (
        android_platform._platform_backend) # pylint: disable=protected-access
    self._backend_settings = backend_settings
    self._local_apk = None
    if browser_type == 'exact':
      # --browser-executable named an explicit APK; it must already exist.
      if not os.path.exists(apk_name):
        raise exceptions.PathMissingError(
            'Unable to find exact apk %s specified by --browser-executable' %
            apk_name)
      self._local_apk = apk_name
    elif browser_type == 'reference':
      # Reference builds are fetched ahead of time; a missing file is fatal.
      if not os.path.exists(apk_name):
        raise exceptions.PathMissingError(
            'Unable to find reference apk at expected location %s.' % apk_name)
      self._local_apk = apk_name
    elif apk_name:
      assert finder_options.chrome_root, (
          'Must specify Chromium source to use apk_name')
      chrome_root = finder_options.chrome_root
      candidate_apks = []
      # Look for the named APK in every known build output directory.
      for build_path in util.GetBuildDirectories(chrome_root):
        apk_full_name = os.path.join(build_path, 'apks', apk_name)
        if os.path.exists(apk_full_name):
          last_changed = os.path.getmtime(apk_full_name)
          candidate_apks.append((last_changed, apk_full_name))
      if candidate_apks:
        # Find the candidate .apk with the latest modification time.
        newest_apk_path = sorted(candidate_apks)[-1][1]
        self._local_apk = newest_apk_path

  def __repr__(self):
    return 'PossibleAndroidBrowser(browser_type=%s)' % self.browser_type

  def _InitPlatformIfNeeded(self):
    # The platform is fully initialized in __init__; nothing further needed.
    pass

  def Create(self, finder_options):
    """Creates and returns a Browser backed by this device.

    On failure the partially-created backend is closed (best effort) and the
    original exception is re-raised with its traceback.
    """
    self._InitPlatformIfNeeded()
    browser_backend = android_browser_backend.AndroidBrowserBackend(
        self._platform_backend,
        finder_options.browser_options, self._backend_settings)
    try:
      return browser.Browser(
          browser_backend, self._platform_backend, self._credentials_path)
    except Exception:
      logging.exception('Failure while creating Android browser.')
      # Capture exc_info now so the Close() below cannot clobber it.
      original_exception = sys.exc_info()
      try:
        browser_backend.Close()
      except Exception:
        logging.exception('Secondary failure while closing browser backend.')
      # Python 2 three-expression raise: re-raises the original exception
      # type/value with its original traceback.
      raise original_exception[0], original_exception[1], original_exception[2]

  def SupportsOptions(self, browser_options):
    """Returns False when extensions are requested (unsupported on Android)."""
    if len(browser_options.extensions_to_load) != 0:
      return False
    return True

  def HaveLocalAPK(self):
    """Returns True if a local APK was resolved and still exists on disk."""
    return self._local_apk and os.path.exists(self._local_apk)

  @decorators.Cache
  def UpdateExecutableIfNeeded(self):
    # Cached so the (slow) device install happens at most once per browser.
    if self.HaveLocalAPK():
      logging.warn('Installing %s on device if needed.' % self._local_apk)
      self.platform.InstallApplication(self._local_apk)

  def last_modification_time(self):
    """Returns the local APK's mtime, or -1 when there is no local APK."""
    if self.HaveLocalAPK():
      return os.path.getmtime(self._local_apk)
    return -1
def SelectDefaultBrowser(possible_browsers):
  """Return the newest possible browser.

  "Newest" means the browser whose local APK has the most recent
  modification time (browsers without a local APK report -1). Ties keep
  the earliest entry. Returns None when the list is empty or None.
  """
  if not possible_browsers:
    return None
  newest = possible_browsers[0]
  for candidate in possible_browsers[1:]:
    if candidate.last_modification_time() > newest.last_modification_time():
      newest = candidate
  return newest
def CanFindAvailableBrowsers():
  """Returns True if Android devices can be discovered from this host."""
  return android_device.CanDiscoverDevices()
def CanPossiblyHandlePath(target_path):
  """Returns True if target_path has an '.apk' extension (case-insensitive)."""
  _, extension = os.path.splitext(target_path.lower())
  return extension == '.apk'
def FindAllBrowserTypes(options):
  """Returns every browser-type name this finder understands.

  Args:
    options: Finder options; unused, kept for interface compatibility with
        the other browser finders.

  Returns:
    A list containing every key of CHROME_PACKAGE_NAMES plus the special
    'exact' and 'reference' types.
  """
  del options  # unused
  # Wrap keys() in list(): under Python 3, dict.keys() returns a view that
  # does not support '+'; list(...) works identically under Python 2.
  return list(CHROME_PACKAGE_NAMES.keys()) + ['exact', 'reference']
def _FindAllPossibleBrowsers(finder_options, android_platform):
  """Testable version of FindAllAvailableBrowsers.

  Builds the list of PossibleAndroidBrowser instances for one device from
  three sources, in order: the --browser-executable APK (type 'exact'),
  a downloaded reference build (type 'reference'), and every known package
  that is either installed on the device or buildable locally.
  """
  if not android_platform:
    return []
  possible_browsers = []
  # Add the exact APK if given.
  if (finder_options.browser_executable and
      CanPossiblyHandlePath(finder_options.browser_executable)):
    apk_name = os.path.basename(finder_options.browser_executable)
    normalized_path = os.path.expanduser(finder_options.browser_executable)
    exact_package = apk_helper.GetPackageName(normalized_path)
    # Match the APK against the known-browser table by package name or by
    # APK file name. NOTE: itervalues() is Python 2-only.
    package_info = next(
        (info for info in CHROME_PACKAGE_NAMES.itervalues()
         if info[0] == exact_package or info[2] == apk_name), None)
    # It is okay if the APK name or package doesn't match any of known chrome
    # browser APKs, since it may be of a different browser.
    if package_info:
      if not exact_package:
        raise exceptions.PackageDetectionError(
            'Unable to find package for %s specified by --browser-executable' %
            normalized_path)
      [package, backend_settings, _] = package_info
      if package == exact_package:
        possible_browsers.append(PossibleAndroidBrowser(
            'exact',
            finder_options,
            android_platform,
            backend_settings(package),
            normalized_path))
      else:
        # The file name matched a known APK but its package did not.
        raise exceptions.UnknownPackageError(
            '%s specified by --browser-executable has an unknown package: %s' %
            (normalized_path, exact_package))
  # Add the reference build if found.
  os_version = dependency_util.GetChromeApkOsVersion(
      android_platform.GetOSVersionName())
  arch = android_platform.GetArchName()
  try:
    reference_build = binary_manager.FetchPath(
        'chrome_stable', arch, 'android', os_version)
  except (binary_manager.NoPathFoundError,
          binary_manager.CloudStorageError):
    # A missing reference build is not fatal; just skip it.
    reference_build = None
  if reference_build and os.path.exists(reference_build):
    # TODO(aiolos): how do we stably map the android chrome_stable apk to the
    # correct package name?
    package, backend_settings, _ = CHROME_PACKAGE_NAMES['android-chrome']
    possible_browsers.append(PossibleAndroidBrowser(
        'reference',
        finder_options,
        android_platform,
        backend_settings(package),
        reference_build))
  # Add any known local versions. NOTE: iteritems() is Python 2-only.
  for name, package_info in CHROME_PACKAGE_NAMES.iteritems():
    package, backend_settings, apk_name = package_info
    # A local APK can only be resolved when a Chromium checkout is given.
    if apk_name and not finder_options.chrome_root:
      continue
    b = PossibleAndroidBrowser(name,
                               finder_options,
                               android_platform,
                               backend_settings(package),
                               apk_name)
    # Keep the browser if it is already installed or can be installed.
    if b.platform.CanLaunchApplication(package) or b.HaveLocalAPK():
      possible_browsers.append(b)
  return possible_browsers
def FindAllAvailableBrowsers(finder_options, device):
  """Finds all the possible browsers on one device.

  The device is either the only device on the host platform,
  or |finder_options| specifies a particular device.
  """
  # Only Android devices are handled here; other device types fall through
  # to their own finders.
  if not isinstance(device, android_device.AndroidDevice):
    return []
  try:
    device_platform = platform.GetPlatformForDevice(device, finder_options)
    return _FindAllPossibleBrowsers(finder_options, device_platform)
  except base_error.BaseError as err:
    # Best effort: log what went wrong plus the process list for debugging,
    # then report no browsers instead of failing the whole discovery.
    logging.error('Unable to find browsers on %s: %s', device.device_id, str(err))
    processes = subprocess.check_output(['ps', '-ef'])
    logging.error('Ongoing processes:\n%s', processes)
    return []
| |
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for HJM module."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
@test_util.run_all_in_graph_and_eager_modes
class HJMModelTest(parameterized.TestCase, tf.test.TestCase):
  """Sampling tests for the quasi-Gaussian HJM interest-rate model.

  Monte-Carlo statistics of `tff.models.hjm.QuasiGaussianHJM` sample paths
  are compared against the closed-form reference moments built in `setUp`
  (see D. Brigo, F. Mercurio, "Interest Rate Models", 2007).
  """

  def setUp(self):
    # Model parameters used across the tests: `*_1_factor` etc. are flat
    # (unbatched) specs; `*_batch_*` hold a batch of two parameter sets.
    self.mean_reversion_1_factor = [0.03]
    self.volatility_1_factor = [0.01]
    self.mean_reversion_batch_1_factor = [[0.03], [0.04]]
    self.mean_reversion_batch_2_factor = [[0.03, 0.03], [0.04, 0.04]]
    self.volatility_batch_1_factor = [[0.01], [0.015]]
    self.volatility_batch_2_factor = [[0.005, 0.006], [0.004, 0.008]]
    self.mean_reversion_4_factor = [0.03, 0.02, 0.01, 0.005]
    self.volatility_4_factor = [0.01, 0.011, 0.015, 0.008]
    self.volatility_time_dep_1_factor = [0.01, 0.02, 0.01]
    # Flat instantaneous forward curve at 1%.
    self.instant_forward_rate = lambda *args: [0.01]

    # Batched forward curve: one flat curve per batch element (1% and 2%).
    def _instant_forward_rate_batch(t):
      ones = tf.transpose(tf.expand_dims(tf.ones_like(t), axis=0))
      return tf.transpose(tf.constant([0.01, 0.02], dtype=t.dtype) * ones)

    self.instant_forward_rate_batch = _instant_forward_rate_batch
    self.initial_state = [0.01, 0.01]
    self.initial_state_batch = [[[0.01]], [[0.02]]]

    # Closed-form reference statistics for constant-parameter models.
    # See D. Brigo, F. Mercurio. Interest Rate Models. 2007.
    def _true_mean(t, mr, vol, istate, f_0_t):
      dtype = np.float64
      a = dtype(mr)
      sigma = dtype(vol)
      initial_state = dtype(istate)
      return (dtype(f_0_t)
              + (sigma * sigma / 2 / a**2)
              * (1.0 - np.exp(-a * t))**2
              - f_0_t * np.exp(-a * t)
              + initial_state * np.exp(-a * t))

    self.true_mean = _true_mean

    # Stationary-limit variance of the short rate at time t.
    def _true_var(t, mr, vol):
      dtype = np.float64
      a = dtype(mr)
      sigma = dtype(vol)
      return (sigma * sigma / 2 / a) * (1.0 - np.exp(-2 * a * t))

    self.true_var = _true_var

    # Short-rate standard deviation under piecewise-constant volatility:
    # sums per-interval variance contributions up to each time in `t`.
    def _true_std_time_dep(t, intervals, vol, k):
      res = np.zeros_like(t, dtype=np.float64)
      for i, tt in enumerate(t):
        var = 0.0
        for j in range(len(intervals) - 1):
          if tt >= intervals[j] and tt < intervals[j + 1]:
            var = var + vol[j]**2 / 2 / k * (
                np.exp(2 * k * tt) - np.exp(2 * k * intervals[j]))
            break
          else:
            var = var + vol[j]**2 / 2 / k * (
                np.exp(2 * k * intervals[j + 1]) - np.exp(2 * k * intervals[j]))
        # for-else: runs only when no `break` fired, i.e. tt lies beyond the
        # last interval knot; add the tail contribution with the last vol.
        else:
          var = var + vol[-1]**2/2/k *(np.exp(2*k*tt)-np.exp(2*k*intervals[-1]))
        res[i] = np.exp(-k*tt) * np.sqrt(var)
      return res

    self.true_std_time_dep = _true_std_time_dep

    # Log zero-coupon-bond standard deviation for bond maturing at `tau`
    # observed at time `t` (constant vol `v`, mean reversion `k`).
    def _true_zcb_std(t, tau, v, k):
      e_tau = np.exp(-k*tau)
      et = np.exp(k*t)
      val = v/k * (1. - e_tau*et) * np.sqrt((1.-1./et/et)/k/2)
      return val

    self.true_zcb_std = _true_zcb_std
    super(HJMModelTest, self).setUp()

  @parameterized.named_parameters({
      'testcase_name': 'no_xla_time_step',
      'use_xla': False,
      'time_step': 0.1,
      'num_time_steps': None,
  }, {
      'testcase_name': 'xla',
      'use_xla': True,
      'time_step': 0.1,
      'num_time_steps': None,
  }, {
      'testcase_name': 'no_xla_num_time_steps',
      'use_xla': False,
      'time_step': None,
      'num_time_steps': 11,
  }, {
      'testcase_name': 'xla_num_time_steps',
      'use_xla': True,
      'time_step': None,
      'num_time_steps': 11,
  })
  def test_mean_and_variance_1d(self, use_xla, time_step, num_time_steps):
    """Tests 1-Factor model with constant parameters."""
    dtype = tf.float64
    # exact discretization is not supported for time-dependent specification
    # of mean reversion rate.
    process = tff.models.hjm.QuasiGaussianHJM(
        dim=1,
        mean_reversion=self.mean_reversion_1_factor,
        volatility=self.volatility_1_factor,
        initial_discount_rate_fn=self.instant_forward_rate,
        dtype=dtype)
    num_samples = 10_000

    def _fn():
      paths, _, _, _ = process.sample_paths(
          [0.1, 0.5, 1.0],
          num_samples=num_samples,
          time_step=time_step,
          num_time_steps=num_time_steps,
          random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
          seed=[1, 2],
          skip=1000000)
      return paths

    # Optionally compile the sampler with XLA to check both code paths.
    if use_xla:
      paths = self.evaluate(tf.function(_fn, experimental_compile=True)())
    else:
      paths = self.evaluate(_fn())
    self.assertAllEqual(paths.shape, [num_samples, 3])
    paths = paths[:, -1]  # Extract paths values for the terminal time
    mean = np.mean(paths, axis=0)
    variance = np.var(paths, axis=0)
    # Sampled terminal moments should match the closed-form references.
    self.assertAllClose(mean, self.true_mean(
        1.0, self.mean_reversion_1_factor, self.volatility_1_factor,
        self.initial_state, self.instant_forward_rate(1.0))[0],
                        rtol=1e-4, atol=1e-4)
    self.assertAllClose(variance, self.true_var(
        1.0, self.mean_reversion_1_factor, self.volatility_1_factor)[0],
                        rtol=1e-4, atol=1e-4)

  def test_zcb_variance_1_factor(self):
    """Tests 1-Factor model with constant parameters."""
    num_samples = 10_000
    for dtype in [tf.float64]:
      curve_times = np.array([0., 0.5, 1.0, 5.0, 10.0])
      times = np.array([0.1, 0.5, 1.0, 3])
      process = tff.models.hjm.QuasiGaussianHJM(
          dim=1,
          mean_reversion=self.mean_reversion_1_factor,
          volatility=self.volatility_1_factor,
          initial_discount_rate_fn=self.instant_forward_rate,
          dtype=dtype)
      # generate zero coupon paths
      paths, _, _ = process.sample_discount_curve_paths(
          times,
          curve_times=curve_times,
          num_samples=num_samples,
          time_step=0.1,
          random_type=tff.math.random.RandomType.SOBOL,
          seed=[1, 2],
          skip=1000000)
      self.assertEqual(paths.dtype, dtype)
      paths = self.evaluate(paths)
      # Shape: [num_samples, num_curve_times, num_times].
      self.assertAllEqual(paths.shape, [num_samples, 5, 4])
      # Std of log bond prices vs. the closed-form ZCB std at each time.
      sampled_std = tf.math.reduce_std(tf.math.log(paths), axis=0)
      for tidx in range(4):
        true_std = self.true_zcb_std(times[tidx], curve_times + times[tidx],
                                     self.volatility_1_factor[0],
                                     self.mean_reversion_1_factor[0])
        self.assertAllClose(
            sampled_std[:, tidx], true_std, rtol=1e-3, atol=1e-3)

  @parameterized.named_parameters({
      'testcase_name': '1d',
      'dim': 1,
      'corr_matrix': None,
      'dtype': None,
  }, {
      'testcase_name': '2d',
      'dim': 2,
      'corr_matrix': None,
      'dtype': tf.float32,
  }, {
      'testcase_name': '2d_with_corr',
      'dim': 2,
      'corr_matrix': [[[1.0, 0.5], [0.5, 1.0]], [[1.0, 0.7], [0.7, 1.0]]],
      'dtype': tf.float64,
  })
  def test_mean_and_variance_batch(self, dim, corr_matrix, dtype):
    """Tests batch of 1-Factor model with constant parameters."""
    if dim == 1:
      mr = self.mean_reversion_batch_1_factor
      vol = self.volatility_batch_1_factor
    else:
      mr = self.mean_reversion_batch_2_factor
      vol = self.volatility_batch_2_factor
    process = tff.models.hjm.QuasiGaussianHJM(
        dim=dim,
        mean_reversion=mr,
        volatility=vol,
        initial_discount_rate_fn=self.instant_forward_rate_batch,
        corr_matrix=corr_matrix,
        dtype=dtype)
    paths, _, _, _ = process.sample_paths(
        [0.1, 0.5, 1.0],
        num_samples=20000,
        time_step=0.1,
        num_time_steps=None,
        random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
        seed=[1, 2],
        skip=1000000)
    paths = self.evaluate(paths)
    # Leading dimension of 2 is the parameter batch.
    self.assertAllEqual(paths.shape, [2, 20000, 3])
    paths = paths[:, :, -1]  # Extract paths values for the terminal time
    mean = np.mean(paths, axis=-1)
    variance = np.var(paths, axis=-1)
    f_0_t = [0.01, 0.02]
    for i in range(2):
      # For multi-factor models with equal per-factor mean reversion, the
      # factors add up; fold correlation into one effective volatility.
      if dim == 1:
        eff_vol = vol[i][0]
      else:
        if corr_matrix is None:
          c = 0.0
        else:
          c = corr_matrix[i][1][0]
        eff_vol = np.sqrt(vol[i][0]**2 + vol[i][1]**2 + 2*c*vol[i][0]*vol[i][1])
      with self.subTest('CloseMean'):
        self.assertAllClose(
            mean[i], self.true_mean(
                1.0, mr[i][0], eff_vol, self.initial_state_batch[i][0][0],
                f_0_t[i]), rtol=1e-4, atol=1e-4)
      with self.subTest('CloseStd'):
        self.assertAllClose(
            variance[i], self.true_var(1.0, mr[i][0], eff_vol),
            rtol=1e-4, atol=1e-4)

  def test_zcb_variance_batch_1_factor(self):
    """Tests batch of 1-Factor model with constant parameters."""
    num_samples = 100000
    for dtype in [tf.float64]:
      curve_times = np.array([0., 0.5, 1.0, 5.0, 10.0])
      times = np.array([0.1, 0.5, 1.0, 3])
      process = tff.models.hjm.QuasiGaussianHJM(
          dim=1,
          mean_reversion=self.mean_reversion_batch_1_factor,
          volatility=self.volatility_batch_1_factor,
          initial_discount_rate_fn=self.instant_forward_rate_batch,
          dtype=dtype)
      # generate zero coupon paths
      paths, _, _ = process.sample_discount_curve_paths(
          times,
          curve_times=curve_times,
          num_samples=num_samples,
          time_step=0.1,
          random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
          seed=[1, 2],
          skip=1000000)
      self.assertEqual(paths.dtype, dtype)
      paths = self.evaluate(paths)
      # Shape: [batch, num_samples, num_curve_times, num_times].
      self.assertAllEqual(paths.shape, [2, num_samples, 5, 4])
      sampled_std = tf.math.reduce_std(tf.math.log(paths), axis=1)
      for i in range(2):
        for tidx in range(4):
          true_std = self.true_zcb_std(times[tidx], curve_times + times[tidx],
                                       self.volatility_batch_1_factor[i][0],
                                       self.mean_reversion_batch_1_factor[i][0])
          with self.subTest('Batch_{}_time_index{}'.format(i, tidx)):
            self.assertAllClose(
                sampled_std[i, :, tidx], true_std, rtol=5e-4, atol=5e-4)

  @parameterized.named_parameters(
      {
          'testcase_name': 'float32',
          'dtype': np.float32,
      }, {
          'testcase_name': 'float64',
          'dtype': np.float64,
      })
  def test_time_dependent_1d(self, dtype):
    """Tests 1-factor model with time dependent vol."""
    num_samples = 100000

    def discount_fn(x):
      return 0.01 * tf.ones_like(x, dtype=dtype)  # pylint: disable=cell-var-from-loop

    # Piecewise-constant vol with knots at t=0.1 and t=2.0.
    volatility = tff.math.piecewise.PiecewiseConstantFunc(
        [0.1, 2.0], values=self.volatility_time_dep_1_factor, dtype=dtype)

    def _vol_fn(t, r):
      del r
      return volatility([t])

    process = tff.models.hjm.QuasiGaussianHJM(
        dim=1,
        mean_reversion=self.mean_reversion_1_factor,
        volatility=_vol_fn,
        initial_discount_rate_fn=discount_fn,
        dtype=dtype)
    times = np.array([0.1, 1.0, 2.0, 3.0])
    paths, _, _, _ = process.sample_paths(
        times,
        num_samples=num_samples,
        time_step=0.1,
        random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
        seed=[1, 2],
        skip=1000000)
    self.assertEqual(paths.dtype, dtype)
    paths = self.evaluate(paths)
    self.assertAllEqual(paths.shape, [num_samples, 4])
    r_std = np.squeeze(np.std(paths, axis=0))
    expected_std = self.true_std_time_dep(
        times, np.array([0.0, 0.1, 2.0]),
        np.array(self.volatility_time_dep_1_factor),
        self.mean_reversion_1_factor[0])
    self.assertAllClose(r_std, expected_std, rtol=1.75e-4, atol=1.75e-4)

  @parameterized.named_parameters(
      {
          'testcase_name': 'float64',
          'dtype': np.float64,
      })
  def test_state_dependent_vol_1_factor(self, dtype):
    """Tests 1-factor model with state dependent vol."""
    num_samples = 100000

    def discount_fn(x):
      return 0.01 * tf.ones_like(x, dtype=dtype)  # pylint: disable=cell-var-from-loop

    volatility = tff.math.piecewise.PiecewiseConstantFunc(
        [], values=self.volatility_1_factor, dtype=dtype)

    # CIR-style vol: scales with sqrt of the short rate.
    def _vol_fn(t, r):
      return volatility([t]) * tf.math.abs(r)**0.5

    process = tff.models.hjm.QuasiGaussianHJM(
        dim=1,
        mean_reversion=self.mean_reversion_1_factor,
        volatility=_vol_fn,
        initial_discount_rate_fn=discount_fn,
        dtype=dtype)
    times = np.array([0.1, 1.0, 2.0, 3.0])
    _, discount_paths, _, _ = process.sample_paths(
        times,
        num_samples=num_samples,
        time_step=0.1,
        random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
        seed=[1, 2],
        skip=1000000)
    self.assertEqual(discount_paths.dtype, dtype)
    discount_paths = self.evaluate(discount_paths)
    self.assertAllEqual(discount_paths.shape, [num_samples, 4])
    # Mean discount factor must reproduce the initial discount curve.
    discount_mean = np.mean(discount_paths, axis=0)
    expected_mean = np.exp(-0.01 * times)
    self.assertAllClose(discount_mean, expected_mean, rtol=2e-4, atol=2e-4)

  @parameterized.named_parameters(
      {
          'testcase_name': 'float64',
          'dtype': np.float64,
      })
  def test_correctness_4_factor(self, dtype):
    """Tests 4-factor model with constant vol."""
    num_samples = 100000

    def discount_fn(x):
      return 0.01 * tf.ones_like(x, dtype=dtype)  # pylint: disable=cell-var-from-loop

    process = tff.models.hjm.QuasiGaussianHJM(
        dim=4,
        mean_reversion=self.mean_reversion_4_factor,
        volatility=self.volatility_4_factor,
        initial_discount_rate_fn=discount_fn,
        dtype=dtype)
    times = np.array([0.1, 1.0, 2.0, 3.0])
    _, discount_paths, _, _ = process.sample_paths(
        times,
        num_samples=num_samples,
        time_step=0.1,
        random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
        seed=[1, 2],
        skip=1000000)
    self.assertEqual(discount_paths.dtype, dtype)
    discount_paths = self.evaluate(discount_paths)
    self.assertAllEqual(discount_paths.shape, [num_samples, 4])
    # Mean discount factor must reproduce the initial discount curve.
    discount_mean = np.mean(discount_paths, axis=0)
    expected_mean = np.exp(-0.01 * times)
    self.assertAllClose(discount_mean, expected_mean, rtol=1e-4, atol=1e-4)

  @parameterized.named_parameters(
      {
          'testcase_name': 'float64',
          'dtype': np.float64,
      })
  def test_correctness_2_factor_with_correlation(self, dtype):
    """Tests 2-factor correlated model with constant vol."""
    num_samples = 100000

    def discount_fn(x):
      return 0.01 * tf.ones_like(x, dtype=dtype)  # pylint: disable=cell-var-from-loop

    process = tff.models.hjm.QuasiGaussianHJM(
        dim=2,
        mean_reversion=self.mean_reversion_4_factor[:2],
        volatility=self.volatility_4_factor[:2],
        corr_matrix=[[1.0, 0.5], [0.5, 1.0]],
        initial_discount_rate_fn=discount_fn,
        dtype=dtype)
    times = np.array([0.1, 1.0, 2.0, 3.0])
    _, discount_paths, _, _ = process.sample_paths(
        times,
        num_samples=num_samples,
        time_step=0.1,
        random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
        seed=[1, 2],
        skip=1000000)
    self.assertEqual(discount_paths.dtype, dtype)
    discount_paths = self.evaluate(discount_paths)
    self.assertAllEqual(discount_paths.shape, [num_samples, 4])
    # Mean discount factor must reproduce the initial discount curve.
    discount_mean = np.mean(discount_paths, axis=0)
    expected_mean = np.exp(-0.01 * times)
    self.assertAllClose(discount_mean, expected_mean, rtol=1e-4, atol=1e-4)

  def test_zcb_variance_2_factor(self):
    """Tests ZCB for sims 2-Factor correlated model."""
    num_samples = 100000
    for dtype in [tf.float64]:
      curve_times = np.array([0., 0.5, 1.0, 2.0, 5.0])
      times = np.array([0.1, 0.5, 1.0, 3])
      process = tff.models.hjm.QuasiGaussianHJM(
          dim=2,
          mean_reversion=[0.03, 0.03],
          volatility=[0.005, 0.005],
          corr_matrix=[[1.0, 0.5], [0.5, 1.0]],
          initial_discount_rate_fn=self.instant_forward_rate,
          dtype=dtype)
      # generate zero coupon paths
      paths, _, _ = process.sample_discount_curve_paths(
          times,
          curve_times=curve_times,
          num_samples=num_samples,
          time_step=0.1,
          random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
          seed=[1, 2],
          skip=1000000)
      self.assertEqual(paths.dtype, dtype)
      paths = self.evaluate(paths)
      self.assertAllEqual(paths.shape, [num_samples, 5, 4])
      sampled_std = tf.math.reduce_std(tf.math.log(paths), axis=0)
      for tidx in range(4):
        true_std = self.true_zcb_std(times[tidx], curve_times + times[tidx],
                                     0.005, 0.03)
        # Two identical factors with corr 0.5: variance scales by
        # (1 + 1 + 2*0.5) = 3, hence sqrt(3) on the std.
        self.assertAllClose(
            sampled_std[:, tidx], np.sqrt(3) * true_std, rtol=1e-3, atol=1e-3)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| |
try :
from facepy import GraphAPI
from facepy.exceptions import OAuthError
import time
from sys import stdout
except ImportError:
print "Import Error"
token = 'Enter-Token-Here'
OWNER_NAME = ''
photos_together = {}
no_of_comments = {}
words_in_comment = {}
no_of_messages = {}
total_chat_length = {}
def process_photo_tags(tags):
#Until we get an empty result page
if 'error' in tags:
print "Error = ", error
raise Exception("Error in Response")
if 'data' not in tags:
return
while len(tags['data']) > 0:
#Iterating through all the tags in the current result page
for tagged_person in tags['data']:
name = tagged_person['name'].encode('utf-8')
if name == OWNER_NAME:
continue
if name in photos_together:
#If the tag was encountered before increment
photos_together[name] += 1
else:
#Else initialize new count
photos_together[name] = 1
#Get the nect result page
if 'paging' in tags and 'next' in tags['paging']:
request_str = tags['paging']['next'].replace('https://graph.facebook.com/', '')
request_str = request_str.replace('limit=25', 'limit=200')
tags = graph.get(request_str)
else:
tags['data'] = []
def process_photo_comments(comments):
if 'error' in comments:
print "Error = ", error
raise Exception("Error in Response")
if 'data' not in comments:
return
while len(comments['data']) > 0:
for comment in comments['data']:
try:
commentor = comment['from']['name'].encode('utf-8')
if commentor == OWNER_NAME:
#Ignore Comment by owner on his own photos
continue
word_count = len(comment['message'].encode('utf-8').split())
except UnicodeEncodeError:
print comment['message']
raise Exception('Unicode Encoding Error Encountered')
if commentor in no_of_comments:
#If a comment by this person was encountered before
no_of_comments[commentor] += 1
words_in_comment[commentor] += word_count
else:
#If this is a new commentor
no_of_comments[commentor] = 1
words_in_comment[commentor] = word_count
if 'paging' in comments and 'next' in comments['paging']:
request_str = comments['paging']['next'].replace('https://graph.facebook.com/', '')
request_str = request_str.replace('limit=25', 'limit=200')
comments = graph.get(request_str)
else:
comments['data'] = []
def process_photos(photos):
if 'error' in photos:
print "Error = ", error
raise Exception("Error in Response")
no_of_photos = 0
if 'data' not in photos:
return
while len(photos['data']) > 0:
for photo in photos['data']:
if 'tags' in photo:
process_photo_tags(photo['tags'])
if 'comments' in photo:
process_photo_comments(photo['comments'])
no_of_photos += 1
stdout.write("\rNumber of Photos Processed = %d" % no_of_photos)
stdout.flush()
if 'paging' in photos and 'next' in photos['paging']:
request_str = photos['paging']['next'].replace('https://graph.facebook.com/', '')
request_str = request_str.replace('limit=25', 'limit=200')
photos = graph.get(request_str)
else:
photos['data'] = []
def process_texts(texts, friend_name):
if 'error' in texts:
print "Error = ", error
raise Exception("Error in Response")
if 'data' not in texts:
return
while len(texts['data']) > 0:
for text in texts['data']:
if 'message' not in text:
#This can happen in message with only an attachment and No text
continue
if friend_name in no_of_messages:
no_of_messages[friend_name] += 1
total_chat_length[friend_name] += len(text['message'])
else:
no_of_messages[friend_name] = 1
total_chat_length[friend_name] = len(text['message'])
if 'paging' in texts and 'next' in texts['paging']:
request_str = texts['paging']['next'].replace('https://graph.facebook.com/', '')
request_str = request_str.replace('limit=25', 'limit=100')
success = False
while not success:
try:
texts = graph.get(request_str)
success = True
except OAuthError:
stdout.write("\nCall Limit Exceeded ! Sleeping for 4 min before retrying !!\n")
for i in range(250):
stdout.write("\rSleeing.......%d" % i)
stdout.flush()
time.sleep(1)
stdout.write("Woke Up! Retrying !!\n")
else:
texts['data'] = []
def process_all_messages(messages):
if 'error' in messages:
print "Error = ", error
raise Exception("Error in Response")
if 'data' not in messages:
return
while len(messages['data']) > 0:
for chat in messages['data']:
if len(chat['to']['data']) != 2:
#Ignore Group and self messages
continue
friend_name = chat['to']['data'][1]['name'].encode('utf-8')
if friend_name == OWNER_NAME:
friend_name = chat['to']['data'][0]['name'].encode('utf-8')
success = False
while not success:
try:
stdout.write("\rProcessing Chat With : %s " % friend_name)
stdout.flush()
process_texts(chat['comments'], friend_name)
success = True
except OAuthError:
stdout.write("\nCall Limit Exceeded ! Sleeping for 10 min before retrying !!")
stdout.flush()
no_of_messages[friend_name] = 0
total_chat_length[friend_name] = 0
stdout.write('\n')
for i in range(600):
stdout.write("\rSleeing.......%d" % i)
stdout.flush()
time.sleep(1)
stdout.write("Woke Up! Retrying !!")
if 'paging' in messages and 'next' in messages['paging']:
request_str = messages['paging']['next'].replace('https://graph.facebook.com/', '')
request_str = request_str.replace('limit=25', 'limit=400')
messages = graph.get(request_str)
else:
mesages['data'] = []
graph = GraphAPI(token)
me = graph.get('v2.0/me?fields=id,name')
OWNER_NAME = me['name'].encode('utf-8')
photos = graph.get('v2.0/me/photos?fields=comments{message,from},tags{name}&limit=100')
process_photos(photos)
stdout.write('\n\n')
stdout.flush()
inbox = graph.get('v2.0/me/inbox?fields=comments{message},to&limit=100')
process_all_messages(inbox)
top_photos = []
for people in photos_together:
temp = []
temp.append(people)
temp.append(photos_together[people])
top_photos.append(temp)
top_photos.sort(key=lambda x: x[1], reverse=True)
print "Top People Whom You share photos"
for i in range(5):
print i+1, ". ", top_photos[i][0], " - ", top_photos[i][1]
top_commentors = []
for people in no_of_comments:
temp = []
temp.append(people)
temp.append(no_of_comments[people])
top_commentors.append(temp)
top_commentors.sort(key=lambda x: x[1], reverse=True)
print "Top People Who comments on your photo"
for i in range(5):
print i+1, ". ", top_commentors[i][0], " - ", top_commentors[i][1]
long_commentors = []
for people in words_in_comment:
temp = []
temp.append(people)
temp.append(words_in_comment[people])
long_commentors.append(temp)
long_commentors.sort(key=lambda x: x[1], reverse=True)
print "Top People with most content in comments"
for i in range(5):
print i+1, ". ", long_commentors[i][0], " - ", long_commentors[i][1]
top_chatboxes = []
for people in no_of_messages:
temp = []
temp.append(people)
temp.append(no_of_messages[people])
top_chatboxes.append(temp)
top_chatboxes.sort(key=lambda x:x[1], reverse=True)
print "Top people with most number of Messages"
for i in range(5):
print i+1, ". ", top_chatboxes[i][0], " - ", top_chatboxes[i][1]
long_chats = []
for people in total_chat_length:
temp = []
temp.append(people)
temp.append(total_chat_length[people])
long_chats.append(temp)
long_chats.sort(key=lambda x: x[1], reverse=True)
print "Top People with most content in inbox"
for i in range(5):
print i+1, ". ", long_chats[i][0], " - ", long_chats[i][1]
total_count_of_comments = 0
for num in top_commentors:
total_count_of_comments += num[1]
print "Total Number of comments across all pics = ", total_count_of_comments
| |
"""DenseNet models for Keras.
# Reference
- [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993.pdf)
- [The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic Segmentation](https://arxiv.org/pdf/1611.09326.pdf)
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import warnings
from keras.models import Model
from keras.layers.core import Dropout, Activation, Reshape
from keras.layers.convolutional import Convolution2D, Deconvolution2D, AtrousConvolution2D, UpSampling2D
from keras.layers.pooling import AveragePooling2D
from keras.layers import Input, merge
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras.engine.topology import get_source_inputs
from keras.applications.imagenet_utils import _obtain_input_shape
import keras.backend as K
from layers import SubPixelUpscaling
def DenseNetFCN(input_shape, nb_dense_block=5, growth_rate=16, nb_layers_per_block=4,
                reduction=0.0, dropout_rate=0.0, weight_decay=1E-4, init_conv_filters=48,
                include_top=True, weights=None, input_tensor=None, classes=1, activation='softmax',
                upsampling_conv=128, upsampling_type='upsampling', batchsize=None):
    """Instantiate the DenseNet FCN architecture.

    Note that when using TensorFlow, for best performance you should set
    `image_dim_ordering="tf"` in your Keras config at ~/.keras/keras.json.

    # Arguments
        input_shape: shape tuple of the input. Mandatory for fully
            convolutional models; width and height must each be at least
            2 ** nb_dense_block.
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        reduction: reduction factor of transition blocks.
            Note : reduction value is inverted to compute compression.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        init_conv_filters: number of filters in the initial convolution layer
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: must be `None` (random initialization); no pre-trained
            weights are provided for this architecture.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        classes: optional number of classes to classify images into.
        activation: type of activation at the top layer. Can be one of
            'softmax' or 'sigmoid'. Note that if sigmoid is used, classes
            must be 1.
        upsampling_conv: number of convolutional layers in upsampling via
            subpixel convolution
        upsampling_type: can be one of 'upsampling', 'deconv', 'atrous' and
            'subpixel'. Defines type of upsampling algorithm used.
        batchsize: fixed batch size. This is a temporary requirement for
            computation of output shape in the case of Deconvolution2D layers.
            Parameter will be removed in next iteration of Keras, which infers
            output shape of deconvolution layers automatically.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: on invalid `weights`, `upsampling_type`, `input_shape`,
            `activation` or block-count arguments.
    """
    if weights not in {None}:
        raise ValueError('The `weights` argument should be '
                         '`None` (random initialization) as no '
                         'model weights are provided.')

    upsampling_type = upsampling_type.lower()

    if upsampling_type not in ['upsampling', 'deconv', 'atrous', 'subpixel']:
        raise ValueError('Parameter "upsampling_type" must be one of "upsampling", '
                         '"deconv", "atrous" or "subpixel".')

    if upsampling_type == 'deconv' and batchsize is None:
        raise ValueError('If "upsampling_type" is deconvoloution, then a fixed '
                         'batch size must be provided in batchsize parameter.')

    if input_shape is None:
        raise ValueError('For fully convolutional models, input shape must be supplied.')

    # NOTE(review): this guard tests `nb_dense_block` while the message talks
    # about layers per block; the intent is ambiguous, so it is left unchanged.
    if type(nb_layers_per_block) is not list and nb_dense_block < 1:
        raise ValueError('Number of dense layers per block must be greater than 1. Argument '
                         'value was %d.' % (nb_layers_per_block))

    if upsampling_type == 'atrous':
        warnings.warn(
            'Atrous Convolution upsampling does not correctly work (see https://github.com/fchollet/keras/issues/4018).\n'
            'Switching to `upsampling` type upscaling.')
        upsampling_type = 'upsampling'

    if activation not in ['softmax', 'sigmoid']:
        raise ValueError('activation must be one of "softmax" or "sigmoid"')

    if activation == 'sigmoid' and classes != 1:
        raise ValueError('sigmoid activation can only be used when classes = 1')

    # Determine proper input shape: each dense block halves the spatial size.
    min_size = 2 ** nb_dense_block

    if K.image_dim_ordering() == 'th':
        if input_shape is not None:
            if ((input_shape[1] is not None and input_shape[1] < min_size) or
                    (input_shape[2] is not None and input_shape[2] < min_size)):
                raise ValueError('Input size must be at least ' +
                                 str(min_size) + 'x' + str(min_size) + ', got '
                                 '`input_shape=' + str(input_shape) + '`')
        else:
            input_shape = (classes, None, None)
    else:
        if input_shape is not None:
            if ((input_shape[0] is not None and input_shape[0] < min_size) or
                    (input_shape[1] is not None and input_shape[1] < min_size)):
                raise ValueError('Input size must be at least ' +
                                 str(min_size) + 'x' + str(min_size) + ', got '
                                 '`input_shape=' + str(input_shape) + '`')
        else:
            input_shape = (None, None, classes)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Bug fix: `activation` was validated above but never forwarded, so the
    # 'sigmoid' option silently fell back to the helper's 'softmax' default.
    x = __create_fcn_dense_net(classes, img_input, include_top, nb_dense_block,
                               growth_rate, reduction, dropout_rate, weight_decay,
                               nb_layers_per_block, upsampling_conv, upsampling_type,
                               batchsize, init_conv_filters, input_shape,
                               activation=activation)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='fcn-densenet')

    return model
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1E-4):
    '''Pre-activation convolution block: BN -> ReLU -> 3x3 Conv2D, optionally
    preceded by a 1x1 bottleneck convolution (itself followed by BN -> ReLU),
    with optional dropout after each convolution.

    Args:
        ip: input keras tensor
        nb_filter: number of output filters of the 3x3 convolution
        bottleneck: if True, prepend a 1x1 conv with 4 * nb_filter filters
        dropout_rate: dropout rate (falsy value disables dropout)
        weight_decay: l2 regularization factor

    Returns: keras tensor produced by the block
    '''
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    out = BatchNormalization(mode=0, axis=channel_axis, gamma_regularizer=l2(weight_decay),
                             beta_regularizer=l2(weight_decay))(ip)
    out = Activation('relu')(out)

    if bottleneck:
        # 4x expansion factor, as in the reference Torch implementation
        # (https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua).
        out = Convolution2D(nb_filter * 4, 1, 1, init='he_uniform', border_mode='same',
                            bias=False, W_regularizer=l2(weight_decay))(out)
        if dropout_rate:
            out = Dropout(dropout_rate)(out)
        out = BatchNormalization(mode=0, axis=channel_axis, gamma_regularizer=l2(weight_decay),
                                 beta_regularizer=l2(weight_decay))(out)
        out = Activation('relu')(out)

    out = Convolution2D(nb_filter, 3, 3, init='he_uniform', border_mode='same', bias=False,
                        W_regularizer=l2(weight_decay))(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)

    return out
def __transition_block(ip, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    '''Transition block: BN -> ReLU -> 1x1 conv (with channel compression) ->
    optional dropout -> 2x2 average pooling.

    Args:
        ip: input keras tensor
        nb_filter: number of feature maps before compression
        compression: calculated as 1 - reduction; scales down the number of
            feature maps in the 1x1 convolution
        dropout_rate: dropout rate (falsy value disables dropout)
        weight_decay: l2 regularization factor

    Returns: keras tensor after batch norm, relu, 1x1 conv, dropout and
        average pooling
    '''
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    out = BatchNormalization(mode=0, axis=channel_axis, gamma_regularizer=l2(weight_decay),
                             beta_regularizer=l2(weight_decay))(ip)
    out = Activation('relu')(out)
    out = Convolution2D(int(nb_filter * compression), 1, 1, init='he_uniform',
                        border_mode='same', bias=False,
                        W_regularizer=l2(weight_decay))(out)
    if dropout_rate:
        out = Dropout(dropout_rate)(out)
    # Halve the spatial resolution.
    out = AveragePooling2D((2, 2), strides=(2, 2))(out)

    return out
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False, dropout_rate=None, weight_decay=1E-4,
                  grow_nb_filters=True, return_concat_list=False):
    '''Dense block: each conv block receives the concatenation of the block
    input and every previous conv block's output.

    Args:
        x: input keras tensor
        nb_layers: number of conv blocks to append
        nb_filter: running number of filters
        growth_rate: filters added by each conv block
        bottleneck: forward to __conv_block's bottleneck option
        dropout_rate: dropout rate (falsy value disables dropout)
        weight_decay: weight decay factor
        grow_nb_filters: if True, grow nb_filter by growth_rate per layer
        return_concat_list: also return the list of per-layer feature maps

    Returns: (tensor, nb_filter) or (tensor, nb_filter, feature_list) when
        return_concat_list is True
    '''
    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    feature_list = [x]
    for _ in range(nb_layers):
        layer_out = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay)
        feature_list.append(layer_out)
        # Re-concatenate everything so the next layer sees all prior features.
        x = merge(feature_list, mode='concat', concat_axis=channel_axis)
        if grow_nb_filters:
            nb_filter += growth_rate

    if return_concat_list:
        return x, nb_filter, feature_list
    return x, nb_filter
def __transition_up_block(ip, nb_filters, type='upsampling', output_shape=None, weight_decay=1E-4):
    '''Upscaling block (factor = 2).

    Args:
        ip: keras tensor
        nb_filters: number of filters for the convolutional variants
        type: one of 'upsampling', 'subpixel', 'atrous'; anything else
            falls through to a strided Deconvolution2D
        output_shape: required when the deconvolution branch is taken
        weight_decay: weight decay factor
    Returns: keras tensor after the upsampling operation
    '''
    if type == 'upsampling':
        return UpSampling2D()(ip)
    if type == 'subpixel':
        up = Convolution2D(nb_filters, 3, 3, activation="relu", border_mode='same',
                           W_regularizer=l2(weight_decay), bias=False, init='he_uniform')(ip)
        up = SubPixelUpscaling(scale_factor=2)(up)
        return Convolution2D(nb_filters, 3, 3, activation="relu", border_mode='same',
                             W_regularizer=l2(weight_decay), bias=False, init='he_uniform')(up)
    if type == 'atrous':
        # waiting on https://github.com/fchollet/keras/issues/4018
        return AtrousConvolution2D(nb_filters, 3, 3, activation="relu", W_regularizer=l2(weight_decay),
                                   bias=False, atrous_rate=(2, 2), init='he_uniform')(ip)
    # Default branch ('deconv' or any other value): transposed convolution.
    return Deconvolution2D(nb_filters, 3, 3, output_shape, activation='relu', border_mode='same',
                           subsample=(2, 2), init='he_uniform')(ip)
def __create_fcn_dense_net(nb_classes, img_input, include_top, nb_dense_block=5, growth_rate=12,
                           reduction=0.0, dropout_rate=None, weight_decay=1E-4,
                           nb_layers_per_block=4, nb_upsampling_conv=128, upsampling_type='upsampling',
                           batchsize=None, init_conv_filters=48, input_shape=None, activation='softmax'):
    ''' Build the DenseNet model
    Args:
        nb_classes: number of classes
        img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        nb_upsampling_conv: number of convolutional layers in upsampling via subpixel convolution
        upsampling_type: Can be one of 'upsampling', 'deconv', 'atrous' and
            'subpixel'. Defines type of upsampling algorithm used.
        batchsize: Fixed batch size. This is a temporary requirement for
            computation of output shape in the case of Deconvolution2D layers.
            Parameter will be removed in next iteration of Keras, which infers
            output shape of deconvolution layers automatically.
        input_shape: Only used for shape inference in fully convolutional networks.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
            Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor with nb_layers of conv_block appended
    '''
    concat_axis = 1 if K.image_dim_ordering() == "th" else -1
    if concat_axis == 1: # th dim ordering
        _, rows, cols = input_shape
    else:
        rows, cols, _ = input_shape
    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, "reduction value must lie between 0.0 and 1.0"
    # check if upsampling_conv has minimum number of filters
    # minimum is set to 12, as at least 3 color channels are needed for correct upsampling
    assert nb_upsampling_conv > 12 and nb_upsampling_conv % 4 == 0, "Parameter `upsampling_conv` number of channels must " \
                                                                   "be a positive number divisible by 4 and greater " \
                                                                   "than 12"
    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block) # Convert tuple to list
        assert len(nb_layers) == (nb_dense_block + 1), "If list, nb_layer is used as provided. " \
                                                       "Note that list size must be (nb_dense_block + 1)"
        bottleneck_nb_layers = nb_layers[-1]
        # Mirror the down-path layer counts for the up path (shared bottleneck).
        rev_layers = nb_layers[::-1]
        nb_layers.extend(rev_layers[1:])
    else:
        bottleneck_nb_layers = nb_layers_per_block
        nb_layers = [nb_layers_per_block] * (2 * nb_dense_block + 1)
    # compute compression factor
    compression = 1.0 - reduction
    # Initial convolution
    x = Convolution2D(init_conv_filters, 3, 3, init="he_uniform", border_mode="same", name="initial_conv2D", bias=False,
                      W_regularizer=l2(weight_decay))(img_input)
    nb_filter = init_conv_filters
    skip_list = []
    # Add dense blocks and transition down block
    for block_idx in range(nb_dense_block):
        x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter, growth_rate,
                                     dropout_rate=dropout_rate, weight_decay=weight_decay)
        # Skip connection
        skip_list.append(x)
        # add transition_block
        x = __transition_block(x, nb_filter, compression=compression, dropout_rate=dropout_rate,
                               weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression) # this is calculated inside transition_down_block
    # The last dense_block does not have a transition_down_block
    # return the concatenated feature maps without the concatenation of the input
    _, nb_filter, concat_list = __dense_block(x, bottleneck_nb_layers, nb_filter, growth_rate,
                                              dropout_rate=dropout_rate, weight_decay=weight_decay,
                                              return_concat_list=True)
    skip_list = skip_list[::-1] # reverse the skip list
    # NOTE(review): the hard-coded // 16 assumes 4 spatial halvings (2**4);
    # for nb_dense_block != 4 this shape is only consumed by the 'deconv'
    # upsampling branch (Deconvolution2D output_shape) -- confirm.
    if K.image_dim_ordering() == 'th':
        out_shape = [batchsize, nb_filter, rows // 16, cols // 16]
    else:
        out_shape = [batchsize, rows // 16, cols // 16, nb_filter]
    # Add dense blocks and transition up block
    for block_idx in range(nb_dense_block):
        n_filters_keep = growth_rate * nb_layers[nb_dense_block + block_idx]
        # out_shape is mutated in place each iteration: channel slot first,
        # then the spatial dims are doubled after the upsampling step below.
        if K.image_dim_ordering() == 'th':
            out_shape[1] = n_filters_keep
        else:
            out_shape[3] = n_filters_keep
        # upsampling block must upsample only the feature maps (concat_list[1:]),
        # not the concatenation of the input with the feature maps (concat_list[0].
        l = merge(concat_list[1:], mode='concat', concat_axis=concat_axis)
        t = __transition_up_block(l, nb_filters=n_filters_keep, type=upsampling_type, output_shape=out_shape)
        # concatenate the skip connection with the transition block
        x = merge([t, skip_list[block_idx]], mode='concat', concat_axis=concat_axis)
        if K.image_dim_ordering() == 'th':
            out_shape[2] *= 2
            out_shape[3] *= 2
        else:
            out_shape[1] *= 2
            out_shape[2] *= 2
        # Dont allow the feature map size to grow in upsampling dense blocks
        _, nb_filter, concat_list = __dense_block(x, nb_layers[nb_dense_block + block_idx + 1], nb_filter=growth_rate,
                                                  growth_rate=growth_rate, dropout_rate=dropout_rate,
                                                  weight_decay=weight_decay,
                                                  return_concat_list=True, grow_nb_filters=False)
    if include_top:
        # Per-pixel classification head: 1x1 conv to nb_classes channels,
        # flatten spatial dims, apply the activation, then restore shape.
        x = Convolution2D(nb_classes, 1, 1, activation='linear', border_mode='same', W_regularizer=l2(weight_decay),
                          bias=False)(x)
        if K.image_dim_ordering() == 'th':
            channel, row, col = input_shape
        else:
            row, col, channel = input_shape
        x = Reshape((row * col, nb_classes))(x)
        x = Activation(activation)(x)
        # NOTE(review): final Reshape is channels-last regardless of dim
        # ordering -- confirm this is intended for 'th'.
        x = Reshape((row, col, nb_classes))(x)
    return x
if __name__ == '__main__':
    # Smoke test: build a small DenseNet-FCN for 32x32 single-channel input
    # and print its layer summary (DenseNetFCN is defined earlier in this file).
    model = DenseNetFCN((32, 32, 1), nb_dense_block=5, growth_rate=16,
                        nb_layers_per_block=4, upsampling_type='upsampling', classes=1, activation='sigmoid')
    model.summary()
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancersOperations(object):
    """LoadBalancersOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_08_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: auto-generated code (AutoRest); manual edits will be lost on regeneration.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # Internal: issues the raw DELETE request once; begin_delete layers LRO
    # polling on top of this call.
    def _delete_initial(
        self,
        resource_group_name, # type: str
        load_balancer_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are all acceptable: the service may complete synchronously
        # or accept the request for asynchronous deletion.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        load_balancer_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified load balancer.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # cls=identity keeps the raw PipelineResponse so the poller can
            # inspect the LRO headers on the initial response.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                load_balancer_name=load_balancer_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        load_balancer_name, # type: str
        expand=None, # type: Optional[str]
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.LoadBalancer"
        """Gets the specified load balancer.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: LoadBalancer, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_08_01.models.LoadBalancer
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('LoadBalancer', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
    # Internal: issues the raw PUT request once; begin_create_or_update layers
    # LRO polling on top of this call.
    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        load_balancer_name, # type: str
        parameters, # type: "_models.LoadBalancer"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.LoadBalancer"
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'LoadBalancer')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = updated existing resource, 201 = created new resource.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('LoadBalancer', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('LoadBalancer', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        load_balancer_name, # type: str
        parameters, # type: "_models.LoadBalancer"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.LoadBalancer"]
        """Creates or updates a load balancer.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param parameters: Parameters supplied to the create or update load balancer operation.
        :type parameters: ~azure.mgmt.network.v2019_08_01.models.LoadBalancer
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either LoadBalancer or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.LoadBalancer]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # cls=identity keeps the raw PipelineResponse so the poller can
            # inspect the LRO headers on the initial response.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                load_balancer_name=load_balancer_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('LoadBalancer', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
    # Internal: issues the raw PATCH request once; begin_update_tags layers
    # LRO polling on top of this call.
    def _update_tags_initial(
        self,
        resource_group_name, # type: str
        load_balancer_name, # type: str
        parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.LoadBalancer"
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('LoadBalancer', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
    def begin_update_tags(
        self,
        resource_group_name, # type: str
        load_balancer_name, # type: str
        parameters, # type: "_models.TagsObject"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.LoadBalancer"]
        """Updates a load balancer tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param load_balancer_name: The name of the load balancer.
        :type load_balancer_name: str
        :param parameters: Parameters supplied to update load balancer tags.
        :type parameters: ~azure.mgmt.network.v2019_08_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either LoadBalancer or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.LoadBalancer]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancer"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # cls=identity keeps the raw PipelineResponse so the poller can
            # inspect the LRO headers on the initial response.
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                load_balancer_name=load_balancer_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('LoadBalancer', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}'} # type: ignore
    def list_all(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.LoadBalancerListResult"]
        """Gets all the load balancers in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.LoadBalancerListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Builds the GET request for the first page (full URL + api-version)
        # or for a continuation page (next_link already carries the query).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/loadBalancers'} # type: ignore
    def list(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.LoadBalancerListResult"]
        """Gets all the load balancers in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LoadBalancerListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.LoadBalancerListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Builds the GET request for the first page (full URL + api-version)
        # or for a continuation page (next_link already carries the query).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('LoadBalancerListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers'} # type: ignore
| |
import struct
from collections import namedtuple
import os
from functools import lru_cache
import argparse
import logging
from datetime import datetime, timedelta
# fnode "type" field value -> human-readable file type name.
filetypes = {
    0: 'fnode_file',
    1: 'free_space_map',
    2: 'free_fnodes_map',
    3: 'space_accounting_file',
    4: 'bad_device_blocks_file',
    6: 'directory',
    8: 'data',
    9: 'unknown',
}
# Decoded fnode status bits (see FileSystem._parse_flags).
Flags = namedtuple('Flags', ['allocated', 'long_file', 'modified', 'deleted'])
# ISO volume label record, read from byte offset 768 of the image.
ISOVolumeLabel = namedtuple(
    'ISOVolumeLabel',
    [
        'label', 'name', 'structure', 'recording_side',
        'interleave_factor', 'iso_version'
    ]
)
# RMX-specific volume metadata, read from byte offset 384 of the image.
RMXVolumeInformation = namedtuple(
    'RMXVolumeInformation',
    [
        'name',
        'file_driver',
        'block_size',
        'volume_size',
        'num_fnodes',
        'fnode_start',
        'fnode_size',
        'root_fnode',
    ]
)
# One parsed entry of the fnode table (see FileSystem._read_fnode).
FileNode = namedtuple(
    'FileNode',
    [
        'flags', 'type', 'granularity', 'owner', 'creation_time', 'access_time',
        'modification_time', 'total_size', 'total_blocks', 'block_pointers',
        'size', 'id_count', 'access_rights', 'parent',
    ]
)
class File:
    """A regular ('data') file inside an iRMX86 filesystem image.

    Exposes the fnode timestamps and the file content via :meth:`read`.
    """
    def __init__(self, abspath, filesystem):
        self.fnode = filesystem._path_to_fnode(abspath)
        assert self.fnode.type == 'data'
        self.creation_time = self.fnode.creation_time
        # BUGFIX: this previously copied creation_time into modification_time,
        # so extracted files got their creation date as mtime (see main()).
        self.modification_time = self.fnode.modification_time
        self.access_time = self.fnode.access_time
        self.filesystem = filesystem
        self.abspath = abspath
        self.name = os.path.basename(abspath)
    def read(self):
        """Return the complete file content as bytes."""
        return self.filesystem._gather_blocks(self.fnode.block_pointers)
    def __repr__(self):
        return 'File({}) at {}'.format(self.abspath, self.filesystem.fp.name)
class Directory:
    """A directory inside an iRMX86 filesystem image.

    On construction the directory content is read once and split into
    ``files`` and ``directories`` name lists.
    """
    def __init__(self, abspath, filesystem):
        self.fnode = filesystem._path_to_fnode(abspath)
        assert self.fnode.type == 'directory'
        self.filesystem = filesystem
        self.abspath = abspath
        self.creation_time = self.fnode.creation_time
        # BUGFIX: the modification time was previously (incorrectly) taken
        # from the fnode's creation_time.
        self.modification_time = self.fnode.modification_time
        self.access_time = self.fnode.access_time
        self.files = []
        self.directories = []
        for name, fnode in filesystem._read_directory(self.fnode).items():
            if fnode.type == 'data':
                self.files.append(name)
            elif fnode.type == 'directory':
                self.directories.append(name)
    def __getitem__(self, path):
        """Return the File/Directory for ``path`` relative to this directory."""
        return self.filesystem[os.path.join(self.abspath, path)]
    def ls(self):
        """Return the entry names split into 'dirs' and 'files'."""
        return {'dirs': self.directories, 'files': self.files}
    def walk(self):
        """Walk the tree rooted at this directory (see FileSystem.walk)."""
        return self.filesystem.walk(self.abspath)
    def __repr__(self):
        return 'Directory({}) at {}'.format(self.abspath, self.filesystem.fp.name)
# A run of `num_blocks` contiguous volume blocks starting at block `first_block`.
BlockPointer = namedtuple('BlockPointer', ['num_blocks', 'first_block'])
class FileSystem:
    """Read-only driver for an iRMX86 volume image.

    Construction parses the ISO volume label (byte offset 768), the RMX
    volume information (byte offset 384) and the whole fnode table; after
    that, entries can be looked up by path (``fs['/dir/file']``) or browsed
    via :meth:`ls`, :meth:`cd` and :meth:`walk`.  Usable as a context
    manager, which closes the underlying image file on exit.
    """
    def __init__(self, filename, epoch=datetime(1978, 1, 1)):
        """Open ``filename`` and parse all volume metadata.

        ``epoch`` is the reference date that fnode timestamps count
        seconds from.
        """
        self.fp = open(filename, 'rb')
        self.epoch = epoch
        self._fnodes = {}
        self._read_iso_vol_label()
        self._read_rmx_volume_information()
        self._read_fnode_file()
        self._root = self._fnodes[self.rmx_volume_information.root_fnode]
        self._cwd = '/'
    def __getitem__(self, path):
        """Return a :class:`File` or :class:`Directory` object for ``path``."""
        path = self.abspath(path)
        fnode = self._path_to_fnode(path)
        if fnode.type == 'data':
            return File(path, self)
        elif fnode.type == 'directory':
            return Directory(path, self)
    def __repr__(self):
        return 'iRmx86-Filesystem at {}'.format(self.fp.name)
    @lru_cache()
    def _path_to_fnode(self, path):
        """Resolve ``path`` to its FileNode; raise IOError when it is missing."""
        path = self.abspath(path)
        *dirs, filename = path.split('/')[1:]
        current_dir = self._read_directory(self._root)
        current_node = self._root
        for d in dirs:
            try:
                current_node = current_dir[d]
                current_dir = self._read_directory(current_dir[d])
            except KeyError:
                raise IOError('No such file or directory: {}'.format(path))
        if filename:
            try:
                node = current_dir[filename]
            except KeyError:
                raise IOError('No such file or directory: {}'.format(path))
        else:
            # Path ended in '/': it names the directory itself.
            node = current_node
        return node
    def _read_without_position_change(self, start, num_bytes):
        """Read ``num_bytes`` at offset ``start`` without moving the file position."""
        current_position = self.fp.tell()
        self.fp.seek(start, 0)
        b = self.fp.read(num_bytes)
        self.fp.seek(current_position, 0)
        return b
    def _read_iso_vol_label(self):
        """Parse the ISO volume label stored at byte offset 768."""
        raw_data = self._read_without_position_change(768, 128)
        (
            label, name, structure, recording_side,
            interleave_factor, iso_version
        ) = struct.unpack('3sx6ss60xs4x2sxs48x', raw_data)
        label = label.decode('ascii').strip()
        name = name.decode('ascii').strip()
        recording_side = int(recording_side)
        structure = structure.decode('ascii').strip()
        interleave_factor = int(interleave_factor)
        iso_version = int(iso_version)
        self.iso_volume_label = ISOVolumeLabel(
            label, name, structure, recording_side,
            interleave_factor, iso_version
        )
    def _read_rmx_volume_information(self):
        """Parse the RMX volume information block stored at byte offset 384."""
        raw_data = self._read_without_position_change(384, 128)
        (
            name, file_driver, block_size, volume_size,
            num_fnodes, fnode_start, fnode_size, root_fnode
        ) = struct.unpack('<10sxBHIHIHH100x', raw_data)
        name = name.decode().strip('\x00')
        file_driver = int(file_driver)
        self.rmx_volume_information = RMXVolumeInformation(
            name, file_driver, block_size, volume_size,
            num_fnodes, fnode_start, fnode_size, root_fnode
        )
    def _read_fnode_file(self):
        """Load the fnode table, keeping only allocated, non-deleted entries."""
        start = self.rmx_volume_information.fnode_start
        num_fnodes = self.rmx_volume_information.num_fnodes
        fnode_size = self.rmx_volume_information.fnode_size
        raw_data = self._read_without_position_change(
            start, num_fnodes * fnode_size,
        )
        for fnode_id in range(num_fnodes):
            start = fnode_id * fnode_size
            end = (fnode_id + 1) * fnode_size
            fnode_data = raw_data[start:end]
            fnode = self._read_fnode(fnode_data)
            if fnode.flags.allocated and not fnode.flags.deleted:
                self._fnodes[fnode_id] = fnode
    def _read_fnode(self, raw_data):
        """Decode a single raw fnode record into a FileNode."""
        fmt = '<HBBHIIIII40sI4xH9sH'
        fmt_size = struct.calcsize(fmt)
        # On-disk records may be larger than the fixed part; skip the rest.
        num_aux_bytes = self.rmx_volume_information.fnode_size - fmt_size
        elems = struct.unpack(fmt + '{}x'.format(num_aux_bytes), raw_data)
        (
            flags, file_type, granularity, owner, creation_time,
            access_time, modification_time, total_size, total_blocks,
            pointer_data, size, id_count, accessor_data, parent
        ) = elems
        flags = self._parse_flags(flags)
        file_type = filetypes[file_type]
        pointers = self._parse_pointer_data(pointer_data)
        # Timestamps are stored as seconds since the volume epoch.
        creation_time = self.epoch + timedelta(seconds=creation_time)
        access_time = self.epoch + timedelta(seconds=access_time)
        modification_time = self.epoch + timedelta(seconds=modification_time)
        if flags.long_file:
            # Long files: each pointer addresses an indirect block that in
            # turn lists the actual data runs.
            block_pointers = []
            for num_blocks, first_block in pointers:
                block_pointers.extend(
                    self._parse_indirect_blocks(num_blocks, first_block)
                )
        else:
            block_pointers = pointers
        return FileNode(
            flags, file_type, granularity, owner, creation_time,
            access_time, modification_time, total_size, total_blocks,
            tuple(block_pointers), size, id_count, accessor_data, parent
        )
    def _parse_pointer_data(self, data):
        """Decode the fnode's eight (num_blocks, block_address) pointer slots."""
        fmt = '<H3s'
        s = struct.calcsize(fmt)
        pointers = []
        for start in range(0, 8 * s, s):
            num_blocks, block_address = struct.unpack(fmt, data[start: start + s])
            if num_blocks == 0:
                # Unused pointer slot.
                continue
            block_address = self._read_24bit_integer(block_address)
            pointers.append(BlockPointer(num_blocks, block_address))
        return pointers
    @staticmethod
    def _read_24bit_integer(data):
        """Decode a 3-byte little-endian unsigned integer."""
        val, = struct.unpack('<I', data + b'\x00')
        return val
    def _parse_indirect_blocks(self, num_blocks, first_block):
        """Read an indirect block list and return its BlockPointers."""
        fmt = '<B3s'
        s = struct.calcsize(fmt)
        data = self._read_without_position_change(
            first_block, num_blocks * s
        )
        indirect_blocks = []
        for start in range(0, num_blocks * s, s):
            num_blocks, block_address = struct.unpack(fmt, data[start: start + s])
            block_address = self._read_24bit_integer(block_address)
            indirect_blocks.append(BlockPointer(num_blocks, block_address))
        return indirect_blocks
    @staticmethod
    def _parse_flags(flags):
        """Split the 16-bit fnode flag word into a Flags tuple."""
        # Render LSB-first so positional indexing matches the bit number.
        flags = '{0:016b}'.format(flags)[::-1]
        flags = list(map(lambda x: bool(int(x)), flags))
        flags = Flags(
            allocated=flags[0],
            long_file=flags[1],
            modified=flags[5],
            deleted=flags[6],
        )
        return flags
    def __enter__(self):
        return self
    def __exit__(self, type, value, traceback):
        self.fp.close()
    def _get_file_data(self, fnode):
        """Return the complete data addressed by ``fnode``."""
        return self._gather_blocks(fnode.block_pointers)
    def _gather_blocks(self, block_pointers):
        """Concatenate all data runs referenced by ``block_pointers``."""
        content = b''
        for num_blocks, first_block in block_pointers:
            content += self._read_blocks(num_blocks, first_block)
        return content
    def _read_blocks(self, num_blocks, first_block):
        ''' read `num_blocks` volume blocks starting from `first_block` '''
        return self._read_without_position_change(
            first_block * self.rmx_volume_information.block_size,
            num_blocks * self.rmx_volume_information.block_size
        )
    @lru_cache()
    def _read_directory(self, fnode):
        ''' returns a dict mapping filenames to file nodes for the given directory '''
        assert fnode.type == 'directory'
        data = self._get_file_data(fnode)
        fmt = 'H14s'
        size = struct.calcsize(fmt)
        files = {}
        for first_byte in range(0, len(data), size):
            try:
                fnode, name = struct.unpack(fmt, data[first_byte:first_byte + size])
                if name == 14 * b'@':
                    # Empty / unused directory slot.
                    continue
                name = name.strip(b'\x00').decode('ascii')
                if self._fnodes[fnode].type in ('directory', 'data'):
                    files[name] = self._fnodes[fnode]
            except (IndexError, KeyError, UnicodeDecodeError):
                # BUGFIX: KeyError is now caught as well -- a directory entry
                # may reference an fnode that was dropped as deleted or
                # unallocated (see _read_fnode_file), which previously
                # crashed the listing.  logging.warn is deprecated, use
                # logging.warning.
                msg = 'Could not read file entry {} at fnode {} in directory'
                logging.warning(msg.format(name, fnode))
        return files
    def ls(self, directory=None):
        """List entry names in ``directory`` (default: current directory)."""
        fnode = self._path_to_fnode(directory or self._cwd)
        if fnode.type == 'data':
            # Listing a plain file just echoes the argument back.
            return directory
        return list(self._read_directory(fnode).keys())
    def cd(self, directory=None):
        """Change the working directory (default: back to the root)."""
        directory = '/' if directory is None else self.abspath(directory)
        if self._path_to_fnode(directory).type == 'directory':
            self._cwd = directory
        else:
            raise IOError('No such directory: {}'.format(directory))
    def walk(self, base=None):
        """Yield ``(path, dirs, files)`` tuples like ``os.walk``, depth-first."""
        base = self.abspath(base) if base else self._cwd
        basedir = self[base]
        files = [basedir[f] for f in basedir.files]
        dirs = [basedir[d] for d in basedir.directories]
        yield base, dirs, files
        for d in dirs:
            yield from self.walk(base=d.abspath)
    def abspath(self, path):
        """Make ``path`` absolute relative to the current working directory."""
        if path.startswith('/'):
            return path
        return os.path.join(self._cwd, path)
    def pwd(self):
        """Return the current working directory."""
        return self._cwd
def main():
    """Command-line entry point: extract every file from an iRMX86 image."""
    parser = argparse.ArgumentParser(
        description='Extract files from an irmx86 device or image',
        prog='irmx86_extract',
    )
    parser.add_argument('device', help='iRmx86 formatted device or image')
    parser.add_argument('output', help='Where to store the extracted files')
    args = parser.parse_args()

    with FileSystem(args.device) as fs:
        for root, _dirs, files in fs.walk('/'):
            # `root` is an absolute path inside the image, so plain string
            # concatenation (not os.path.join) keeps it under `output`.
            target_dir = args.output + root
            os.makedirs(target_dir, exist_ok=True)
            for entry in files:
                destination = os.path.join(target_dir, entry.name.replace(' ', '_'))
                with open(destination, 'wb') as out:
                    out.write(entry.read())
                # Preserve the original (atime, mtime) pair on the copy.
                os.utime(
                    destination,
                    (entry.access_time.timestamp(), entry.modification_time.timestamp())
                )
if __name__ == '__main__':
main()
| |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for Twisted component architecture.
"""
from zope.interface import Interface, implements, Attribute
from zope.interface.adapter import AdapterRegistry
from twisted.trial import unittest
from twisted.python import components
from twisted.python.components import _addHook, _removeHook, proxyForInterface
class InterfacesTestCase(unittest.TestCase):
    """Test interfaces."""
    # NOTE(review): this TestCase defines no test methods; it appears to be a
    # leftover grouping placeholder.
class Compo(components.Componentized):
    """Componentized fixture whose inc() bumps and returns a counter."""
    num = 0

    def inc(self):
        """Increment the running counter and return its new value."""
        self.num += 1
        return self.num
# zope interface declarations: method signatures intentionally omit 'self'.
class IAdept(Interface):
    def adaptorFunc():
        raise NotImplementedError()
class IElapsed(Interface):
    def elapsedFunc():
        """
        1!
        """
class Adept(components.Adapter):
    # Legacy class-advice declaration style (pre-@implementer zope.interface).
    implements(IAdept)
    def __init__(self, orig):
        self.original = orig
        self.num = 0
    def adaptorFunc(self):
        # Returns this adapter's own call count paired with the adaptee's.
        self.num = self.num + 1
        return self.num, self.original.inc()
class Elapsed(components.Adapter):
    implements(IElapsed)
    def elapsedFunc(self):
        return 1
# Three-level Componentized hierarchy used to exercise adapter lookup on
# subclasses (adapters are registered for AComp only).
class AComp(components.Componentized):
    pass
class BComp(AComp):
    pass
class CComp(BComp):
    pass
# Marker interfaces used as adaptation targets throughout the tests below.
class ITest(Interface):
    pass
class ITest2(Interface):
    pass
class ITest3(Interface):
    pass
class ITest4(Interface):
    pass
class Test(components.Adapter):
    # One adapter claiming several target interfaces at once.
    implements(ITest, ITest3, ITest4)
    def __init__(self, orig):
        pass
class Test2:
    implements(ITest2)
    # Marks the adapter as not cacheable on a Componentized instance
    # (exercised by testInheritanceAdaptation: repeated lookups differ).
    temporaryAdapter = 1
    def __init__(self, orig):
        pass
class RegistryUsingMixin(object):
    """
    Mixin for test cases which modify the global registry somehow.
    """
    def setUp(self):
        """
        Configure L{twisted.python.components.registerAdapter} to mutate an
        alternate registry to improve test isolation.
        """
        # Create a brand new, empty registry and put it onto the components
        # module where registerAdapter will use it. Also ensure that it goes
        # away at the end of the test.
        scratchRegistry = AdapterRegistry()
        self.patch(components, 'globalRegistry', scratchRegistry)
        # Hook the new registry up to the adapter lookup system and ensure that
        # association is also discarded after the test.
        hook = _addHook(scratchRegistry)
        self.addCleanup(_removeHook, hook)
class ComponentizedTestCase(unittest.TestCase, RegistryUsingMixin):
    """
    Simple test case for caching in Componentized.
    """
    def setUp(self):
        RegistryUsingMixin.setUp(self)
        components.registerAdapter(Test, AComp, ITest)
        components.registerAdapter(Test, AComp, ITest3)
        components.registerAdapter(Test2, AComp, ITest2)
    def testComponentized(self):
        components.registerAdapter(Adept, Compo, IAdept)
        components.registerAdapter(Elapsed, Compo, IElapsed)
        c = Compo()
        # The IAdept adapter is cached on c, so its counter accumulates.
        assert c.getComponent(IAdept).adaptorFunc() == (1, 1)
        assert c.getComponent(IAdept).adaptorFunc() == (2, 2)
        assert IElapsed(IAdept(c)).elapsedFunc() == 1
    def testInheritanceAdaptation(self):
        c = CComp()
        co1 = c.getComponent(ITest)
        co2 = c.getComponent(ITest)
        co3 = c.getComponent(ITest2)
        co4 = c.getComponent(ITest2)
        # ITest's adapter is cached; Test2 is marked temporaryAdapter so
        # ITest2 lookups produce a fresh adapter each time.
        assert co1 is co2
        assert co3 is not co4
        c.removeComponent(co1)
        co5 = c.getComponent(ITest)
        co6 = c.getComponent(ITest)
        assert co5 is co6
        assert co1 is not co5
    def testMultiAdapter(self):
        c = CComp()
        co1 = c.getComponent(ITest)
        co2 = c.getComponent(ITest2)
        co3 = c.getComponent(ITest3)
        co4 = c.getComponent(ITest4)
        # No adapter was registered for ITest4.
        self.assertIdentical(None, co4)
        self.assertIdentical(co1, co3)
    def test_getComponentDefaults(self):
        """
        Test that a default value specified to Componentized.getComponent is
        returned if there is no component for the requested interface.
        """
        componentized = components.Componentized()
        default = object()
        self.assertIdentical(
            componentized.getComponent(ITest, default),
            default)
        self.assertIdentical(
            componentized.getComponent(ITest, default=default),
            default)
        self.assertIdentical(
            componentized.getComponent(ITest),
            None)
class AdapterTestCase(unittest.TestCase):
    """Test adapters."""
    def testAdapterGetComponent(self):
        # Adapting an object with no registered adapter raises CannotAdapt;
        # supplying an alternate returns that alternate instead.
        o = object()
        a = Adept(o)
        self.assertRaises(components.CannotAdapt, ITest, a)
        self.assertEqual(ITest(a, None), None)
# Adapters exposing an add() with opposite sign conventions, used to verify
# which registration actually wins.
class IMeta(Interface):
    pass
class MetaAdder(components.Adapter):
    implements(IMeta)
    def add(self, num):
        return self.original.num + num
class BackwardsAdder(components.Adapter):
    implements(IMeta)
    def add(self, num):
        return self.original.num - num
class MetaNumber:
    """Plain number holder used as an adaptation source."""

    def __init__(self, num):
        self.num = num


class FakeAdder:
    """Stand-in adder that ignores any adaptee and simply adds five."""

    def add(self, num):
        return 5 + num


class FakeNumber:
    """Bare fixture exposing a class-level ``num``."""
    num = 3
class ComponentNumber(components.Componentized):
    def __init__(self):
        self.num = 0
        components.Componentized.__init__(self)
class ComponentMeta(components.Adapter):
    implements(IMeta)
    def __init__(self, original):
        components.Adapter.__init__(self, original)
        # Snapshot of the adaptee's num at adaptation time.
        self.num = self.original.num
class ComponentAdder(ComponentMeta):
    def add(self, num):
        self.num += num
        return self.num
class ComponentDoubler(ComponentMeta):
    def add(self, num):
        self.num += (num * 2)
        return self.original.num
class IAttrX(Interface):
    def x():
        pass
class IAttrXX(Interface):
    def xx():
        pass
class Xcellent:
    implements(IAttrX)
    def x(self):
        return 'x!'
class DoubleXAdapter:
    num = 42
    def __init__(self, original):
        self.original = original
    def xx(self):
        return (self.original.x(), self.original.x())
    # Python 2 era comparison hook; `cmp` is the Python 2 builtin.
    def __cmp__(self, other):
        return cmp(self.num, other.num)
class TestMetaInterface(RegistryUsingMixin, unittest.TestCase):
    # End-to-end tests of registerAdapter + calling the interface directly.
    def testBasic(self):
        components.registerAdapter(MetaAdder, MetaNumber, IMeta)
        n = MetaNumber(1)
        self.assertEqual(IMeta(n).add(1), 2)
    def testComponentizedInteraction(self):
        components.registerAdapter(ComponentAdder, ComponentNumber, IMeta)
        c = ComponentNumber()
        # The adapter is cached on the Componentized, so state accumulates.
        IMeta(c).add(1)
        IMeta(c).add(1)
        self.assertEqual(IMeta(c).add(1), 3)
    def testAdapterWithCmp(self):
        # Make sure that a __cmp__ on an adapter doesn't break anything
        components.registerAdapter(DoubleXAdapter, IAttrX, IAttrXX)
        xx = IAttrXX(Xcellent())
        self.assertEqual(('x!', 'x!'), xx.xx())
class RegistrationTestCase(RegistryUsingMixin, unittest.TestCase):
    """
    Tests for adapter registration.
    """
    def _registerAdapterForClassOrInterface(self, original):
        """
        Register an adapter with L{components.registerAdapter} for the given
        class or interface and verify that the adapter can be looked up with
        L{components.getAdapterFactory}.
        """
        adapter = lambda o: None
        components.registerAdapter(adapter, original, ITest)
        self.assertIdentical(
            components.getAdapterFactory(original, ITest, None),
            adapter)
    def test_registerAdapterForClass(self):
        """
        Test that an adapter from a class can be registered and then looked
        up.
        """
        class TheOriginal(object):
            pass
        return self._registerAdapterForClassOrInterface(TheOriginal)
    def test_registerAdapterForInterface(self):
        """
        Test that an adapter from an interface can be registered and then
        looked up.
        """
        return self._registerAdapterForClassOrInterface(ITest2)
    def _duplicateAdapterForClassOrInterface(self, original):
        """
        Verify that L{components.registerAdapter} raises L{ValueError} if the
        from-type/interface and to-interface pair is not unique.
        """
        firstAdapter = lambda o: False
        secondAdapter = lambda o: True
        components.registerAdapter(firstAdapter, original, ITest)
        self.assertRaises(
            ValueError,
            components.registerAdapter,
            secondAdapter, original, ITest)
        # Make sure that the original adapter is still around as well
        self.assertIdentical(
            components.getAdapterFactory(original, ITest, None),
            firstAdapter)
    def test_duplicateAdapterForClass(self):
        """
        Test that attempting to register a second adapter from a class
        raises the appropriate exception.
        """
        class TheOriginal(object):
            pass
        return self._duplicateAdapterForClassOrInterface(TheOriginal)
    def test_duplicateAdapterForInterface(self):
        """
        Test that attempting to register a second adapter from an interface
        raises the appropriate exception.
        """
        return self._duplicateAdapterForClassOrInterface(ITest2)
    def _duplicateAdapterForClassOrInterfaceAllowed(self, original):
        """
        Verify that when C{components.ALLOW_DUPLICATES} is set to C{True}, new
        adapter registrations for a particular from-type/interface and
        to-interface pair replace older registrations.
        """
        firstAdapter = lambda o: False
        secondAdapter = lambda o: True
        class TheInterface(Interface):
            pass
        components.registerAdapter(firstAdapter, original, TheInterface)
        components.ALLOW_DUPLICATES = True
        try:
            components.registerAdapter(secondAdapter, original, TheInterface)
            self.assertIdentical(
                components.getAdapterFactory(original, TheInterface, None),
                secondAdapter)
        finally:
            # Restore the global flag whatever happens above.
            components.ALLOW_DUPLICATES = False
        # It should be rejected again at this point
        self.assertRaises(
            ValueError,
            components.registerAdapter,
            firstAdapter, original, TheInterface)
        self.assertIdentical(
            components.getAdapterFactory(original, TheInterface, None),
            secondAdapter)
    def test_duplicateAdapterForClassAllowed(self):
        """
        Test that when L{components.ALLOW_DUPLICATES} is set to a true
        value, duplicate registrations from classes are allowed to override
        the original registration.
        """
        class TheOriginal(object):
            pass
        return self._duplicateAdapterForClassOrInterfaceAllowed(TheOriginal)
    def test_duplicateAdapterForInterfaceAllowed(self):
        """
        Test that when L{components.ALLOW_DUPLICATES} is set to a true
        value, duplicate registrations from interfaces are allowed to
        override the original registration.
        """
        class TheOriginal(Interface):
            pass
        return self._duplicateAdapterForClassOrInterfaceAllowed(TheOriginal)
    def _multipleInterfacesForClassOrInterface(self, original):
        """
        Verify that an adapter can be registered for multiple to-interfaces at a
        time.
        """
        adapter = lambda o: None
        components.registerAdapter(adapter, original, ITest, ITest2)
        self.assertIdentical(
            components.getAdapterFactory(original, ITest, None), adapter)
        self.assertIdentical(
            components.getAdapterFactory(original, ITest2, None), adapter)
    def test_multipleInterfacesForClass(self):
        """
        Test the registration of an adapter from a class to several
        interfaces at once.
        """
        class TheOriginal(object):
            pass
        return self._multipleInterfacesForClassOrInterface(TheOriginal)
    def test_multipleInterfacesForInterface(self):
        """
        Test the registration of an adapter from an interface to several
        interfaces at once.
        """
        return self._multipleInterfacesForClassOrInterface(ITest3)
    def _subclassAdapterRegistrationForClassOrInterface(self, original):
        """
        Verify that a new adapter can be registered for a particular
        to-interface from a subclass of a type or interface which already has an
        adapter registered to that interface and that the subclass adapter takes
        precedence over the base class adapter.
        """
        firstAdapter = lambda o: True
        secondAdapter = lambda o: False
        class TheSubclass(original):
            pass
        components.registerAdapter(firstAdapter, original, ITest)
        components.registerAdapter(secondAdapter, TheSubclass, ITest)
        self.assertIdentical(
            components.getAdapterFactory(original, ITest, None),
            firstAdapter)
        self.assertIdentical(
            components.getAdapterFactory(TheSubclass, ITest, None),
            secondAdapter)
    def test_subclassAdapterRegistrationForClass(self):
        """
        Test that an adapter to a particular interface can be registered
        from both a class and its subclass.
        """
        class TheOriginal(object):
            pass
        return self._subclassAdapterRegistrationForClassOrInterface(TheOriginal)
    def test_subclassAdapterRegistrationForInterface(self):
        """
        Test that an adapter to a particular interface can be registered
        from both an interface and its subclass.
        """
        return self._subclassAdapterRegistrationForClassOrInterface(ITest2)
class IProxiedInterface(Interface):
    """
    An interface class for use by L{proxyForInterface}.
    """
    ifaceAttribute = Attribute("""
        An example declared attribute, which should be proxied.""")
    def yay(*a, **kw):
        """
        A sample method which should be proxied.
        """
class IProxiedSubInterface(IProxiedInterface):
    """
    An interface that derives from another for use with L{proxyForInterface}.
    """
    # NOTE(review): declares 'self' unlike IProxiedInterface.yay above;
    # zope.interface declarations conventionally omit it.
    def boo(self):
        """
        A different sample method which should be proxied.
        """
class Yayable(object):
    """
    A provider of L{IProxiedInterface} that counts and records every call
    to C{yay}.
    @ivar yays: Number of times C{yay} has been invoked.
    @ivar yayArgs: List of C{(args, kwargs)} tuples, one per invocation.
    """
    implements(IProxiedInterface)

    def __init__(self):
        self.yays = 0
        self.yayArgs = []

    def yay(self, *a, **kw):
        """
        Record the call's arguments, then bump and return the counter.
        """
        self.yayArgs.append((a, kw))
        self.yays = self.yays + 1
        return self.yays
class Booable(object):
    """
    An L{IProxiedSubInterface} implementation that records which of its
    methods have been invoked.
    """
    implements(IProxiedSubInterface)
    yayed = False
    booed = False

    def yay(self):
        """
        Note that 'yay' was invoked.
        """
        self.yayed = True

    def boo(self):
        """
        Note that 'boo' was invoked.
        """
        self.booed = True
class IMultipleMethods(Interface):
    """
    An interface with multiple methods.
    """
    def methodOne():
        """
        The first method. Should return 1.
        """
    def methodTwo():
        """
        The second method. Should return 2.
        """
class MultipleMethodImplementor(object):
    """
    A concrete implementation of L{IMultipleMethods}.
    """

    def methodTwo(self):
        """
        @return: 2
        """
        return 2

    def methodOne(self):
        """
        @return: 1
        """
        return 1
class ProxyForInterfaceTests(unittest.TestCase):
    """
    Tests for L{proxyForInterface}.
    """
    def test_original(self):
        """
        Proxy objects should have an C{original} attribute which refers to the
        original object passed to the constructor.
        """
        original = object()
        proxy = proxyForInterface(IProxiedInterface)(original)
        self.assertIdentical(proxy.original, original)
    def test_proxyMethod(self):
        """
        The class created from L{proxyForInterface} passes methods on an
        interface to the object which is passed to its constructor.
        """
        klass = proxyForInterface(IProxiedInterface)
        yayable = Yayable()
        proxy = klass(yayable)
        proxy.yay()
        self.assertEqual(proxy.yay(), 2)
        self.assertEqual(yayable.yays, 2)
    def test_proxyAttribute(self):
        """
        Proxy objects should proxy declared attributes, but not other
        attributes.
        """
        yayable = Yayable()
        yayable.ifaceAttribute = object()
        proxy = proxyForInterface(IProxiedInterface)(yayable)
        self.assertIdentical(proxy.ifaceAttribute, yayable.ifaceAttribute)
        # `yays` is not declared on the interface, so it must not leak through.
        self.assertRaises(AttributeError, lambda: proxy.yays)
    def test_proxySetAttribute(self):
        """
        The attributes that proxy objects proxy should be assignable and affect
        the original object.
        """
        yayable = Yayable()
        proxy = proxyForInterface(IProxiedInterface)(yayable)
        thingy = object()
        proxy.ifaceAttribute = thingy
        self.assertIdentical(yayable.ifaceAttribute, thingy)
    def test_proxyDeleteAttribute(self):
        """
        The attributes that proxy objects proxy should be deletable and affect
        the original object.
        """
        yayable = Yayable()
        yayable.ifaceAttribute = None
        proxy = proxyForInterface(IProxiedInterface)(yayable)
        del proxy.ifaceAttribute
        self.assertFalse(hasattr(yayable, 'ifaceAttribute'))
    def test_multipleMethods(self):
        """
        [Regression test] The proxy should send its method calls to the correct
        method, not the incorrect one.
        """
        multi = MultipleMethodImplementor()
        proxy = proxyForInterface(IMultipleMethods)(multi)
        self.assertEqual(proxy.methodOne(), 1)
        self.assertEqual(proxy.methodTwo(), 2)
    def test_subclassing(self):
        """
        It is possible to subclass the result of L{proxyForInterface}.
        """
        class SpecializedProxy(proxyForInterface(IProxiedInterface)):
            """
            A specialized proxy which can decrement the number of yays.
            """
            def boo(self):
                """
                Decrement the number of yays.
                """
                self.original.yays -= 1
        yayable = Yayable()
        special = SpecializedProxy(yayable)
        self.assertEqual(yayable.yays, 0)
        special.boo()
        self.assertEqual(yayable.yays, -1)
    def test_proxyName(self):
        """
        The name of a proxy class indicates which interface it proxies.
        """
        proxy = proxyForInterface(IProxiedInterface)
        self.assertEqual(
            proxy.__name__,
            "(Proxy for "
            "twisted.python.test.test_components.IProxiedInterface)")
    def test_implements(self):
        """
        The resulting proxy implements the interface that it proxies.
        """
        proxy = proxyForInterface(IProxiedInterface)
        self.assertTrue(IProxiedInterface.implementedBy(proxy))
    def test_proxyDescriptorGet(self):
        """
        _ProxyDescriptor's __get__ method should return the appropriate
        attribute of its argument's 'original' attribute if it is invoked with
        an object. If it is invoked with None, it should return a false
        class-method emulator instead.
        For some reason, Python's documentation recommends to define
        descriptors' __get__ methods with the 'type' parameter as optional,
        despite the fact that Python itself never actually calls the descriptor
        that way. This is probably to support 'foo.__get__(bar)' as an
        idiom. Let's make sure that the behavior is correct. Since we don't
        actually use the 'type' argument at all, this test calls it the
        idiomatic way to ensure that signature works; test_proxyInheritance
        verifies the how-Python-actually-calls-it signature.
        """
        class Sample:
            called = False
            def hello(self):
                self.called = True
        fakeProxy = Sample()
        testObject = Sample()
        fakeProxy.original = testObject
        pd = components._ProxyDescriptor("hello", "original")
        self.assertEqual(pd.__get__(fakeProxy), testObject.hello)
        fakeClassMethod = pd.__get__(None)
        fakeClassMethod(fakeProxy)
        self.failUnless(testObject.called)
    def test_proxyInheritance(self):
        """
        Subclasses of the class returned from L{proxyForInterface} should be
        able to upcall methods by reference to their superclass, as any normal
        Python class can.
        """
        class YayableWrapper(proxyForInterface(IProxiedInterface)):
            """
            This class does not override any functionality.
            """
        class EnhancedWrapper(YayableWrapper):
            """
            This class overrides the 'yay' method.
            """
            wrappedYays = 1
            def yay(self, *a, **k):
                self.wrappedYays += 1
                return YayableWrapper.yay(self, *a, **k) + 7
        yayable = Yayable()
        wrapper = EnhancedWrapper(yayable)
        self.assertEqual(wrapper.yay(3, 4, x=5, y=6), 8)
        self.assertEqual(yayable.yayArgs,
                         [((3, 4), dict(x=5, y=6))])
    def test_interfaceInheritance(self):
        """
        Proxies of subinterfaces generated with proxyForInterface should allow
        access to attributes of both the child and the base interfaces.
        """
        proxyClass = proxyForInterface(IProxiedSubInterface)
        booable = Booable()
        proxy = proxyClass(booable)
        proxy.yay()
        proxy.boo()
        self.failUnless(booable.yayed)
        self.failUnless(booable.booed)
    def test_attributeCustomization(self):
        """
        The original attribute name can be customized via the
        C{originalAttribute} argument of L{proxyForInterface}: the attribute
        should change, but the methods of the original object should still be
        callable, and the attributes still accessible.
        """
        yayable = Yayable()
        yayable.ifaceAttribute = object()
        proxy = proxyForInterface(
            IProxiedInterface, originalAttribute='foo')(yayable)
        self.assertIdentical(proxy.foo, yayable)
        # Check the behavior
        self.assertEqual(proxy.yay(), 1)
        self.assertIdentical(proxy.ifaceAttribute, yayable.ifaceAttribute)
        thingy = object()
        proxy.ifaceAttribute = thingy
        self.assertIdentical(yayable.ifaceAttribute, thingy)
        del proxy.ifaceAttribute
        self.assertFalse(hasattr(yayable, 'ifaceAttribute'))
| |
"""Generic (shallow and deep) copying operations.
Interface summary:
import copy
x = copy.copy(y) # make a shallow copy of y
x = copy.deepcopy(y) # make a deep copy of y
For module specific errors, copy.Error is raised.
The difference between shallow and deep copying is only relevant for
compound objects (objects that contain other objects, like lists or
class instances).
- A shallow copy constructs a new compound object and then (to the
extent possible) inserts *the same objects* into it that the
original contains.
- A deep copy constructs a new compound object and then, recursively,
inserts *copies* into it of the objects found in the original.
Two problems often exist with deep copy operations that don't exist
with shallow copy operations:
a) recursive objects (compound objects that, directly or indirectly,
contain a reference to themselves) may cause a recursive loop
b) because deep copy copies *everything* it may copy too much, e.g.
administrative data structures that should be shared even between
copies
Python's deep copy operation avoids these problems by:
a) keeping a table of objects already copied during the current
copying pass
b) letting user-defined classes override the copying operation or the
set of components copied
This version does not copy types like module, class, function, method,
nor stack trace, stack frame, nor file, socket, window, nor array, nor
any similar types.
Classes can use the same interfaces to control copying that they use
to control pickling: they can define methods called __getinitargs__(),
__getstate__() and __setstate__(). See the documentation for module
"pickle" for information on these methods.
"""
import types
import weakref
from copyreg import dispatch_table
class Error(Exception):
    """Base error raised for module-specific copy failures."""
    pass
error = Error # backward compatibility
try:
    from org.python.core import PyStringMap
except ImportError:
    # PyStringMap only exists on Jython.
    PyStringMap = None
__all__ = ["Error", "copy", "deepcopy"]
def copy(x):
    """Shallow copy operation on arbitrary Python objects.
    See the module's __doc__ string for more info.
    """
    cls = type(x)
    # 1) Exact-type dispatch for builtins (immutables, list, dict, set, ...).
    copier = _copy_dispatch.get(cls)
    if copier:
        return copier(x)
    # 2) Classes themselves (instances of type) are treated as immutable.
    if issubclass(cls, type):
        # treat it as a regular class:
        return _copy_immutable(x)
    # 3) A __copy__ hook defined on the class takes over next.
    copier = getattr(cls, "__copy__", None)
    if copier is not None:
        return copier(x)
    # 4) Fall back to the pickle protocol: copyreg dispatch table, then
    #    __reduce_ex__ (protocol 4), then __reduce__.
    reductor = dispatch_table.get(cls)
    if reductor is not None:
        rv = reductor(x)
    else:
        reductor = getattr(x, "__reduce_ex__", None)
        if reductor is not None:
            rv = reductor(4)
        else:
            reductor = getattr(x, "__reduce__", None)
            if reductor:
                rv = reductor()
            else:
                raise Error("un(shallow)copyable object of type %s" % cls)
    # A string from __reduce__ names a "global object": return x uncopied.
    if isinstance(rv, str):
        return x
    return _reconstruct(x, None, *rv)
_copy_dispatch = d = {}
def _copy_immutable(x):
    # Immutable objects (and objects shared by design, e.g. functions,
    # classes, weakrefs) are returned unchanged by a shallow copy.
    return x
for t in (type(None), int, float, bool, complex, str, tuple,
          bytes, frozenset, type, range, slice, property,
          types.BuiltinFunctionType, type(Ellipsis), type(NotImplemented),
          types.FunctionType, weakref.ref):
    d[t] = _copy_immutable
t = getattr(types, "CodeType", None)
if t is not None:
    d[t] = _copy_immutable
# Mutable builtins use their own C-level .copy() as the shallow copier.
d[list] = list.copy
d[dict] = dict.copy
d[set] = set.copy
d[bytearray] = bytearray.copy
if PyStringMap is not None:
    d[PyStringMap] = PyStringMap.copy
del d, t
def deepcopy(x, memo=None, _nil=[]):
    """Deep copy operation on arbitrary Python objects.
    See the module's __doc__ string for more info.
    """
    if memo is None:
        memo = {}
    # Memo is keyed on id(); _nil is a unique sentinel so that any
    # legitimately-stored value is distinguishable from a cache miss.
    d = id(x)
    y = memo.get(d, _nil)
    if y is not _nil:
        return y
    cls = type(x)
    # Resolution order: per-type dispatch table, class objects (atomic),
    # a __deepcopy__ hook, then the pickle-style reduction protocol.
    copier = _deepcopy_dispatch.get(cls)
    if copier is not None:
        y = copier(x, memo)
    else:
        if issubclass(cls, type):
            y = _deepcopy_atomic(x, memo)  # classes are never duplicated
        else:
            copier = getattr(x, "__deepcopy__", None)
            if copier is not None:
                y = copier(memo)
            else:
                reductor = dispatch_table.get(cls)
                if reductor:
                    rv = reductor(x)
                else:
                    reductor = getattr(x, "__reduce_ex__", None)
                    if reductor is not None:
                        rv = reductor(4)  # reduce with pickle protocol 4
                    else:
                        reductor = getattr(x, "__reduce__", None)
                        if reductor:
                            rv = reductor()
                        else:
                            raise Error(
                                "un(deep)copyable object of type %s" % cls)
                # A bare-string reduce value means "use the object itself".
                if isinstance(rv, str):
                    y = x
                else:
                    y = _reconstruct(x, memo, *rv)
    # If is its own copy, don't memoize.
    if y is not x:
        memo[d] = y
        _keep_alive(x, memo) # Make sure x lives at least as long as d
    return y
# Deep-copy dispatch: maps a concrete type to its deep-copier function.
_deepcopy_dispatch = d = {}
def _deepcopy_atomic(x, memo):
    # Atomic/immutable objects are shared, never duplicated.
    return x
# Immutable scalars and types that are deliberately not deep-copied
# (functions, code objects, weakrefs, classes themselves).
d[type(None)] = _deepcopy_atomic
d[type(Ellipsis)] = _deepcopy_atomic
d[type(NotImplemented)] = _deepcopy_atomic
d[int] = _deepcopy_atomic
d[float] = _deepcopy_atomic
d[bool] = _deepcopy_atomic
d[complex] = _deepcopy_atomic
d[bytes] = _deepcopy_atomic
d[str] = _deepcopy_atomic
d[types.CodeType] = _deepcopy_atomic
d[type] = _deepcopy_atomic
d[range] = _deepcopy_atomic
d[types.BuiltinFunctionType] = _deepcopy_atomic
d[types.FunctionType] = _deepcopy_atomic
d[weakref.ref] = _deepcopy_atomic
d[property] = _deepcopy_atomic
def _deepcopy_list(x, memo, deepcopy=deepcopy):
y = []
memo[id(x)] = y
append = y.append
for a in x:
append(deepcopy(a, memo))
return y
d[list] = _deepcopy_list
def _deepcopy_tuple(x, memo, deepcopy=deepcopy):
y = [deepcopy(a, memo) for a in x]
# We're not going to put the tuple in the memo, but it's still important we
# check for it, in case the tuple contains recursive mutable structures.
try:
return memo[id(x)]
except KeyError:
pass
for k, j in zip(x, y):
if k is not j:
y = tuple(y)
break
else:
y = x
return y
d[tuple] = _deepcopy_tuple
def _deepcopy_dict(x, memo, deepcopy=deepcopy):
y = {}
memo[id(x)] = y
for key, value in x.items():
y[deepcopy(key, memo)] = deepcopy(value, memo)
return y
# Dicts (and Jython's dict-like PyStringMap) share one helper.
d[dict] = _deepcopy_dict
if PyStringMap is not None:
    d[PyStringMap] = _deepcopy_dict
def _deepcopy_method(x, memo): # Copy instance methods
return type(x)(x.__func__, deepcopy(x.__self__, memo))
# Bound methods copy their receiver, not their function.
d[types.MethodType] = _deepcopy_method
# Drop the alias; the table stays reachable as _deepcopy_dispatch.
del d
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
def _reconstruct(x, memo, func, args,
                 state=None, listiter=None, dictiter=None,
                 deepcopy=deepcopy):
    """Rebuild an object from a pickle-style reduce tuple.

    ``func(*args)`` creates the new object; *state* is applied via
    ``__setstate__`` or by updating ``__dict__``/slots; *listiter* and
    *dictiter* replay list items and dict entries.  A non-None *memo*
    signals a deep copy, in which case every piece is copied first.
    """
    deep = memo is not None
    if deep and args:
        args = (deepcopy(arg, memo) for arg in args)
    y = func(*args)
    if deep:
        # Memoize before applying state so cyclic references resolve to y.
        memo[id(x)] = y
    if state is not None:
        if deep:
            state = deepcopy(state, memo)
        if hasattr(y, '__setstate__'):
            y.__setstate__(state)
        else:
            # A 2-tuple state is (instance __dict__, slot name/value map).
            if isinstance(state, tuple) and len(state) == 2:
                state, slotstate = state
            else:
                slotstate = None
            if state is not None:
                y.__dict__.update(state)
            if slotstate is not None:
                for key, value in slotstate.items():
                    setattr(y, key, value)
    if listiter is not None:
        if deep:
            for item in listiter:
                item = deepcopy(item, memo)
                y.append(item)
        else:
            for item in listiter:
                y.append(item)
    if dictiter is not None:
        if deep:
            for key, value in dictiter:
                key = deepcopy(key, memo)
                value = deepcopy(value, memo)
                y[key] = value
        else:
            for key, value in dictiter:
                y[key] = value
    return y
del types, weakref, PyStringMap
| |
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an SDB Domain
"""
from boto.sdb.queryresultset import SelectResultSet
class Domain:
    # A single SimpleDB domain. All service operations delegate to the
    # SDBConnection that created this object.
    def __init__(self, connection=None, name=None):
        self.connection = connection
        self.name = name
        self._metadata = None  # lazily filled by get_metadata()
    def __repr__(self):
        return 'Domain:%s' % self.name
    def __iter__(self):
        # Iterating a domain yields every item via a full-table SELECT.
        return iter(self.select("SELECT * FROM `%s`" % self.name))
    def startElement(self, name, attrs, connection):
        # SAX-style hook used by boto's response parser; nothing nested.
        return None
    def endElement(self, name, value, connection):
        # SAX-style hook: capture the domain name; store anything else
        # as a plain attribute on this object.
        if name == 'DomainName':
            self.name = value
        else:
            setattr(self, name, value)
    def get_metadata(self):
        # Fetch this domain's metadata from the service and cache it.
        if not self._metadata:
            self._metadata = self.connection.domain_metadata(self)
        return self._metadata
    def put_attributes(self, item_name, attributes,
                       replace=True, expected_value=None):
        """
        Store attributes for a given item.
        :type item_name: string
        :param item_name: The name of the item whose attributes are being stored.
        :type attribute_names: dict or dict-like object
        :param attribute_names: The name/value pairs to store as attributes
        :type expected_value: list
        :param expected_value: If supplied, this is a list or tuple consisting
            of a single attribute name and expected value. The list can be
            of the form:
                * ['name', 'value']
            In which case the call will first verify that the attribute
            "name" of this item has a value of "value". If it does, the delete
            will proceed, otherwise a ConditionalCheckFailed error will be
            returned. The list can also be of the form:
                * ['name', True|False]
            which will simply check for the existence (True) or non-existence
            (False) of the attribute.
        :type replace: bool
        :param replace: Whether the attribute values passed in will replace
                        existing values or will be added as addition values.
                        Defaults to True.
        :rtype: bool
        :return: True if successful
        """
        return self.connection.put_attributes(self, item_name, attributes,
                                              replace, expected_value)
    def batch_put_attributes(self, items, replace=True):
        """
        Store attributes for multiple items.
        :type items: dict or dict-like object
        :param items: A dictionary-like object. The keys of the dictionary are
                      the item names and the values are themselves dictionaries
                      of attribute names/values, exactly the same as the
                      attribute_names parameter of the scalar put_attributes
                      call.
        :type replace: bool
        :param replace: Whether the attribute values passed in will replace
                        existing values or will be added as addition values.
                        Defaults to True.
        :rtype: bool
        :return: True if successful
        """
        return self.connection.batch_put_attributes(self, items, replace)
    def get_attributes(self, item_name, attribute_name=None,
                       consistent_read=False, item=None):
        """
        Retrieve attributes for a given item.
        :type item_name: string
        :param item_name: The name of the item whose attributes are being retrieved.
        :type attribute_names: string or list of strings
        :param attribute_names: An attribute name or list of attribute names. This
                                parameter is optional. If not supplied, all attributes
                                will be retrieved for the item.
        :rtype: :class:`boto.sdb.item.Item`
        :return: An Item mapping type containing the requested attribute name/values
        """
        return self.connection.get_attributes(self, item_name, attribute_name,
                                              consistent_read, item)
    def delete_attributes(self, item_name, attributes=None,
                          expected_values=None):
        """
        Delete attributes from a given item.
        :type item_name: string
        :param item_name: The name of the item whose attributes are being deleted.
        :type attributes: dict, list or :class:`boto.sdb.item.Item`
        :param attributes: Either a list containing attribute names which will cause
                           all values associated with that attribute name to be deleted or
                           a dict or Item containing the attribute names and keys and list
                           of values to delete as the value. If no value is supplied,
                           all attribute name/values for the item will be deleted.
        :type expected_value: list
        :param expected_value: If supplied, this is a list or tuple consisting
            of a single attribute name and expected value. The list can be of
            the form:
                * ['name', 'value']
            In which case the call will first verify that the attribute "name"
            of this item has a value of "value". If it does, the delete
            will proceed, otherwise a ConditionalCheckFailed error will be
            returned. The list can also be of the form:
                * ['name', True|False]
            which will simply check for the existence (True) or
            non-existence (False) of the attribute.
        :rtype: bool
        :return: True if successful
        """
        return self.connection.delete_attributes(self, item_name, attributes,
                                                 expected_values)
    def batch_delete_attributes(self, items):
        """
        Delete multiple items in this domain.
        :type items: dict or dict-like object
        :param items: A dictionary-like object. The keys of the dictionary are
            the item names and the values are either:
                * dictionaries of attribute names/values, exactly the
                  same as the attribute_names parameter of the scalar
                  put_attributes call. The attribute name/value pairs
                  will only be deleted if they match the name/value
                  pairs passed in.
                * None which means that all attributes associated
                  with the item should be deleted.
        :rtype: bool
        :return: True if successful
        """
        return self.connection.batch_delete_attributes(self, items)
    def select(self, query='', next_token=None, consistent_read=False, max_items=None):
        """
        Returns a set of Attributes for item names within domain_name that match the query.
        The query must be expressed in using the SELECT style syntax rather than the
        original SimpleDB query language.
        :type query: string
        :param query: The SimpleDB query to be performed.
        :rtype: iter
        :return: An iterator containing the results.  This is actually a generator
                 function that will iterate across all search results, not just the
                 first page.
        """
        return SelectResultSet(self, query, max_items=max_items, next_token=next_token,
                               consistent_read=consistent_read)
    def get_item(self, item_name, consistent_read=False):
        """
        Retrieves an item from the domain, along with all of its attributes.
        :param string item_name: The name of the item to retrieve.
        :rtype: :class:`boto.sdb.item.Item` or ``None``
        :keyword bool consistent_read: When set to true, ensures that the most
                                       recent data is returned.
        :return: The requested item, or ``None`` if there was no match found
        """
        item = self.get_attributes(item_name, consistent_read=consistent_read)
        if item:
            # Wire the item back to this domain so item.save() etc. work.
            item.domain = self
            return item
        else:
            return None
    def new_item(self, item_name):
        # Create an empty item bound to this domain (no service call).
        return self.connection.item_cls(self, item_name)
    def delete_item(self, item):
        # Deleting all attributes of an item deletes the item itself.
        self.delete_attributes(item.name)
    def to_xml(self, f=None):
        """Get this domain as an XML DOM Document
        :param f: Optional File to dump directly to
        :type f: File or Stream
        :return: File object where the XML has been dumped to
        :rtype: file
        """
        # NOTE(review): uses Python 2 `print >>` syntax and `unicode`;
        # this method is Python-2 only as written.
        if not f:
            from tempfile import TemporaryFile
            f = TemporaryFile()
        print >> f, '<?xml version="1.0" encoding="UTF-8"?>'
        print >> f, '<Domain id="%s">' % self.name
        for item in self:
            print >> f, '\t<Item id="%s">' % item.name
            for k in item:
                print >> f, '\t\t<attribute id="%s">' % k
                values = item[k]
                if not isinstance(values, list):
                    values = [values]
                for value in values:
                    print >> f, '\t\t\t<value><![CDATA[',
                    # Normalize every value to UTF-8 bytes before writing.
                    if isinstance(value, unicode):
                        value = value.encode('utf-8', 'replace')
                    else:
                        value = unicode(value, errors='replace').encode('utf-8', 'replace')
                    f.write(value)
                    print >> f, ']]></value>'
                print >> f, '\t\t</attribute>'
            print >> f, '\t</Item>'
        print >> f, '</Domain>'
        f.flush()
        f.seek(0)
        return f
    def from_xml(self, doc):
        """Load this domain based on an XML document"""
        import xml.sax
        handler = DomainDumpParser(self)
        xml.sax.parse(doc, handler)
        return handler
    def delete(self):
        """
        Delete this domain, and all items under it
        """
        return self.connection.delete_domain(self)
class DomainMetaData:
    """Container for the counters returned by a DomainMetadata request."""
    def __init__(self, domain=None):
        self.domain = domain
        self.item_count = None
        self.item_names_size = None
        self.attr_name_count = None
        self.attr_names_size = None
        self.attr_value_count = None
        self.attr_values_size = None
    def startElement(self, name, attrs, connection):
        # No nested elements of interest in the metadata response.
        return None
    def endElement(self, name, value, connection):
        # Map each integer-valued response element to its attribute.
        int_fields = {
            'ItemCount': 'item_count',
            'ItemNamesSizeBytes': 'item_names_size',
            'AttributeNameCount': 'attr_name_count',
            'AttributeNamesSizeBytes': 'attr_names_size',
            'AttributeValueCount': 'attr_value_count',
            'AttributeValuesSizeBytes': 'attr_values_size',
        }
        if name in int_fields:
            setattr(self, int_fields[name], int(value))
        elif name == 'Timestamp':
            self.timestamp = value
        else:
            # Unknown elements are stored verbatim.
            setattr(self, name, value)
import sys
from xml.sax.handler import ContentHandler
class DomainDumpParser(ContentHandler):
    """
    SAX parser for a domain that has been dumped
    """
    def __init__(self, domain):
        self.uploader = UploaderThread(domain)
        self.item_id = None      # id of the <Item> currently being parsed
        self.attrs = {}          # attr name -> list of values for that item
        self.attribute = None    # id of the <attribute> currently open
        self.value = ""          # accumulated character data for <value>
        self.domain = domain
    def startElement(self, name, attrs):
        if name == "Item":
            self.item_id = attrs['id']
            self.attrs = {}
        elif name == "attribute":
            self.attribute = attrs['id']
        elif name == "value":
            self.value = ""
    def characters(self, ch):
        # Character data may arrive in several chunks; accumulate it.
        self.value += ch
    def endElement(self, name):
        if name == "value":
            if self.value and self.attribute:
                value = self.value.strip()
                attr_name = self.attribute.strip()
                # BUG FIX: dict.has_key() is Python-2 only (removed in
                # Python 3); setdefault does the check-and-append in one
                # step and works on both versions.
                self.attrs.setdefault(attr_name, []).append(value)
        elif name == "Item":
            self.uploader.items[self.item_id] = self.attrs
            # Every 20 items we spawn off the uploader
            if len(self.uploader.items) >= 20:
                self.uploader.start()
                self.uploader = UploaderThread(self.domain)
        elif name == "Domain":
            # If we're done, spawn off our last Uploader Thread
            self.uploader.start()
from threading import Thread
class UploaderThread(Thread):
    """Uploader Thread"""
    def __init__(self, domain):
        self.db = domain
        self.items = {}  # item_name -> attribute dict, filled in by the parser
        Thread.__init__(self)
    def run(self):
        # Attempt one batch call; on any failure fall back to storing the
        # items one at a time.
        # NOTE(review): bare except hides the real error; Python 2 print
        # statements make this method py2-only as written.
        try:
            self.db.batch_put_attributes(self.items)
        except:
            print "Exception using batch put, trying regular put instead"
            for item_name in self.items:
                self.db.put_attributes(item_name, self.items[item_name])
        print ".",
        sys.stdout.flush()
| |
#!/usr/bin/env python
# Time-stamp: <12-Jun-2012 16:18:43 PDT by rich.pixley@palm.com>
# Copyright (c) 2008 - 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is now a thin veneer over what amount to remote procedure
calls to the ZApostd daemon. That is, this is a shell callable script
which implements the client side of those RPCs. Or, if you prefer,
this script uplifts the RPCs into a shell callable API.
.. todo:: add python tests
"""
from __future__ import print_function, unicode_literals
__docformat__ = 'restructuredtext en'
import platform
if platform.python_version_tuple()[0] == "2":
import ConfigParser as configparser
else:
import configparser
import collections
import contextlib
import errno
import logging
import os
import socket
import sys
import time
logger = logging.getLogger(__name__)
@contextlib.contextmanager
def ContextLog(logger, oline, cline):
    """
    ContextLog(logger, oline, cline)
    Tell'em what you're gonna do, do something, then tell them that
    you done it.
    """
    opening = '{}...'.format(oline)
    closing = '{}.'.format(cline)
    logger(opening)
    yield
    logger(closing)
class Connection(object):
    """
    This object is now the client side of a set of remote procedure
    calls to the ZApostd daemon.

    The wire protocol is line-oriented utf-8 text: one request line out,
    one whitespace-separated response line back, echoing the verb and
    arguments followed by the result.
    """
    __all__ = [
        'close',
        'down',
        'lock',
        'test',
        'unlock',
        'up',
    ]
    def __init__(self, unixdomainsocket=None, address=None, port=None):
        """
        Connect over TCP when *address* is given, otherwise over the
        named unix domain socket.  Retries with exponential backoff
        while the daemon refuses connections (e.g. still starting up).
        """
        self.logger = logging.getLogger('{}.{}'.format(__name__, self.__class__.__name__))
        with ContextLog(self.logger.debug, 'initializing', 'initialized'):
            self.unixdomainsocket = unixdomainsocket
            self.address = address
            self.port = port
            if self.address is not None:
                af = socket.AF_INET
                addr = (self.address, self.port)
            else:
                af = socket.AF_UNIX
                addr = self.unixdomainsocket
            logger.debug('connecting on {}'.format(addr))
            self.socket = socket.socket(af, socket.SOCK_STREAM)
            sleepdur = 1
            while True:
                try:
                    self.socket.connect(addr)
                    break
                except socket.error as err:
                    if err.errno == errno.ECONNREFUSED:
                        # Daemon may not be up yet; back off and retry.
                        logger.info('connection failed: {} - {}. sleeping {}'.format(
                            err.errno, os.strerror(err.errno), sleepdur))
                        time.sleep(sleepdur)
                        sleepdur *= 2
                    else:
                        raise
            # Consume the daemon's greeting line.
            response = self.socket.recv(1024).decode('utf-8').strip()
            self.logger.debug('Received: {}'.format(response))
    def up(self):
        pass # hm.
    def down(self):
        """Ask the daemon to shut down, then close our side."""
        self.logger.debug('Requesting daemon shutdown.')
        self.socket.send('shutdown\n'.encode('utf-8'))
        self.close()
    def close(self):
        """Close and discard the socket."""
        with ContextLog(self.logger.debug, 'closing', 'closed'):
            self.socket.close()
            del self.socket
    def RPC(self, request):
        """
        Send one request line and return the whitespace-split response
        as a deque so callers can pop fields left to right.
        """
        self.logger.debug('Requesting: {}'.format(request))
        # BUG FIX: encode before sending -- under Python 3 socket.send()
        # requires bytes, and every other send in this class encodes.
        self.socket.send('{}\n'.format(request).encode('utf-8'))
        response = self.socket.recv(1024).decode('utf-8').strip()
        self.logger.debug('Received: {}'.format(response))
        return collections.deque(response.split())
    def clear_locks(self):
        """
        Clear all locks.
        """
        response = self.RPC('clear_locks')
        return list(response)
    def locks(self):
        """
        Return a list of outstanding locks.
        """
        response = self.RPC('locks')
        rettodo = response.popleft()
        assert rettodo == 'locks', rettodo
        return list(response)
    def test(self, lock):
        """
        Check whether *lock* is locked.  Returns True if so, False if
        not.  See ZApostd.py for locking semantics.
        """
        response = self.RPC('test {}'.format(lock))
        assert len(response) == 3, response
        rettodo = response.popleft()
        assert rettodo == 'test', rettodo
        retlock = response.popleft()
        assert retlock == lock, (retlock, lock)
        retret = response.popleft()
        retval = True if retret == 'True' else False
        assert not len(response), response
        return retval
    def lock(self, lock, pid):
        """
        Lock a lock named *lock* as though it were locked by the
        process with *pid*.  See ZApostd.py for locking semantics.
        """
        response = self.RPC('lock {} {}'.format(pid, lock))
        rettodo = response.popleft()
        # BUG FIX: the assertion message previously referenced an
        # undefined name 'x', so a protocol mismatch raised NameError
        # instead of AssertionError.
        assert rettodo == 'lock', rettodo
        retpid = int(response.popleft())
        assert retpid == pid, (retpid, pid)
        retlock = response.popleft()
        assert retlock == lock, (retlock, lock)
        retret = response.popleft()
        retval = True if retret == 'True' else False
        assert not len(response), response
        return retval
    def unlock(self, lock, pid):
        """
        Attempt to unlock *lock* as though by *pid*.  See ZApostd.py for
        locking semantics.
        """
        response = self.RPC('unlock {} {}'.format(pid, lock))
        rettodo = response.popleft()
        assert rettodo == 'unlock', rettodo
        retpid = int(response.popleft())
        assert retpid == pid, (retpid, pid)
        retlock = response.popleft()
        assert retlock == lock, (retlock, lock)
        retret = response.popleft()
        retval = True if retret == 'True' else False
        assert not len(response)
        return retval
    def next(self, pid, count, requests):
        """
        Return the next component(s) to be tested.  *Requests* is a list
        of requested components.  *Pid* is the process id of the
        requesting process.  *Count* is a number of responses desired.
        """
        response = self.RPC('next ' + ' '.join([str(pid), str(count)] + requests))
        rettodo = response.popleft()
        assert rettodo == 'next', rettodo
        retpid = int(response.popleft())
        assert retpid == pid, (retpid, pid)
        retcount = int(response.popleft())
        assert retcount == count, (retcount, count)
        return list(response)
def getopts():
    """
    Parse command-line options.  Returns (options, args).

    Defaults for homedir/workdir/unixdomainsocket are resolved after
    parsing so that explicit flags always win.
    """
    import optparse
    u = ''
    u += 'usage: %prog\n'
    default_unixdomainsocket = 'zadc-socket'
    configs = configparser.ConfigParser()
    configs.read('/etc/za.conf')
    # BUG FIX: the system config may be absent or lack the [za-post]
    # section; previously configs.get() raised NoSectionError at option
    # setup time.  Fall back to '' so the post-parse defaulting below
    # (homedir -> workdir) takes over.
    try:
        default_workdir = configs.get('za-post', 'workdir')
    except configparser.Error:
        default_workdir = ''
    parser = optparse.OptionParser(usage = u)
    # General options
    general = optparse.OptionGroup(parser, 'General Options', '')
    general.add_option('-A', '--address', dest='address',
                       action='store', type='string', default=None,
                       help='connect to ZApostd on specified address. (default: none)')
    general.add_option('-C', '--clear-locks', dest='clear_locks',
                       action='store_true', default=False,
                       help='clear all outstanding locks. (default: False)')
    general.add_option('-c', '--count', dest='count',
                       action='store', type='int', default=1,
                       help='produce this many answers. (default: 1)')
    general.add_option('-H', '--homedir', dest='homedir',
                       action='store', type='string', default='',
                       help='operate with the specific home directory. (default: ~)')
    general.add_option('-L', '--lock', dest='locks',
                       action='append', type='string', default=[],
                       help='lock the named arguments. (default: lock only as necessary)')
    general.add_option('-l', '--logname', dest='logname',
                       action='store', type='string', default=os.getenv('LOGNAME', 'za-post'),
                       help='operate with the specific logname. (default: LOGNAME from environment)')
    general.add_option('-n', '--component', dest='components',
                       action='append', type='string', default=[],
                       help='a component which should be tried before anything else. (may be given multiple times)')
    general.add_option('-p', '--pid', dest='pid',
                       action='store', type='int', default=os.getppid(),
                       help='pid to be used for locks. (default: pid of calling process)')
    general.add_option('-P', '--port', dest='port',
                       action='store', type='int', default=1962,
                       help='port on which to connect to ZApostd server. (default: 1962)')
    general.add_option('-Q', '--quit', dest='quit',
                       action='store_true', default=False,
                       help='ask server to shutdown after processing. (default: False)')
    general.add_option('-S', '--locks', dest='all_locks',
                       action='store_true', default=False,
                       help='ask for a list of outstanding locks. (default: False)')
    general.add_option('-T', '--test', dest='tests',
                       action='append', type='string', default=[],
                       help='test named arguments as locks. returns zero exit status if all tested are locked. (default: test only as necessary)')
    general.add_option('-U', '--unlock', dest='unlocks',
                       action='append', type='string', default=[],
                       help='unlock the named arguments. (default: unlock only as necessary)')
    general.add_option('-w', '--workdir', dest='workdir',
                       action='store', type='string', default=default_workdir,
                       help='operate with the specific workdir. (default: {})'.format(default_workdir))
    # BUG FIX: help text was a copy-paste of the workdir description.
    general.add_option('-X', '--unixdomainsocket', dest='unixdomainsocket',
                       action='store', type='string', default='',
                       help='operate with the specific unix domain socket. (default: ~/workdir/{})'.format(default_unixdomainsocket))
    parser.add_option_group(general)
    # logging options
    logopt = optparse.OptionGroup(parser, 'Logging Options', '')
    logopt.add_option('-F', '--logfile', dest='logfile',
                      action='store', type='string', default='',
                      help='log to named logfile. (default: no logging)')
    logopt.add_option('-v', '--loglevel', dest='loglevel',
                      action='store', type='string', default='info',
                      help='log level to log. (default: info)')
    logopt.add_option('-x', '--trace', dest='trace',
                      action='store_true', default=False,
                      help='Log to standard out. (default: False)')
    parser.add_option_group(logopt)
    options, args = parser.parse_args()
    # Resolve defaults that depend on one another.
    if not options.homedir:
        options.homedir = os.path.expanduser('~{}'.format(options.logname))
    if not options.workdir:
        options.workdir = os.path.join(options.homedir, 'workdir')
    if not options.unixdomainsocket:
        options.unixdomainsocket = os.path.join(options.workdir, default_unixdomainsocket)
    return options, args
def setup_logs(options):
    """Configure log handlers according to *options* and return this
    module's logger.  With neither --trace nor --logfile, nothing is
    configured and logging stays at library defaults."""
    module_logger = logging.getLogger(__name__)
    if options.trace or options.logfile:
        level = getattr(logging, options.loglevel.upper())
        formatter = logging.Formatter(
            '%(asctime)s %(filename)s %(levelname)s %(name)s %(message)s',
            datefmt='%Y-%m-%dT%H:%M:%S')
        # The logfile lives under workdir; make sure it exists.
        if not os.path.isdir(options.workdir):
            os.makedirs(options.workdir)
        module_logger.setLevel(level)
        if options.trace:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(level)
            stream_handler.setFormatter(formatter)
            logging.getLogger('').addHandler(stream_handler)
        if options.logfile:
            file_handler = logging.FileHandler(options.logfile)
            file_handler.setLevel(level)
            file_handler.setFormatter(formatter)
            logging.getLogger('').addHandler(file_handler)
    return module_logger
if __name__ == '__main__':
    progname = sys.argv[0]
    retval = True  # accumulates the success of every lock/test/unlock call
    options, args = getopts()
    logger = setup_logs(options)
    connection = Connection(address=options.address,
                            port=options.port,
                            unixdomainsocket=options.unixdomainsocket)
    if options.clear_locks:
        connection.clear_locks()
    # Acquire any explicitly requested locks.
    for lockit in options.locks:
        with ContextLog(logger.debug,
                        'Locking {}'.format(lockit),
                        'Locked {}'.format(lockit)):
            retval &= connection.lock(lockit, options.pid)
    # Test requested locks; every one must be held for a zero exit status.
    tested = [connection.test(testit) for testit in options.tests]
    retval &= all(tested)
    for t, l in zip(options.tests, tested):
        logger.debug('{} -> {}'.format(t, l))
    # Release any explicitly requested locks.
    for unlockit in options.unlocks:
        with ContextLog(logger.debug,
                        'Unlocking {}'.format(unlockit),
                        'Unlocked {}'.format(unlockit)):
            retval &= connection.unlock(unlockit, options.pid)
    if options.all_locks:
        for lock in connection.locks():
            print(lock)
    # With no lock-management work requested, ask the daemon for the
    # next component(s) to be tested.
    if not (options.all_locks
            or options.unlocks
            or options.locks
            or options.tests
            or options.clear_locks
            or options.quit):
        for component in connection.next(options.pid, options.count, options.components):
            logger.debug('Next: {}'.format(component))
            print(component)
    # --quit shuts the daemon down; otherwise just drop our connection.
    if options.quit:
        connection.down()
    else:
        connection.close()
    sys.exit(0 if retval else 1)
# eof
| |
import os
from funcy import distinct, remove
from .helpers import fix_assets_path, array_from_string, parse_boolean, int_or_none, set_from_string
def all_settings():
    """Return a dict of every plain setting defined at module level.

    Callables, dunder names and imported modules are excluded, leaving
    just the configuration constants declared in this file.
    """
    from types import ModuleType
    settings = {}
    # BUG FIX: dict.iteritems() is Python-2 only (removed in Python 3);
    # .items() behaves identically on both versions.
    for name, item in globals().items():
        if not callable(item) and not name.startswith("__") and not isinstance(item, ModuleType):
            settings[name] = item
    return settings
# Redis connection URL; REDASH_REDIS_URL takes precedence over plain REDIS_URL.
REDIS_URL = os.environ.get('REDASH_REDIS_URL', os.environ.get('REDIS_URL', "redis://localhost:6379/0"))
PROXIES_COUNT = int(os.environ.get('REDASH_PROXIES_COUNT', "1"))
# StatsD metrics endpoint and key prefix.
STATSD_HOST = os.environ.get('REDASH_STATSD_HOST', "127.0.0.1")
STATSD_PORT = int(os.environ.get('REDASH_STATSD_PORT', "8125"))
STATSD_PREFIX = os.environ.get('REDASH_STATSD_PREFIX', "redash")
STATSD_USE_TAGS = parse_boolean(os.environ.get('REDASH_STATSD_USE_TAGS', "false"))
# Connection settings for Redash's own database (where we store the queries, results, etc)
SQLALCHEMY_DATABASE_URI = os.environ.get("REDASH_DATABASE_URL", os.environ.get('DATABASE_URL', "postgresql:///postgres"))
# int_or_none: an unset env var yields None here (presumably meaning
# "use the library default" -- confirm in .helpers).
SQLALCHEMY_MAX_OVERFLOW = int_or_none(os.environ.get("SQLALCHEMY_MAX_OVERFLOW"))
SQLALCHEMY_POOL_SIZE = int_or_none(os.environ.get("SQLALCHEMY_POOL_SIZE"))
SQLALCHEMY_DISABLE_POOL = parse_boolean(os.environ.get("SQLALCHEMY_DISABLE_POOL", "false"))
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_ECHO = False
# Celery related settings
# Broker/backend default to the shared Redis instance when not overridden.
CELERY_BROKER = os.environ.get("REDASH_CELERY_BROKER", REDIS_URL)
CELERY_BACKEND = os.environ.get("REDASH_CELERY_BACKEND", CELERY_BROKER)
CELERY_TASK_RESULT_EXPIRES = int(os.environ.get('REDASH_CELERY_TASK_RESULT_EXPIRES', 3600 * 4))
# The following enables periodic job (every 5 minutes) of removing unused query results.
QUERY_RESULTS_CLEANUP_ENABLED = parse_boolean(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_ENABLED", "true"))
QUERY_RESULTS_CLEANUP_COUNT = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_COUNT", "100"))
QUERY_RESULTS_CLEANUP_MAX_AGE = int(os.environ.get("REDASH_QUERY_RESULTS_CLEANUP_MAX_AGE", "7"))
SCHEMAS_REFRESH_SCHEDULE = int(os.environ.get("REDASH_SCHEMAS_REFRESH_SCHEDULE", 30))
# Authentication settings.
AUTH_TYPE = os.environ.get("REDASH_AUTH_TYPE", "api_key")
ENFORCE_HTTPS = parse_boolean(os.environ.get("REDASH_ENFORCE_HTTPS", "false"))
INVITATION_TOKEN_MAX_AGE = int(os.environ.get("REDASH_INVITATION_TOKEN_MAX_AGE", 60 * 60 * 24 * 7))
MULTI_ORG = parse_boolean(os.environ.get("REDASH_MULTI_ORG", "false"))
# Google OAuth is enabled only when both client id and secret are provided.
GOOGLE_CLIENT_ID = os.environ.get("REDASH_GOOGLE_CLIENT_ID", "")
GOOGLE_CLIENT_SECRET = os.environ.get("REDASH_GOOGLE_CLIENT_SECRET", "")
GOOGLE_OAUTH_ENABLED = bool(GOOGLE_CLIENT_ID and GOOGLE_CLIENT_SECRET)
# Enables the use of an externally-provided and trusted remote user via an HTTP
# header. The "user" must be an email address.
#
# By default the trusted header is X-Forwarded-Remote-User. You can change
# this by setting REDASH_REMOTE_USER_HEADER.
#
# Enabling this authentication method is *potentially dangerous*, and it is
# your responsibility to ensure that only a trusted frontend (usually on the
# same server) can talk to the redash backend server, otherwise people will be
# able to login as anyone they want by directly talking to the redash backend.
# You must *also* ensure that any special header in the original request is
# removed or always overwritten by your frontend, otherwise your frontend may
# pass it through to the backend unchanged.
#
# Note that redash will only check the remote user once, upon the first need
# for a login, and then set a cookie which keeps the user logged in. Dropping
# the remote user header after subsequent requests won't automatically log the
# user out. Doing so could be done with further work, but usually it's
# unnecessary.
#
# If you also set the organization setting auth_password_login_enabled to false,
# then your authentication will be seamless. Otherwise a link will be presented
# on the login page to trigger remote user auth.
REMOTE_USER_LOGIN_ENABLED = parse_boolean(os.environ.get("REDASH_REMOTE_USER_LOGIN_ENABLED", "false"))
REMOTE_USER_HEADER = os.environ.get("REDASH_REMOTE_USER_HEADER", "X-Forwarded-Remote-User")
# If the organization setting auth_password_login_enabled is not false, then users will still be
# able to login through Redash instead of the LDAP server
LDAP_LOGIN_ENABLED = parse_boolean(os.environ.get('REDASH_LDAP_LOGIN_ENABLED', 'false'))
# The LDAP directory address (ex. ldap://10.0.10.1:389)
LDAP_HOST_URL = os.environ.get('REDASH_LDAP_URL', None)
# The DN & password used to connect to LDAP to determine the identity of the user being authenticated.
# For AD this should be "org\\user".
LDAP_BIND_DN = os.environ.get('REDASH_LDAP_BIND_DN', None)
LDAP_BIND_DN_PASSWORD = os.environ.get('REDASH_LDAP_BIND_DN_PASSWORD', '')
# AD/LDAP email and display name keys
LDAP_DISPLAY_NAME_KEY = os.environ.get('REDASH_LDAP_DISPLAY_NAME_KEY', 'displayName')
LDAP_EMAIL_KEY = os.environ.get('REDASH_LDAP_EMAIL_KEY', "mail")
# Prompt that should be shown above username/email field.
LDAP_CUSTOM_USERNAME_PROMPT = os.environ.get('REDASH_LDAP_CUSTOM_USERNAME_PROMPT', 'LDAP/AD/SSO username:')
# LDAP search template (for AD this should be "(sAMAccountName=%(username)s)")
LDAP_SEARCH_TEMPLATE = os.environ.get('REDASH_LDAP_SEARCH_TEMPLATE', '(cn=%(username)s)')
# The schema to bind to (ex. cn=users,dc=ORG,dc=local)
LDAP_SEARCH_DN = os.environ.get('REDASH_LDAP_SEARCH_DN', os.environ.get('REDASH_SEARCH_DN'))
STATIC_ASSETS_PATH = fix_assets_path(os.environ.get("REDASH_STATIC_ASSETS_PATH", "../client/dist/"))
JOB_EXPIRY_TIME = int(os.environ.get("REDASH_JOB_EXPIRY_TIME", 3600 * 12))
# Session/cookie settings. The default secret is only suitable for
# development; production deployments should override it.
COOKIE_SECRET = os.environ.get("REDASH_COOKIE_SECRET", "c292a0a3aa32397cdb050e233733900f")
SESSION_COOKIE_SECURE = parse_boolean(os.environ.get("REDASH_SESSION_COOKIE_SECURE") or str(ENFORCE_HTTPS))
# Logging configuration for the web app and Celery workers.
LOG_LEVEL = os.environ.get("REDASH_LOG_LEVEL", "INFO")
LOG_STDOUT = parse_boolean(os.environ.get('REDASH_LOG_STDOUT', 'false'))
LOG_PREFIX = os.environ.get('REDASH_LOG_PREFIX', '')
LOG_FORMAT = os.environ.get('REDASH_LOG_FORMAT', LOG_PREFIX + '[%(asctime)s][PID:%(process)d][%(levelname)s][%(name)s] %(message)s')
CELERYD_LOG_FORMAT = os.environ.get('REDASH_CELERYD_LOG_FORMAT', LOG_PREFIX + '[%(asctime)s][PID:%(process)d][%(levelname)s][%(processName)s] %(message)s')
CELERYD_TASK_LOG_FORMAT = os.environ.get('REDASH_CELERYD_TASK_LOG_FORMAT', LOG_PREFIX + '[%(asctime)s][PID:%(process)d][%(levelname)s][%(processName)s] task_name=%(task_name)s task_id=%(task_id)s %(message)s')
# Mail settings:
MAIL_SERVER = os.environ.get('REDASH_MAIL_SERVER', 'localhost')
MAIL_PORT = int(os.environ.get('REDASH_MAIL_PORT', 25))
MAIL_USE_TLS = parse_boolean(os.environ.get('REDASH_MAIL_USE_TLS', 'false'))
MAIL_USE_SSL = parse_boolean(os.environ.get('REDASH_MAIL_USE_SSL', 'false'))
MAIL_USERNAME = os.environ.get('REDASH_MAIL_USERNAME', None)
MAIL_PASSWORD = os.environ.get('REDASH_MAIL_PASSWORD', None)
MAIL_DEFAULT_SENDER = os.environ.get('REDASH_MAIL_DEFAULT_SENDER', None)
MAIL_MAX_EMAILS = os.environ.get('REDASH_MAIL_MAX_EMAILS', None)
MAIL_ASCII_ATTACHMENTS = parse_boolean(os.environ.get('REDASH_MAIL_ASCII_ATTACHMENTS', 'false'))
# Public host name of this Redash instance (used e.g. when building links).
HOST = os.environ.get('REDASH_HOST', '')
ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE = os.environ.get('REDASH_ALERTS_DEFAULT_MAIL_SUBJECT_TEMPLATE', "({state}) {alert_name}")
# How many requests are allowed per IP to the login page before
# being throttled?
# See https://flask-limiter.readthedocs.io/en/stable/#rate-limit-string-notation
THROTTLE_LOGIN_PATTERN = os.environ.get('REDASH_THROTTLE_LOGIN_PATTERN', '50/hour')
LIMITER_STORAGE = os.environ.get("REDASH_LIMITER_STORAGE", REDIS_URL)
# CORS settings for the Query Result API (and possibly future external APIs).
# In most cases all you need to do is set REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN
# to the calling domain (or domains in a comma separated list).
ACCESS_CONTROL_ALLOW_ORIGIN = set_from_string(os.environ.get("REDASH_CORS_ACCESS_CONTROL_ALLOW_ORIGIN", ""))
ACCESS_CONTROL_ALLOW_CREDENTIALS = parse_boolean(os.environ.get("REDASH_CORS_ACCESS_CONTROL_ALLOW_CREDENTIALS", "false"))
ACCESS_CONTROL_REQUEST_METHOD = os.environ.get("REDASH_CORS_ACCESS_CONTROL_REQUEST_METHOD", "GET, POST, PUT")
ACCESS_CONTROL_ALLOW_HEADERS = os.environ.get("REDASH_CORS_ACCESS_CONTROL_ALLOW_HEADERS", "Content-Type")
# Query Runners
# Modules loaded by default; the effective set can be customized with the
# REDASH_{ENABLED,ADDITIONAL,DISABLED}_QUERY_RUNNERS env vars below.
default_query_runners = [
    'redash.query_runner.athena',
    'redash.query_runner.big_query',
    'redash.query_runner.drill',
    'redash.query_runner.google_spreadsheets',
    'redash.query_runner.graphite',
    'redash.query_runner.mongodb',
    'redash.query_runner.mysql',
    'redash.query_runner.pg',
    'redash.query_runner.url',
    'redash.query_runner.influx_db',
    'redash.query_runner.elasticsearch',
    'redash.query_runner.presto',
    'redash.query_runner.hive_ds',
    'redash.query_runner.impala_ds',
    'redash.query_runner.vertica',
    'redash.query_runner.clickhouse',
    'redash.query_runner.yandex_metrika',
    'redash.query_runner.treasuredata',
    'redash.query_runner.sqlite',
    'redash.query_runner.dynamodb_sql',
    'redash.query_runner.mssql',
    'redash.query_runner.memsql_ds',
    'redash.query_runner.mapd',
    'redash.query_runner.jql',
    'redash.query_runner.google_analytics',
    'redash.query_runner.axibase_tsd',
    'redash.query_runner.salesforce',
    'redash.query_runner.query_results',
    'redash.query_runner.prometheus',
    'redash.query_runner.qubole'
]
enabled_query_runners = array_from_string(os.environ.get("REDASH_ENABLED_QUERY_RUNNERS", ",".join(default_query_runners)))
additional_query_runners = array_from_string(os.environ.get("REDASH_ADDITIONAL_QUERY_RUNNERS", ""))
disabled_query_runners = array_from_string(os.environ.get("REDASH_DISABLED_QUERY_RUNNERS", ""))
# Deduplicated union of enabled + additional, minus anything disabled.
QUERY_RUNNERS = remove(set(disabled_query_runners), distinct(enabled_query_runners + additional_query_runners))
ADHOC_QUERY_TIME_LIMIT = int_or_none(os.environ.get('REDASH_ADHOC_QUERY_TIME_LIMIT', None))
# Destinations
# Alert destination modules loaded by default; customizable via env vars.
default_destinations = [
    'redash.destinations.email',
    'redash.destinations.slack',
    'redash.destinations.webhook',
    'redash.destinations.hipchat',
    'redash.destinations.mattermost',
    'redash.destinations.chatwork',
]
enabled_destinations = array_from_string(os.environ.get("REDASH_ENABLED_DESTINATIONS", ",".join(default_destinations)))
additional_destinations = array_from_string(os.environ.get("REDASH_ADDITIONAL_DESTINATIONS", ""))
# Deduplicated union of enabled + additional destinations.
DESTINATIONS = distinct(enabled_destinations + additional_destinations)
EVENT_REPORTING_WEBHOOKS = array_from_string(os.environ.get("REDASH_EVENT_REPORTING_WEBHOOKS", ""))
# Support for Sentry (http://getsentry.com/). Just set your Sentry DSN to enable it:
SENTRY_DSN = os.environ.get("REDASH_SENTRY_DSN", "")
# Client side toggles:
ALLOW_SCRIPTS_IN_USER_INPUT = parse_boolean(os.environ.get("REDASH_ALLOW_SCRIPTS_IN_USER_INPUT", "false"))
DATE_FORMAT = os.environ.get("REDASH_DATE_FORMAT", "DD/MM/YY")
# Wrap map() in list() so these are real, re-iterable lists on Python 3 as
# well (where map() returns a one-shot iterator); on Python 2 map() already
# returned a list, so behavior is unchanged.
DASHBOARD_REFRESH_INTERVALS = list(map(int, array_from_string(os.environ.get("REDASH_DASHBOARD_REFRESH_INTERVALS", "60,300,600,1800,3600,43200,86400"))))
QUERY_REFRESH_INTERVALS = list(map(int, array_from_string(os.environ.get("REDASH_QUERY_REFRESH_INTERVALS", "60, 300, 600, 900, 1800, 3600, 7200, 10800, 14400, 18000, 21600, 25200, 28800, 32400, 36000, 39600, 43200, 86400, 604800, 1209600, 2592000"))))
# Features:
VERSION_CHECK = parse_boolean(os.environ.get("REDASH_VERSION_CHECK", "true"))
FEATURE_DISABLE_REFRESH_QUERIES = parse_boolean(os.environ.get("REDASH_FEATURE_DISABLE_REFRESH_QUERIES", "false"))
FEATURE_SHOW_QUERY_RESULTS_COUNT = parse_boolean(os.environ.get("REDASH_FEATURE_SHOW_QUERY_RESULTS_COUNT", "true"))
FEATURE_SHOW_PERMISSIONS_CONTROL = parse_boolean(os.environ.get("REDASH_FEATURE_SHOW_PERMISSIONS_CONTROL", "false"))
FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS = parse_boolean(os.environ.get("REDASH_FEATURE_ALLOW_CUSTOM_JS_VISUALIZATIONS", "false"))
FEATURE_DUMB_RECENTS = parse_boolean(os.environ.get("REDASH_FEATURE_DUMB_RECENTS", "false"))
FEATURE_AUTO_PUBLISH_NAMED_QUERIES = parse_boolean(os.environ.get("REDASH_FEATURE_AUTO_PUBLISH_NAMED_QUERIES", "true"))
# BigQuery
BIGQUERY_HTTP_TIMEOUT = int(os.environ.get("REDASH_BIGQUERY_HTTP_TIMEOUT", "600"))
# Enhance schema fetching
SCHEMA_RUN_TABLE_SIZE_CALCULATIONS = parse_boolean(os.environ.get("REDASH_SCHEMA_RUN_TABLE_SIZE_CALCULATIONS", "false"))
# Allow Parameters in Embeds
# WARNING: With this option enabled, Redash reads query parameters from the request URL (risk of SQL injection!)
ALLOW_PARAMETERS_IN_EMBEDS = parse_boolean(os.environ.get("REDASH_ALLOW_PARAMETERS_IN_EMBEDS", "false"))
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import unittest
from telemetry.timeline import async_slice
from telemetry.timeline import bounds
from telemetry.timeline import model
from telemetry.util import perf_tests_helper
from telemetry.util import statistics
from telemetry.web_perf.metrics import rendering_stats
class MockTimer(object):
  """A mock timer class which can generate random durations.

  An instance of this class is used as a global timer to generate random
  durations for stats and consistent timestamps for all mock trace events.
  The unit of time is milliseconds.
  """

  def __init__(self):
    # Current simulated time, in milliseconds.
    self.milliseconds = 0

  def Advance(self, low=0.1, high=1):
    """Advance the clock by a random delta in [low, high]; return the delta."""
    step = random.uniform(low, high)
    self.milliseconds = self.milliseconds + step
    return step

  def AdvanceAndGet(self, low=0.1, high=1):
    """Advance the clock by a random delta and return the new current time."""
    self.Advance(low, high)
    return self.milliseconds
class ReferenceRenderingStats(object):
  """ Stores expected data for comparison with actual RenderingStats """

  def __init__(self):
    # Each stat holds one sub-list per timeline range (see AppendNewRange).
    self.frame_timestamps = []
    self.frame_times = []
    self.approximated_pixel_percentages = []
    self.checkerboarded_pixel_percentages = []

  def AppendNewRange(self):
    """Open a fresh per-range bucket on every stat list."""
    for stat_list in (self.frame_timestamps,
                      self.frame_times,
                      self.approximated_pixel_percentages,
                      self.checkerboarded_pixel_percentages):
      stat_list.append([])
class ReferenceInputLatencyStats(object):
  """ Stores expected data for comparison with actual input latency stats """

  def __init__(self):
    # (event_name, latency) pairs expected from ComputeEventLatencies.
    self.input_event_latency = []
    # The raw async slices the latencies are derived from.
    self.input_event = []
def AddSurfaceFlingerStats(mock_timer, thread, first_frame,
                           ref_stats=None):
  """ Adds a random surface flinger stats event.

  thread: The timeline model thread to which the event will be added.
  first_frame: Is this the first frame within the bounds of an action?
  ref_stats: A ReferenceRenderingStats object to record expected values.
  """
  # Data and timestamp for the surface flinger stats event.
  event_data = {'frame_count': 1,
                'refresh_period': 16.6666}
  event_time = mock_timer.AdvanceAndGet()

  # Attach the event to the given thread as a zero-duration complete slice.
  thread.PushCompleteSlice(
      'SurfaceFlinger', 'vsync_before',
      event_time, duration=0.0, thread_timestamp=None, thread_duration=None,
      args={'data': event_data})

  if not ref_stats:
    return

  # Record expectations only when a frame was output.
  if event_data['frame_count'] == 1:
    if not first_frame:
      # Frame time is the delta from the previous frame of this range.
      previous_time = ref_stats.frame_timestamps[-1][-1]
      ref_stats.frame_times[-1].append(event_time - previous_time)
    ref_stats.frame_timestamps[-1].append(event_time)
def AddDisplayRenderingStats(mock_timer, thread, first_frame,
                             ref_stats=None):
  """ Adds a random display rendering stats event.

  thread: The timeline model thread to which the event will be added.
  first_frame: Is this the first frame within the bounds of an action?
  ref_stats: A ReferenceRenderingStats object to record expected values.
  """
  # Data and timestamp for the display rendering stats event.
  event_data = {'frame_count': 1}
  event_time = mock_timer.AdvanceAndGet()

  # Attach the event to the given thread as a zero-duration complete slice.
  thread.PushCompleteSlice(
      'benchmark', 'BenchmarkInstrumentation::DisplayRenderingStats',
      event_time, duration=0.0, thread_timestamp=None, thread_duration=None,
      args={'data': event_data})

  if not ref_stats:
    return

  # Record expectations: a frame time only from the second frame onwards.
  if not first_frame:
    previous_time = ref_stats.frame_timestamps[-1][-1]
    ref_stats.frame_times[-1].append(event_time - previous_time)
  ref_stats.frame_timestamps[-1].append(event_time)
def AddImplThreadRenderingStats(mock_timer, thread, first_frame,
                                ref_stats=None):
  """ Adds a random impl thread rendering stats event.

  thread: The timeline model thread to which the event will be added.
  first_frame: Is this the first frame within the bounds of an action?
  ref_stats: A ReferenceRenderingStats object to record expected values.
  """
  # Random data and timestamp for the impl thread rendering stats event.
  event_data = {'frame_count': 1,
                'visible_content_area': random.uniform(0, 100),
                'approximated_visible_content_area': random.uniform(0, 5),
                'checkerboarded_visible_content_area': random.uniform(0, 5)}
  event_time = mock_timer.AdvanceAndGet()

  # Attach the event to the given thread as a zero-duration complete slice.
  thread.PushCompleteSlice(
      'benchmark', 'BenchmarkInstrumentation::ImplThreadRenderingStats',
      event_time, duration=0.0, thread_timestamp=None, thread_duration=None,
      args={'data': event_data})

  if not ref_stats:
    return

  def _as_percentage(area):
    # Share of the visible content area, as a percentage rounded to 3 places.
    return round(statistics.DivideIfPossibleOrZero(
        area, event_data['visible_content_area']) * 100.0, 3)

  # Record expectations only when a frame was output.
  if event_data['frame_count'] == 1:
    if not first_frame:
      # Frame time is the delta from the previous frame of this range.
      previous_time = ref_stats.frame_timestamps[-1][-1]
      ref_stats.frame_times[-1].append(event_time - previous_time)
    ref_stats.frame_timestamps[-1].append(event_time)

  ref_stats.approximated_pixel_percentages[-1].append(
      _as_percentage(event_data['approximated_visible_content_area']))
  ref_stats.checkerboarded_pixel_percentages[-1].append(
      _as_percentage(event_data['checkerboarded_visible_content_area']))
def AddInputLatencyStats(mock_timer, start_thread, end_thread,
                         ref_latency_stats=None):
  """ Adds a random input latency stats event.

  start_thread: The start thread on which the async slice is added.
  end_thread: The end thread on which the async slice is ended.
  ref_latency_stats: A ReferenceInputLatencyStats object for expected values.
  """
  # Component times are scaled by 1000 (AdvanceAndGet returns milliseconds;
  # presumably the latency data is in microseconds -- confirm against
  # rendering_stats).
  original_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
  ui_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
  begin_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
  forward_comp_time = mock_timer.AdvanceAndGet(2, 4) * 1000.0
  end_comp_time = mock_timer.AdvanceAndGet(10, 20) * 1000.0

  # Latency components for the gesture scroll update event.
  data = {rendering_stats.ORIGINAL_COMP_NAME: {'time': original_comp_time},
          rendering_stats.UI_COMP_NAME: {'time': ui_comp_time},
          rendering_stats.BEGIN_COMP_NAME: {'time': begin_comp_time},
          rendering_stats.END_COMP_NAME: {'time': end_comp_time}}

  timestamp = mock_timer.AdvanceAndGet(2, 4)

  # Build an InputLatency async slice with a gesture-scroll-update sub-slice
  # spanning start_thread -> end_thread.
  tracing_async_slice = async_slice.AsyncSlice(
      'benchmark', 'InputLatency', timestamp)

  async_sub_slice = async_slice.AsyncSlice(
      'benchmark', rendering_stats.GESTURE_SCROLL_UPDATE_EVENT_NAME, timestamp)
  async_sub_slice.args = {'data': data}
  async_sub_slice.parent_slice = tracing_async_slice
  async_sub_slice.start_thread = start_thread
  async_sub_slice.end_thread = end_thread

  tracing_async_slice.sub_slices.append(async_sub_slice)
  tracing_async_slice.start_thread = start_thread
  tracing_async_slice.end_thread = end_thread
  start_thread.AddAsyncSlice(tracing_async_slice)

  # Add scroll update latency info.
  scroll_update_data = {
      rendering_stats.BEGIN_SCROLL_UPDATE_COMP_NAME: {'time': begin_comp_time},
      rendering_stats.FORWARD_SCROLL_UPDATE_COMP_NAME:
          {'time': forward_comp_time},
      rendering_stats.END_COMP_NAME: {'time': end_comp_time}
  }

  # Second InputLatency async slice for the scroll update event, wired the
  # same way as the gesture slice above.
  scroll_async_slice = async_slice.AsyncSlice(
      'benchmark', 'InputLatency', timestamp)

  scroll_async_sub_slice = async_slice.AsyncSlice(
      'benchmark', rendering_stats.SCROLL_UPDATE_EVENT_NAME, timestamp)
  scroll_async_sub_slice.args = {'data': scroll_update_data}
  scroll_async_sub_slice.parent_slice = scroll_async_slice
  scroll_async_sub_slice.start_thread = start_thread
  scroll_async_sub_slice.end_thread = end_thread

  scroll_async_slice.sub_slices.append(scroll_async_sub_slice)
  scroll_async_slice.start_thread = start_thread
  scroll_async_slice.end_thread = end_thread
  start_thread.AddAsyncSlice(scroll_async_slice)

  # Also add some dummy frame statistics so we can feed the resulting timeline
  # to RenderingStats.
  AddImplThreadRenderingStats(mock_timer, end_thread, False)

  if not ref_latency_stats:
    return

  # Record the sub-slices and their expected latencies: end minus original
  # for the gesture event, end minus begin-scroll-update for the scroll
  # event, both converted back from the *1000 scale.
  ref_latency_stats.input_event.append(async_sub_slice)
  ref_latency_stats.input_event.append(scroll_async_sub_slice)
  ref_latency_stats.input_event_latency.append((
      rendering_stats.GESTURE_SCROLL_UPDATE_EVENT_NAME,
      (data[rendering_stats.END_COMP_NAME]['time'] -
       data[rendering_stats.ORIGINAL_COMP_NAME]['time']) / 1000.0))
  scroll_update_time = (
      scroll_update_data[rendering_stats.END_COMP_NAME]['time'] -
      scroll_update_data[rendering_stats.BEGIN_SCROLL_UPDATE_COMP_NAME]['time'])
  ref_latency_stats.input_event_latency.append((
      rendering_stats.SCROLL_UPDATE_EVENT_NAME,
      scroll_update_time / 1000.0))
class RenderingStatsUnitTest(unittest.TestCase):
  """Tests rendering_stats against mock timeline models built with MockTimer."""

  def testHasRenderingStats(self):
    timeline = model.TimelineModel()
    timer = MockTimer()

    # A process without rendering stats
    process_without_stats = timeline.GetOrCreateProcess(pid=1)
    thread_without_stats = process_without_stats.GetOrCreateThread(tid=11)
    process_without_stats.FinalizeImport()
    self.assertFalse(rendering_stats.HasRenderingStats(thread_without_stats))

    # A process with rendering stats, but no frames in them
    process_without_frames = timeline.GetOrCreateProcess(pid=2)
    thread_without_frames = process_without_frames.GetOrCreateThread(tid=21)
    process_without_frames.FinalizeImport()
    self.assertFalse(rendering_stats.HasRenderingStats(thread_without_frames))

    # A process with rendering stats and frames in them
    process_with_frames = timeline.GetOrCreateProcess(pid=3)
    thread_with_frames = process_with_frames.GetOrCreateThread(tid=31)
    AddImplThreadRenderingStats(timer, thread_with_frames, True, None)
    process_with_frames.FinalizeImport()
    self.assertTrue(rendering_stats.HasRenderingStats(thread_with_frames))

  def testBothSurfaceFlingerAndDisplayStats(self):
    # When a SurfaceFlinger process is present, only its stats should count.
    timeline = model.TimelineModel()
    timer = MockTimer()

    ref_stats = ReferenceRenderingStats()
    ref_stats.AppendNewRange()
    surface_flinger = timeline.GetOrCreateProcess(pid=4)
    surface_flinger.name = 'SurfaceFlinger'
    surface_flinger_thread = surface_flinger.GetOrCreateThread(tid=41)
    renderer = timeline.GetOrCreateProcess(pid=2)
    browser = timeline.GetOrCreateProcess(pid=3)
    browser_main = browser.GetOrCreateThread(tid=31)
    browser_main.BeginSlice('webkit.console', 'ActionA',
                            timer.AdvanceAndGet(2, 4), '')

    # Create SurfaceFlinger stats and display rendering stats.
    for i in xrange(0, 10):
      first = (i == 0)
      AddSurfaceFlingerStats(timer, surface_flinger_thread, first, ref_stats)
      timer.Advance(2, 4)

    for i in xrange(0, 10):
      first = (i == 0)
      AddDisplayRenderingStats(timer, browser_main, first, None)
      timer.Advance(5, 10)

    browser_main.EndSlice(timer.AdvanceAndGet())
    timer.Advance(2, 4)

    browser.FinalizeImport()
    renderer.FinalizeImport()
    timeline_markers = timeline.FindTimelineMarkers(['ActionA'])
    timeline_ranges = [bounds.Bounds.CreateFromEvent(marker)
                       for marker in timeline_markers]
    stats = rendering_stats.RenderingStats(
        renderer, browser, surface_flinger, timeline_ranges)

    # Compare rendering stats to reference - Only SurfaceFlinger stats should
    # count
    self.assertEquals(stats.frame_timestamps, ref_stats.frame_timestamps)
    self.assertEquals(stats.frame_times, ref_stats.frame_times)

  def testBothDisplayAndImplStats(self):
    # Without a SurfaceFlinger process, display stats take precedence over
    # impl thread stats.
    timeline = model.TimelineModel()
    timer = MockTimer()

    ref_stats = ReferenceRenderingStats()
    ref_stats.AppendNewRange()
    renderer = timeline.GetOrCreateProcess(pid=2)
    browser = timeline.GetOrCreateProcess(pid=3)
    browser_main = browser.GetOrCreateThread(tid=31)
    browser_main.BeginSlice('webkit.console', 'ActionA',
                            timer.AdvanceAndGet(2, 4), '')

    # Create main, impl, and display rendering stats.
    for i in xrange(0, 10):
      first = (i == 0)
      AddImplThreadRenderingStats(timer, browser_main, first, None)
      timer.Advance(2, 4)

    for i in xrange(0, 10):
      first = (i == 0)
      AddDisplayRenderingStats(timer, browser_main, first, ref_stats)
      timer.Advance(5, 10)

    browser_main.EndSlice(timer.AdvanceAndGet())
    timer.Advance(2, 4)

    browser.FinalizeImport()
    renderer.FinalizeImport()
    timeline_markers = timeline.FindTimelineMarkers(['ActionA'])
    timeline_ranges = [bounds.Bounds.CreateFromEvent(marker)
                       for marker in timeline_markers]
    stats = rendering_stats.RenderingStats(
        renderer, browser, None, timeline_ranges)

    # Compare rendering stats to reference - Only display stats should count
    self.assertEquals(stats.frame_timestamps, ref_stats.frame_timestamps)
    self.assertEquals(stats.frame_times, ref_stats.frame_times)

  def testRangeWithoutFrames(self):
    timer = MockTimer()
    timeline = model.TimelineModel()

    # Create a renderer process, with a main thread and impl thread.
    renderer = timeline.GetOrCreateProcess(pid=2)
    renderer_main = renderer.GetOrCreateThread(tid=21)
    renderer_compositor = renderer.GetOrCreateThread(tid=22)

    # Create 10 main and impl rendering stats events for Action A.
    renderer_main.BeginSlice('webkit.console', 'ActionA',
                             timer.AdvanceAndGet(2, 4), '')
    for i in xrange(0, 10):
      first = (i == 0)
      AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
    timer.Advance(2, 4)

    # Create 5 main and impl rendering stats events not within any action.
    for i in xrange(0, 5):
      first = (i == 0)
      AddImplThreadRenderingStats(timer, renderer_compositor, first, None)

    # Create Action B without any frames. This should trigger
    # NotEnoughFramesError when the RenderingStats object is created.
    renderer_main.BeginSlice('webkit.console', 'ActionB',
                             timer.AdvanceAndGet(2, 4), '')
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))

    renderer.FinalizeImport()

    timeline_markers = timeline.FindTimelineMarkers(['ActionA', 'ActionB'])
    timeline_ranges = [bounds.Bounds.CreateFromEvent(marker)
                       for marker in timeline_markers]

    stats = rendering_stats.RenderingStats(
        renderer, None, None, timeline_ranges)
    # The second range (Action B) must be empty rather than raising.
    self.assertEquals(0, len(stats.frame_timestamps[1]))

  def testFromTimeline(self):
    timeline = model.TimelineModel()

    # Create a browser process and a renderer process, and a main thread and
    # impl thread for each.
    browser = timeline.GetOrCreateProcess(pid=1)
    browser_compositor = browser.GetOrCreateThread(tid=12)
    renderer = timeline.GetOrCreateProcess(pid=2)
    renderer_main = renderer.GetOrCreateThread(tid=21)
    renderer_compositor = renderer.GetOrCreateThread(tid=22)

    timer = MockTimer()
    renderer_ref_stats = ReferenceRenderingStats()
    browser_ref_stats = ReferenceRenderingStats()

    # Create 10 main and impl rendering stats events for Action A.
    renderer_main.BeginSlice('webkit.console', 'ActionA',
                             timer.AdvanceAndGet(2, 4), '')
    renderer_ref_stats.AppendNewRange()
    browser_ref_stats.AppendNewRange()
    for i in xrange(0, 10):
      first = (i == 0)
      AddImplThreadRenderingStats(
          timer, renderer_compositor, first, renderer_ref_stats)
      AddImplThreadRenderingStats(
          timer, browser_compositor, first, browser_ref_stats)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))

    # Create 5 main and impl rendering stats events not within any action.
    for i in xrange(0, 5):
      first = (i == 0)
      AddImplThreadRenderingStats(timer, renderer_compositor, first, None)
      AddImplThreadRenderingStats(timer, browser_compositor, first, None)

    # Create 10 main and impl rendering stats events for Action B.
    renderer_main.BeginSlice('webkit.console', 'ActionB',
                             timer.AdvanceAndGet(2, 4), '')
    renderer_ref_stats.AppendNewRange()
    browser_ref_stats.AppendNewRange()
    for i in xrange(0, 10):
      first = (i == 0)
      AddImplThreadRenderingStats(
          timer, renderer_compositor, first, renderer_ref_stats)
      AddImplThreadRenderingStats(
          timer, browser_compositor, first, browser_ref_stats)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))

    # Create 10 main and impl rendering stats events for Action A.
    renderer_main.BeginSlice('webkit.console', 'ActionA',
                             timer.AdvanceAndGet(2, 4), '')
    renderer_ref_stats.AppendNewRange()
    browser_ref_stats.AppendNewRange()
    for i in xrange(0, 10):
      first = (i == 0)
      AddImplThreadRenderingStats(
          timer, renderer_compositor, first, renderer_ref_stats)
      AddImplThreadRenderingStats(
          timer, browser_compositor, first, browser_ref_stats)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))
    timer.Advance(2, 4)

    browser.FinalizeImport()
    renderer.FinalizeImport()

    timeline_markers = timeline.FindTimelineMarkers(
        ['ActionA', 'ActionB', 'ActionA'])
    timeline_ranges = [bounds.Bounds.CreateFromEvent(marker)
                       for marker in timeline_markers]
    stats = rendering_stats.RenderingStats(
        renderer, browser, None, timeline_ranges)

    # Compare rendering stats to reference.
    # Frame timings come from the browser; pixel percentages from the renderer.
    self.assertEquals(stats.frame_timestamps,
                      browser_ref_stats.frame_timestamps)
    self.assertEquals(stats.frame_times, browser_ref_stats.frame_times)
    self.assertEquals(stats.approximated_pixel_percentages,
                      renderer_ref_stats.approximated_pixel_percentages)
    self.assertEquals(stats.checkerboarded_pixel_percentages,
                      renderer_ref_stats.checkerboarded_pixel_percentages)

  def testInputLatencyFromTimeline(self):
    timeline = model.TimelineModel()

    # Create a browser process and a renderer process.
    browser = timeline.GetOrCreateProcess(pid=1)
    browser_main = browser.GetOrCreateThread(tid=11)
    renderer = timeline.GetOrCreateProcess(pid=2)
    renderer_main = renderer.GetOrCreateThread(tid=21)

    timer = MockTimer()
    ref_latency = ReferenceInputLatencyStats()

    # Create 10 input latency stats events for Action A.
    renderer_main.BeginSlice('webkit.console', 'ActionA',
                             timer.AdvanceAndGet(2, 4), '')
    for _ in xrange(0, 10):
      AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))

    # Create 5 input latency stats events not within any action.
    timer.Advance(2, 4)
    for _ in xrange(0, 5):
      AddInputLatencyStats(timer, browser_main, renderer_main, None)

    # Create 10 input latency stats events for Action B.
    renderer_main.BeginSlice('webkit.console', 'ActionB',
                             timer.AdvanceAndGet(2, 4), '')
    for _ in xrange(0, 10):
      AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))

    # Create 10 input latency stats events for Action A.
    renderer_main.BeginSlice('webkit.console', 'ActionA',
                             timer.AdvanceAndGet(2, 4), '')
    for _ in xrange(0, 10):
      AddInputLatencyStats(timer, browser_main, renderer_main, ref_latency)
    renderer_main.EndSlice(timer.AdvanceAndGet(2, 4))

    browser.FinalizeImport()
    renderer.FinalizeImport()

    # Collect latency events from the browser over the marked ranges only.
    latency_events = []

    timeline_markers = timeline.FindTimelineMarkers(
        ['ActionA', 'ActionB', 'ActionA'])
    timeline_ranges = [bounds.Bounds.CreateFromEvent(marker)
                       for marker in timeline_markers]
    for timeline_range in timeline_ranges:
      if timeline_range.is_empty:
        continue
      latency_events.extend(rendering_stats.GetLatencyEvents(
          browser, timeline_range))

    self.assertEquals(latency_events, ref_latency.input_event)
    event_latency_result = rendering_stats.ComputeEventLatencies(latency_events)
    self.assertEquals(event_latency_result,
                      ref_latency.input_event_latency)

    stats = rendering_stats.RenderingStats(
        renderer, browser, None, timeline_ranges)
    # input_event_latency excludes scroll updates; the scroll/gesture lists
    # contain only their respective event kinds.
    self.assertEquals(
        perf_tests_helper.FlattenList(stats.input_event_latency),
        [latency for name, latency in ref_latency.input_event_latency
         if name != rendering_stats.SCROLL_UPDATE_EVENT_NAME])
    self.assertEquals(
        perf_tests_helper.FlattenList(stats.scroll_update_latency),
        [latency for name, latency in ref_latency.input_event_latency
         if name == rendering_stats.SCROLL_UPDATE_EVENT_NAME])
    self.assertEquals(
        perf_tests_helper.FlattenList(stats.gesture_scroll_update_latency),
        [latency for name, latency in ref_latency.input_event_latency
         if name == rendering_stats.GESTURE_SCROLL_UPDATE_EVENT_NAME])
| |
import gc
import sys
import unittest
import UserList
import weakref
from test import test_support
class C:
    """Trivial classic class used as a weak-referenceable target."""

    def method(self):
        """No-op; referenced to create bound/unbound method objects."""
        pass
class Callable:
    """Callable object that remembers the last argument it was called with."""

    # Last argument seen by __call__ (None until the first call).
    bar = None

    def __call__(self, x):
        # Record the argument so tests can observe that the call happened.
        self.bar = x
def create_function():
    """Return a fresh plain function, suitable as a weakref target."""
    def f():
        pass
    return f
def create_bound_method():
    """Return a method bound to a fresh C instance."""
    instance = C()
    return instance.method
def create_unbound_method():
    """Return C's method looked up on the class (unbound on Python 2)."""
    return C.method
class TestBase(unittest.TestCase):
    """Base class providing a weakref callback that counts its invocations."""

    def setUp(self):
        # Number of times callback() has fired so far.
        self.cbcalled = 0

    def callback(self, ref):
        """Weakref callback: bump the invocation counter."""
        self.cbcalled = self.cbcalled + 1
class ReferencesTestCase(TestBase):
    """Exercise weakref.ref and weakref.proxy core behavior.

    Covers basic reference/proxy creation and invalidation, callback
    invocation counts, re-use of callback-less refs/proxies, and a number
    of historical segfault/regression scenarios where weakref callbacks
    interacted badly with cyclic gc (SF bug numbers cited inline).
    """
    def test_basic_ref(self):
        self.check_basic_ref(C)
        self.check_basic_ref(create_function)
        self.check_basic_ref(create_bound_method)
        self.check_basic_ref(create_unbound_method)
        # Just make sure the tp_repr handler doesn't raise an exception.
        # Live reference:
        o = C()
        wr = weakref.ref(o)
        `wr`
        # Dead reference:
        del o
        `wr`
    def test_basic_callback(self):
        self.check_basic_callback(C)
        self.check_basic_callback(create_function)
        self.check_basic_callback(create_bound_method)
        self.check_basic_callback(create_unbound_method)
    def test_multiple_callbacks(self):
        # Two refs with callbacks to the same object: both callbacks fire
        # when the referent dies.
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del o
        self.assert_(ref1() is None,
                     "expected reference to be invalidated")
        self.assert_(ref2() is None,
                     "expected reference to be invalidated")
        self.assert_(self.cbcalled == 2,
                     "callback not called the right number of times")
    def test_multiple_selfref_callbacks(self):
        # Make sure all references are invalidated before callbacks are called
        #
        # What's important here is that we're using the first
        # reference in the callback invoked on the second reference
        # (the most recently created ref is cleaned up first). This
        # tests that all references to the object are invalidated
        # before any of the callbacks are invoked, so that we only
        # have one invocation of _weakref.c:cleanup_helper() active
        # for a particular object at a time.
        #
        def callback(object, self=self):
            self.ref()
        c = C()
        self.ref = weakref.ref(c, callback)
        ref1 = weakref.ref(c, callback)
        del c
    def test_proxy_ref(self):
        o = C()
        o.bar = 1
        ref1 = weakref.proxy(o, self.callback)
        ref2 = weakref.proxy(o, self.callback)
        del o
        # Attribute access through a proxy whose referent has died must
        # raise weakref.ReferenceError.
        def check(proxy):
            proxy.bar
        self.assertRaises(weakref.ReferenceError, check, ref1)
        self.assertRaises(weakref.ReferenceError, check, ref2)
        self.assertRaises(weakref.ReferenceError, bool, weakref.proxy(C()))
        self.assert_(self.cbcalled == 2)
    def check_basic_ref(self, factory):
        o = factory()
        ref = weakref.ref(o)
        self.assert_(ref() is not None,
                     "weak reference to live object should be live")
        o2 = ref()
        self.assert_(o is o2,
                     "<ref>() should return original object if live")
    def check_basic_callback(self, factory):
        self.cbcalled = 0
        o = factory()
        ref = weakref.ref(o, self.callback)
        del o
        self.assert_(self.cbcalled == 1,
                     "callback did not properly set 'cbcalled'")
        self.assert_(ref() is None,
                     "ref2 should be dead after deleting object reference")
    def test_ref_reuse(self):
        o = C()
        ref1 = weakref.ref(o)
        # create a proxy to make sure that there's an intervening creation
        # between these two; it should make no difference
        proxy = weakref.proxy(o)
        ref2 = weakref.ref(o)
        self.assert_(ref1 is ref2,
                     "reference object w/out callback should be re-used")
        o = C()
        proxy = weakref.proxy(o)
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o)
        self.assert_(ref1 is ref2,
                     "reference object w/out callback should be re-used")
        self.assert_(weakref.getweakrefcount(o) == 2,
                     "wrong weak ref count for object")
        del proxy
        self.assert_(weakref.getweakrefcount(o) == 1,
                     "wrong weak ref count for object after deleting proxy")
    def test_proxy_reuse(self):
        o = C()
        proxy1 = weakref.proxy(o)
        ref = weakref.ref(o)
        proxy2 = weakref.proxy(o)
        self.assert_(proxy1 is proxy2,
                     "proxy object w/out callback should have been re-used")
    def test_basic_proxy(self):
        o = C()
        self.check_proxy(o, weakref.proxy(o))
        L = UserList.UserList()
        p = weakref.proxy(L)
        self.failIf(p, "proxy for empty UserList should be false")
        p.append(12)
        self.assertEqual(len(L), 1)
        self.failUnless(p, "proxy for non-empty UserList should be true")
        p[:] = [2, 3]
        self.assertEqual(len(L), 2)
        self.assertEqual(len(p), 2)
        self.failUnless(3 in p,
                        "proxy didn't support __contains__() properly")
        p[1] = 5
        self.assertEqual(L[1], 5)
        self.assertEqual(p[1], 5)
        L2 = UserList.UserList(L)
        p2 = weakref.proxy(L2)
        self.assertEqual(p, p2)
        ##         self.assertEqual(repr(L2), repr(p2))
        L3 = UserList.UserList(range(10))
        p3 = weakref.proxy(L3)
        self.assertEqual(L3[:], p3[:])
        self.assertEqual(L3[5:], p3[5:])
        self.assertEqual(L3[:5], p3[:5])
        self.assertEqual(L3[2:5], p3[2:5])
    # The PyWeakref_* C API is documented as allowing either NULL or
    # None as the value for the callback, where either means "no
    # callback". The "no callback" ref and proxy objects are supposed
    # to be shared so long as they exist by all callers so long as
    # they are active. In Python 2.3.3 and earlier, this guarantee
    # was not honored, and was broken in different ways for
    # PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
    def test_shared_ref_without_callback(self):
        self.check_shared_without_callback(weakref.ref)
    def test_shared_proxy_without_callback(self):
        self.check_shared_without_callback(weakref.proxy)
    def check_shared_without_callback(self, makeref):
        o = Object(1)
        p1 = makeref(o, None)
        p2 = makeref(o, None)
        self.assert_(p1 is p2, "both callbacks were None in the C API")
        del p1, p2
        p1 = makeref(o)
        p2 = makeref(o, None)
        self.assert_(p1 is p2, "callbacks were NULL, None in the C API")
        del p1, p2
        p1 = makeref(o)
        p2 = makeref(o)
        self.assert_(p1 is p2, "both callbacks were NULL in the C API")
        del p1, p2
        p1 = makeref(o, None)
        p2 = makeref(o)
        self.assert_(p1 is p2, "callbacks were None, NULL in the C API")
    def test_callable_proxy(self):
        o = Callable()
        ref1 = weakref.proxy(o)
        self.check_proxy(o, ref1)
        self.assert_(type(ref1) is weakref.CallableProxyType,
                     "proxy is not of callable type")
        ref1('twinkies!')
        self.assert_(o.bar == 'twinkies!',
                     "call through proxy not passed through to original")
        ref1(x='Splat.')
        self.assert_(o.bar == 'Splat.',
                     "call through proxy not passed through to original")
        # expect due to too few args
        self.assertRaises(TypeError, ref1)
        # expect due to too many args
        self.assertRaises(TypeError, ref1, 1, 2, 3)
    def check_proxy(self, o, proxy):
        # Attribute reads/writes/deletes through a proxy must be visible on
        # the referent, and vice versa.
        o.foo = 1
        self.assert_(proxy.foo == 1,
                     "proxy does not reflect attribute addition")
        o.foo = 2
        self.assert_(proxy.foo == 2,
                     "proxy does not reflect attribute modification")
        del o.foo
        self.assert_(not hasattr(proxy, 'foo'),
                     "proxy does not reflect attribute removal")
        proxy.foo = 1
        self.assert_(o.foo == 1,
                     "object does not reflect attribute addition via proxy")
        proxy.foo = 2
        self.assert_(
            o.foo == 2,
            "object does not reflect attribute modification via proxy")
        del proxy.foo
        self.assert_(not hasattr(o, 'foo'),
                     "object does not reflect attribute removal via proxy")
    def test_proxy_deletion(self):
        # Test clearing of SF bug #762891
        class Foo:
            result = None
            def __delitem__(self, accessor):
                self.result = accessor
        g = Foo()
        f = weakref.proxy(g)
        del f[0]
        self.assertEqual(f.result, 0)
    def test_proxy_bool(self):
        # Test clearing of SF bug #1170766
        class List(list): pass
        lyst = List()
        self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
    def test_getweakrefcount(self):
        o = C()
        ref1 = weakref.ref(o)
        ref2 = weakref.ref(o, self.callback)
        self.assert_(weakref.getweakrefcount(o) == 2,
                     "got wrong number of weak reference objects")
        proxy1 = weakref.proxy(o)
        proxy2 = weakref.proxy(o, self.callback)
        self.assert_(weakref.getweakrefcount(o) == 4,
                     "got wrong number of weak reference objects")
        del ref1, ref2, proxy1, proxy2
        self.assert_(weakref.getweakrefcount(o) == 0,
                     "weak reference objects not unlinked from"
                     " referent when discarded.")
        # assumes ints do not support weakrefs
        self.assert_(weakref.getweakrefcount(1) == 0,
                     "got wrong number of weak reference objects for int")
    def test_getweakrefs(self):
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref1
        self.assert_(weakref.getweakrefs(o) == [ref2],
                     "list of refs does not match")
        o = C()
        ref1 = weakref.ref(o, self.callback)
        ref2 = weakref.ref(o, self.callback)
        del ref2
        self.assert_(weakref.getweakrefs(o) == [ref1],
                     "list of refs does not match")
        del ref1
        self.assert_(weakref.getweakrefs(o) == [],
                     "list of refs not cleared")
        # assumes ints do not support weakrefs
        self.assert_(weakref.getweakrefs(1) == [],
                     "list of refs does not match for int")
    def test_newstyle_number_ops(self):
        class F(float):
            pass
        f = F(2.0)
        p = weakref.proxy(f)
        self.assert_(p + 1.0 == 3.0)
        self.assert_(1.0 + p == 3.0)  # this used to SEGV
    def test_callbacks_protected(self):
        # Callbacks protected from already-set exceptions?
        # Regression test for SF bug #478534.
        class BogusError(Exception):
            pass
        data = {}
        def remove(k):
            del data[k]
        def encapsulate():
            f = lambda : ()
            data[weakref.ref(f, remove)] = None
            raise BogusError
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
        try:
            encapsulate()
        except BogusError:
            pass
        else:
            self.fail("exception not properly restored")
    def test_sf_bug_840829(self):
        # "weakref callbacks and gc corrupt memory"
        # subtype_dealloc erroneously exposed a new-style instance
        # already in the process of getting deallocated to gc,
        # causing double-deallocation if the instance had a weakref
        # callback that triggered gc.
        # If the bug exists, there probably won't be an obvious symptom
        # in a release build. In a debug build, a segfault will occur
        # when the second attempt to remove the instance from the "list
        # of all objects" occurs.
        import gc
        class C(object):
            pass
        c = C()
        wr = weakref.ref(c, lambda ignore: gc.collect())
        del c
        # There endeth the first part. It gets worse.
        del wr
        c1 = C()
        c1.i = C()
        wr = weakref.ref(c1.i, lambda ignore: gc.collect())
        c2 = C()
        c2.c1 = c1
        del c1  # still alive because c2 points to it
        # Now when subtype_dealloc gets called on c2, it's not enough just
        # that c2 is immune from gc while the weakref callbacks associated
        # with c2 execute (there are none in this 2nd half of the test, btw).
        # subtype_dealloc goes on to call the base classes' deallocs too,
        # so any gc triggered by weakref callbacks associated with anything
        # torn down by a base class dealloc can also trigger double
        # deallocation of c2.
        del c2
    def test_callback_in_cycle_1(self):
        import gc
        class J(object):
            pass
        class II(object):
            def acallback(self, ignore):
                self.J
        I = II()
        I.J = J
        I.wr = weakref.ref(J, I.acallback)
        # Now J and II are each in a self-cycle (as all new-style class
        # objects are, since their __mro__ points back to them). I holds
        # both a weak reference (I.wr) and a strong reference (I.J) to class
        # J. I is also in a cycle (I.wr points to a weakref that references
        # I.acallback). When we del these three, they all become trash, but
        # the cycles prevent any of them from getting cleaned up immediately.
        # Instead they have to wait for cyclic gc to deduce that they're
        # trash.
        #
        # gc used to call tp_clear on all of them, and the order in which
        # it does that is pretty accidental. The exact order in which we
        # built up these things manages to provoke gc into running tp_clear
        # in just the right order (I last). Calling tp_clear on II leaves
        # behind an insane class object (its __mro__ becomes NULL). Calling
        # tp_clear on J breaks its self-cycle, but J doesn't get deleted
        # just then because of the strong reference from I.J. Calling
        # tp_clear on I starts to clear I's __dict__, and just happens to
        # clear I.J first -- I.wr is still intact. That removes the last
        # reference to J, which triggers the weakref callback. The callback
        # tries to do "self.J", and instances of new-style classes look up
        # attributes ("J") in the class dict first. The class (II) wants to
        # search II.__mro__, but that's NULL. The result was a segfault in
        # a release build, and an assert failure in a debug build.
        del I, J, II
        gc.collect()
    def test_callback_in_cycle_2(self):
        import gc
        # This is just like test_callback_in_cycle_1, except that II is an
        # old-style class. The symptom is different then: an instance of an
        # old-style class looks in its own __dict__ first. 'J' happens to
        # get cleared from I.__dict__ before 'wr', and 'J' was never in II's
        # __dict__, so the attribute isn't found. The difference is that
        # the old-style II doesn't have a NULL __mro__ (it doesn't have any
        # __mro__), so no segfault occurs. Instead it got:
        # test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
        # Exception exceptions.AttributeError:
        # "II instance has no attribute 'J'" in <bound method II.acallback
        # of <?.II instance at 0x00B9B4B8>> ignored
        class J(object):
            pass
        class II:
            def acallback(self, ignore):
                self.J
        I = II()
        I.J = J
        I.wr = weakref.ref(J, I.acallback)
        del I, J, II
        gc.collect()
    def test_callback_in_cycle_3(self):
        import gc
        # This one broke the first patch that fixed the last two. In this
        # case, the objects reachable from the callback aren't also reachable
        # from the object (c1) *triggering* the callback: you can get to
        # c1 from c2, but not vice-versa. The result was that c2's __dict__
        # got tp_clear'ed by the time the c2.cb callback got invoked.
        class C:
            def cb(self, ignore):
                self.me
                self.c1
                self.wr
        c1, c2 = C(), C()
        c2.me = c2
        c2.c1 = c1
        c2.wr = weakref.ref(c1, c2.cb)
        del c1, c2
        gc.collect()
    def test_callback_in_cycle_4(self):
        import gc
        # Like test_callback_in_cycle_3, except c2 and c1 have different
        # classes. c2's class (C) isn't reachable from c1 then, so protecting
        # objects reachable from the dying object (c1) isn't enough to stop
        # c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
        # The result was a segfault (C.__mro__ was NULL when the callback
        # tried to look up self.me).
        class C(object):
            def cb(self, ignore):
                self.me
                self.c1
                self.wr
        class D:
            pass
        c1, c2 = D(), C()
        c2.me = c2
        c2.c1 = c1
        c2.wr = weakref.ref(c1, c2.cb)
        del c1, c2, C, D
        gc.collect()
    def test_callback_in_cycle_resurrection(self):
        import gc
        # Do something nasty in a weakref callback: resurrect objects
        # from dead cycles. For this to be attempted, the weakref and
        # its callback must also be part of the cyclic trash (else the
        # objects reachable via the callback couldn't be in cyclic trash
        # to begin with -- the callback would act like an external root).
        # But gc clears trash weakrefs with callbacks early now, which
        # disables the callbacks, so the callbacks shouldn't get called
        # at all (and so nothing actually gets resurrected).
        alist = []
        class C(object):
            def __init__(self, value):
                self.attribute = value
            def acallback(self, ignore):
                alist.append(self.c)
        c1, c2 = C(1), C(2)
        c1.c = c2
        c2.c = c1
        c1.wr = weakref.ref(c2, c1.acallback)
        c2.wr = weakref.ref(c1, c2.acallback)
        def C_went_away(ignore):
            alist.append("C went away")
        wr = weakref.ref(C, C_went_away)
        del c1, c2, C  # make them all trash
        self.assertEqual(alist, [])  # del isn't enough to reclaim anything
        gc.collect()
        # c1.wr and c2.wr were part of the cyclic trash, so should have
        # been cleared without their callbacks executing. OTOH, the weakref
        # to C is bound to a function local (wr), and wasn't trash, so that
        # callback should have been invoked when C went away.
        self.assertEqual(alist, ["C went away"])
        # The remaining weakref should be dead now (its callback ran).
        self.assertEqual(wr(), None)
        del alist[:]
        gc.collect()
        self.assertEqual(alist, [])
    def test_callbacks_on_callback(self):
        import gc
        # Set up weakref callbacks *on* weakref callbacks.
        alist = []
        def safe_callback(ignore):
            alist.append("safe_callback called")
        class C(object):
            def cb(self, ignore):
                alist.append("cb called")
        c, d = C(), C()
        c.other = d
        d.other = c
        callback = c.cb
        c.wr = weakref.ref(d, callback)  # this won't trigger
        d.wr = weakref.ref(callback, d.cb)  # ditto
        external_wr = weakref.ref(callback, safe_callback)  # but this will
        self.assert_(external_wr() is callback)
        # The weakrefs attached to c and d should get cleared, so that
        # C.cb is never called. But external_wr isn't part of the cyclic
        # trash, and no cyclic trash is reachable from it, so safe_callback
        # should get invoked when the bound method object callback (c.cb)
        # -- which is itself a callback, and also part of the cyclic trash --
        # gets reclaimed at the end of gc.
        del callback, c, d, C
        self.assertEqual(alist, [])  # del isn't enough to clean up cycles
        gc.collect()
        self.assertEqual(alist, ["safe_callback called"])
        self.assertEqual(external_wr(), None)
        del alist[:]
        gc.collect()
        self.assertEqual(alist, [])
    def test_gc_during_ref_creation(self):
        self.check_gc_during_creation(weakref.ref)
    def test_gc_during_proxy_creation(self):
        self.check_gc_during_creation(weakref.proxy)
    def check_gc_during_creation(self, makeref):
        # Force a collection on every allocation so gc can run while a
        # weakref/proxy is being constructed; restore thresholds afterwards.
        thresholds = gc.get_threshold()
        gc.set_threshold(1, 1, 1)
        gc.collect()
        class A:
            pass
        def callback(*args):
            pass
        referenced = A()
        a = A()
        a.a = a
        a.wr = makeref(referenced)
        try:
            # now make sure the object and the ref get labeled as
            # cyclic trash:
            a = A()
            weakref.ref(referenced, callback)
        finally:
            gc.set_threshold(*thresholds)
class SubclassableWeakrefTestCase(unittest.TestCase):
    """Tests that weakref.ref can be subclassed safely.

    Subclass instances must not be conflated with plain refs (or with
    each other when their callbacks differ), and __slots__-based
    subclasses must work without an instance __dict__.
    """
    def test_subclass_refs(self):
        class MyRef(weakref.ref):
            def __init__(self, ob, callback=None, value=42):
                self.value = value
                super(MyRef, self).__init__(ob, callback)
            def __call__(self):
                self.called = True
                return super(MyRef, self).__call__()
        o = Object("foo")
        mr = MyRef(o, value=24)
        self.assert_(mr() is o)
        self.assert_(mr.called)
        self.assertEqual(mr.value, 24)
        del o
        self.assert_(mr() is None)
        self.assert_(mr.called)
    def test_subclass_refs_dont_replace_standard_refs(self):
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o)
        r2 = weakref.ref(o)
        self.assert_(r1 is not r2)
        # Plain (callback-less) refs sort to the front of getweakrefs().
        self.assertEqual(weakref.getweakrefs(o), [r2, r1])
        self.assertEqual(weakref.getweakrefcount(o), 2)
        r3 = MyRef(o)
        self.assertEqual(weakref.getweakrefcount(o), 3)
        refs = weakref.getweakrefs(o)
        self.assertEqual(len(refs), 3)
        self.assert_(r2 is refs[0])
        self.assert_(r1 in refs[1:])
        self.assert_(r3 in refs[1:])
    def test_subclass_refs_dont_conflate_callbacks(self):
        class MyRef(weakref.ref):
            pass
        o = Object(42)
        r1 = MyRef(o, id)
        r2 = MyRef(o, str)
        self.assert_(r1 is not r2)
        refs = weakref.getweakrefs(o)
        self.assert_(r1 in refs)
        self.assert_(r2 in refs)
    def test_subclass_refs_with_slots(self):
        class MyRef(weakref.ref):
            __slots__ = "slot1", "slot2"
            def __new__(type, ob, callback, slot1, slot2):
                return weakref.ref.__new__(type, ob, callback)
            def __init__(self, ob, callback, slot1, slot2):
                self.slot1 = slot1
                self.slot2 = slot2
            def meth(self):
                return self.slot1 + self.slot2
        o = Object(42)
        r = MyRef(o, None, "abc", "def")
        self.assertEqual(r.slot1, "abc")
        self.assertEqual(r.slot2, "def")
        self.assertEqual(r.meth(), "abcdef")
        # __slots__ means no per-instance __dict__ should exist.
        self.failIf(hasattr(r, "__dict__"))
class Object:
    """Minimal weak-referenceable wrapper around a single value.

    Used as a generic key/value fixture throughout the weak-dictionary
    tests; ``arg`` is compared and hashed by those tests.
    """
    def __init__(self, arg):
        self.arg = arg
    def __repr__(self):
        # Wrap in a 1-tuple: "<Object %r>" % self.arg would splat a
        # tuple-valued arg into the format and raise TypeError.
        return "<Object %r>" % (self.arg,)
class MappingTestCase(TestBase):
    """Tests for weakref.WeakValueDictionary and weakref.WeakKeyDictionary.

    Exercises copying, iteration (including keyrefs/valuerefs), popitem,
    setdefault, update, deletion, and regression scenarios where entries
    vanish while the dictionary is being iterated.
    """
    # Number of objects stored by the make_weak_*_dict helpers.
    COUNT = 10
    def test_weak_values(self):
        #
        # This exercises d.copy(), d.items(), d[], del d[], len(d).
        #
        dict, objects = self.make_weak_valued_dict()
        for o in objects:
            self.assert_(weakref.getweakrefcount(o) == 1,
                         "wrong number of weak references to %r!" % o)
            self.assert_(o is dict[o.arg],
                         "wrong object returned by weak dict!")
        items1 = dict.items()
        items2 = dict.copy().items()
        items1.sort()
        items2.sort()
        self.assert_(items1 == items2,
                     "cloning of weak-valued dictionary did not work!")
        del items1, items2
        self.assert_(len(dict) == self.COUNT)
        del objects[0]
        self.assert_(len(dict) == (self.COUNT - 1),
                     "deleting object did not cause dictionary update")
        del objects, o
        self.assert_(len(dict) == 0,
                     "deleting the values did not clear the dictionary")
        # regression on SF bug #447152:
        dict = weakref.WeakValueDictionary()
        self.assertRaises(KeyError, dict.__getitem__, 1)
        dict[2] = C()
        self.assertRaises(KeyError, dict.__getitem__, 2)
    def test_weak_keys(self):
        #
        # This exercises d.copy(), d.items(), d[] = v, d[], del d[],
        # len(d), d.has_key().
        #
        dict, objects = self.make_weak_keyed_dict()
        for o in objects:
            self.assert_(weakref.getweakrefcount(o) == 1,
                         "wrong number of weak references to %r!" % o)
            self.assert_(o.arg is dict[o],
                         "wrong object returned by weak dict!")
        items1 = dict.items()
        items2 = dict.copy().items()
        self.assert_(set(items1) == set(items2),
                     "cloning of weak-keyed dictionary did not work!")
        del items1, items2
        self.assert_(len(dict) == self.COUNT)
        del objects[0]
        self.assert_(len(dict) == (self.COUNT - 1),
                     "deleting object did not cause dictionary update")
        del objects, o
        self.assert_(len(dict) == 0,
                     "deleting the keys did not clear the dictionary")
        o = Object(42)
        dict[o] = "What is the meaning of the universe?"
        self.assert_(dict.has_key(o))
        self.assert_(not dict.has_key(34))
    def test_weak_keyed_iters(self):
        dict, objects = self.make_weak_keyed_dict()
        self.check_iters(dict)
        # Test keyrefs()
        refs = dict.keyrefs()
        self.assertEqual(len(refs), len(objects))
        objects2 = list(objects)
        for wr in refs:
            ob = wr()
            self.assert_(dict.has_key(ob))
            self.assert_(ob in dict)
            self.assertEqual(ob.arg, dict[ob])
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
        # Test iterkeyrefs()
        objects2 = list(objects)
        self.assertEqual(len(list(dict.iterkeyrefs())), len(objects))
        for wr in dict.iterkeyrefs():
            ob = wr()
            self.assert_(dict.has_key(ob))
            self.assert_(ob in dict)
            self.assertEqual(ob.arg, dict[ob])
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
    def test_weak_valued_iters(self):
        dict, objects = self.make_weak_valued_dict()
        self.check_iters(dict)
        # Test valuerefs()
        refs = dict.valuerefs()
        self.assertEqual(len(refs), len(objects))
        objects2 = list(objects)
        for wr in refs:
            ob = wr()
            self.assertEqual(ob, dict[ob.arg])
            self.assertEqual(ob.arg, dict[ob.arg].arg)
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
        # Test itervaluerefs()
        objects2 = list(objects)
        self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
        for wr in dict.itervaluerefs():
            ob = wr()
            self.assertEqual(ob, dict[ob.arg])
            self.assertEqual(ob.arg, dict[ob.arg].arg)
            objects2.remove(ob)
        self.assertEqual(len(objects2), 0)
    def check_iters(self, dict):
        # item iterator:
        items = dict.items()
        for item in dict.iteritems():
            items.remove(item)
        self.assert_(len(items) == 0, "iteritems() did not touch all items")
        # key iterator, via __iter__():
        keys = dict.keys()
        for k in dict:
            keys.remove(k)
        self.assert_(len(keys) == 0, "__iter__() did not touch all keys")
        # key iterator, via iterkeys():
        keys = dict.keys()
        for k in dict.iterkeys():
            keys.remove(k)
        self.assert_(len(keys) == 0, "iterkeys() did not touch all keys")
        # value iterator:
        values = dict.values()
        for v in dict.itervalues():
            values.remove(v)
        self.assert_(len(values) == 0,
                     "itervalues() did not touch all values")
    def test_make_weak_keyed_dict_from_dict(self):
        o = Object(3)
        dict = weakref.WeakKeyDictionary({o:364})
        self.assert_(dict[o] == 364)
    def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
        o = Object(3)
        dict = weakref.WeakKeyDictionary({o:364})
        dict2 = weakref.WeakKeyDictionary(dict)
        self.assert_(dict[o] == 364)
    def make_weak_keyed_dict(self):
        # Helper: dict mapping Object -> Object.arg; the returned 'objects'
        # list holds the only strong references to the keys.
        dict = weakref.WeakKeyDictionary()
        objects = map(Object, range(self.COUNT))
        for o in objects:
            dict[o] = o.arg
        return dict, objects
    def make_weak_valued_dict(self):
        # Helper: dict mapping Object.arg -> Object; the returned 'objects'
        # list holds the only strong references to the values.
        dict = weakref.WeakValueDictionary()
        objects = map(Object, range(self.COUNT))
        for o in objects:
            dict[o.arg] = o
        return dict, objects
    def check_popitem(self, klass, key1, value1, key2, value2):
        weakdict = klass()
        weakdict[key1] = value1
        weakdict[key2] = value2
        self.assert_(len(weakdict) == 2)
        k, v = weakdict.popitem()
        self.assert_(len(weakdict) == 1)
        if k is key1:
            self.assert_(v is value1)
        else:
            self.assert_(v is value2)
        k, v = weakdict.popitem()
        self.assert_(len(weakdict) == 0)
        if k is key1:
            self.assert_(v is value1)
        else:
            self.assert_(v is value2)
    def test_weak_valued_dict_popitem(self):
        self.check_popitem(weakref.WeakValueDictionary,
                           "key1", C(), "key2", C())
    def test_weak_keyed_dict_popitem(self):
        self.check_popitem(weakref.WeakKeyDictionary,
                           C(), "value 1", C(), "value 2")
    def check_setdefault(self, klass, key, value1, value2):
        self.assert_(value1 is not value2,
                     "invalid test"
                     " -- value parameters must be distinct objects")
        weakdict = klass()
        o = weakdict.setdefault(key, value1)
        self.assert_(o is value1)
        self.assert_(weakdict.has_key(key))
        self.assert_(weakdict.get(key) is value1)
        self.assert_(weakdict[key] is value1)
        o = weakdict.setdefault(key, value2)
        self.assert_(o is value1)
        self.assert_(weakdict.has_key(key))
        self.assert_(weakdict.get(key) is value1)
        self.assert_(weakdict[key] is value1)
    def test_weak_valued_dict_setdefault(self):
        self.check_setdefault(weakref.WeakValueDictionary,
                              "key", C(), C())
    def test_weak_keyed_dict_setdefault(self):
        self.check_setdefault(weakref.WeakKeyDictionary,
                              C(), "value 1", "value 2")
    def check_update(self, klass, dict):
        #
        # This exercises d.update(), len(d), d.keys(), d.has_key(),
        # d.get(), d[].
        #
        weakdict = klass()
        weakdict.update(dict)
        self.assert_(len(weakdict) == len(dict))
        for k in weakdict.keys():
            self.assert_(dict.has_key(k),
                         "mysterious new key appeared in weak dict")
            v = dict.get(k)
            self.assert_(v is weakdict[k])
            self.assert_(v is weakdict.get(k))
        for k in dict.keys():
            self.assert_(weakdict.has_key(k),
                         "original key disappeared in weak dict")
            v = dict[k]
            self.assert_(v is weakdict[k])
            self.assert_(v is weakdict.get(k))
    def test_weak_valued_dict_update(self):
        self.check_update(weakref.WeakValueDictionary,
                          {1: C(), 'a': C(), C(): C()})
    def test_weak_keyed_dict_update(self):
        self.check_update(weakref.WeakKeyDictionary,
                          {C(): 1, C(): 2, C(): 3})
    def test_weak_keyed_delitem(self):
        d = weakref.WeakKeyDictionary()
        o1 = Object('1')
        o2 = Object('2')
        d[o1] = 'something'
        d[o2] = 'something'
        self.assert_(len(d) == 2)
        del d[o1]
        self.assert_(len(d) == 1)
        self.assert_(d.keys() == [o2])
    def test_weak_valued_delitem(self):
        d = weakref.WeakValueDictionary()
        o1 = Object('1')
        o2 = Object('2')
        d['something'] = o1
        d['something else'] = o2
        self.assert_(len(d) == 2)
        del d['something']
        self.assert_(len(d) == 1)
        self.assert_(d.items() == [('something else', o2)])
    def test_weak_keyed_bad_delitem(self):
        d = weakref.WeakKeyDictionary()
        o = Object('1')
        # An attempt to delete an object that isn't there should raise
        # KeyError. It didn't before 2.3.
        self.assertRaises(KeyError, d.__delitem__, o)
        self.assertRaises(KeyError, d.__getitem__, o)
        # If a key isn't of a weakly referencable type, __getitem__ and
        # __setitem__ raise TypeError. __delitem__ should too.
        self.assertRaises(TypeError, d.__delitem__, 13)
        self.assertRaises(TypeError, d.__getitem__, 13)
        self.assertRaises(TypeError, d.__setitem__, 13, 13)
    def test_weak_keyed_cascading_deletes(self):
        # SF bug 742860. For some reason, before 2.3 __delitem__ iterated
        # over the keys via self.data.iterkeys(). If things vanished from
        # the dict during this (or got added), that caused a RuntimeError.
        d = weakref.WeakKeyDictionary()
        mutate = False
        class C(object):
            def __init__(self, i):
                self.value = i
            def __hash__(self):
                return hash(self.value)
            def __eq__(self, other):
                if mutate:
                    # Side effect that mutates the dict, by removing the
                    # last strong reference to a key.
                    del objs[-1]
                return self.value == other.value
        objs = [C(i) for i in range(4)]
        for o in objs:
            d[o] = o.value
        del o  # now the only strong references to keys are in objs
        # Find the order in which iterkeys sees the keys.
        objs = d.keys()
        # Reverse it, so that the iteration implementation of __delitem__
        # has to keep looping to find the first object we delete.
        objs.reverse()
        # Turn on mutation in C.__eq__. The first time thru the loop,
        # under the iterkeys() business the first comparison will delete
        # the last item iterkeys() would see, and that causes a
        # RuntimeError: dictionary changed size during iteration
        # when the iterkeys() loop goes around to try comparing the next
        # key. After this was fixed, it just deletes the last object *our*
        # "for o in obj" loop would have gotten to.
        mutate = True
        count = 0
        for o in objs:
            count += 1
            del d[o]
        self.assertEqual(len(d), 0)
        self.assertEqual(count, 2)
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakValueDictionary conforms to the mapping protocol"""
    # Class-level dict keeps strong references to the values so they stay
    # alive for the duration of the protocol tests (name-mangled to _WeakValueDictionaryTestCase__ref).
    __ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
    type2test = weakref.WeakValueDictionary
    def _reference(self):
        # Hand out a fresh copy so tests cannot mutate the shared fixture.
        return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
    """Check that WeakKeyDictionary conforms to the mapping protocol"""
    # Class-level dict keeps strong references to the keys so they stay
    # alive for the duration of the protocol tests.
    __ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
    type2test = weakref.WeakKeyDictionary
    def _reference(self):
        # Hand out a fresh copy so tests cannot mutate the shared fixture.
        return self.__ref.copy()
libreftest = """ Doctest for examples in the library reference: libweakref.tex
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referencable
>>> r = weakref.ref(obj)
>>> print r() is obj
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> print r()
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super(ExtendedRef, self).__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.iteritems():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super(ExtendedRef, self).__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> try:
... id2obj(a_id)
... except KeyError:
... print 'OK'
... else:
... print 'WeakValueDictionary error'
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
    """Run all unittest suites in this module, then the module doctests."""
    # Order matters for reproducibility, so keep the historical sequence.
    suites = (
        ReferencesTestCase,
        MappingTestCase,
        WeakValueDictionaryTestCase,
        WeakKeyDictionaryTestCase,
    )
    test_support.run_unittest(*suites)
    test_support.run_doctest(sys.modules[__name__])
# Allow invoking this test module directly from the command line.
if __name__ == "__main__":
    test_main()
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2019, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Strongly Typed Class Attributes
######################################
This module provides a typed attribute class and class decorator/base class for
creating classes with type enforced attributes. Enforcing an attribute's type is
handled by Python's property mechanism (the property's set function checks the
value's type). A simple usage example follows.
.. code-block:: Python
@typed
class Foo(object):
bar = Typed(int, doc="Always an integer type")
The simple code example generates code similar to the following when the module
is executed (i.e. imported).
.. code-block:: python
class Foo(object):
@property
def bar(self):
return self._bar
@bar.setter
def bar(self, value):
if not isinstance(value, int):
try:
value = int(value)
except Exception as e:
raise TypeError("Cannot convert value") from e
self._bar = value
The :class:`~exa.typed.Typed` object additionally provides mechanisms for
triggering function calls before and after get, set, and delete, and attempts
automatic conversion (as shown above) for all types supported for a given
attribute.
"""
import six
import pandas as pd
import warnings
def _typed_from_items(items):
"""
Construct strongly typed attributes (properties) from a dictionary of
name and :class:`~exa.typed.Typed` object pairs.
See Also:
:func:`~exa.typed.typed`
"""
dct = {}
for name, attr in items:
if isinstance(attr, Typed):
dct[name] = attr(name)
return dct
def typed(cls):
    """
    Class decorator that updates a class definition with strongly typed
    property attributes.

    See Also:
        If the class will be inherited, use :class:`~exa.typed.TypedClass`.
    """
    properties = _typed_from_items(vars(cls).items())
    for attrname, prop in properties.items():
        setattr(cls, attrname, prop)
    return cls
def yield_typed(obj_or_cls):
    """
    Generator that yields typed object names of the class (or object's class).

    Args:
        obj_or_cls (object): Class object or instance of class

    Returns:
        name (array): Names of class attributes that are strongly typed
    """
    cls = obj_or_cls if isinstance(obj_or_cls, type) else type(obj_or_cls)
    for attrname in dir(cls):
        if not hasattr(cls, attrname):
            continue
        attr = getattr(cls, attrname)
        # !!! Important hardcoded value here !!!
        # Typed properties are recognized by the "__typed__" marker that
        # the property-generation machinery embeds in their docstrings.
        if (isinstance(attr, property)
                and isinstance(attr.__doc__, six.string_types)
                and "__typed__" in attr.__doc__):
            yield attrname
class Typed(object):
    """
    A representation of a strongly typed class attribute.

    .. code-block:: Python

        @typed
        class Strong(object):
            foo = Typed(int, doc="my int")

    The above example creates a class object that has a property-like attribute
    which requires its value to be of type int. Additional arguments provide
    the ability to have the property's getter, setter, and deleter functions call
    other functions or methods of the class. If provided by the class, strongly
    typed attributes created here automatically attempt to set themselves
    (see below).

    .. code-block:: Python

        @typed
        class Strong(object):
            _setters = ("_set", )
            foo = Typed(int, doc="my int")

            def _set_foo(self):
                self.foo = 42

    By defining a `_setters` class attribute the strongly typed property knows that,
    if the foo attribute's value (i.e. `_foo`) is not defined (or is defined as None),
    the property getter should first call the `_set_foo` class method, and afterwards
    it should proceed with getting the property value. Note that `_set_foo` cannot
    accept arguments (it must be 'automatic').

    Args:
        types (iterable, type): Iterable of types or type
        doc (str): Documentation
        autoconv (bool): Attempt automatic type conversion when setting (default true)
        allow_none (bool): As an additional type, allow None (default true)
        pre_set (callable, str): Callable or class method name called before setter
        post_set (callable, str): Callable or class method name called after setter
        pre_get (callable, str): Callable or class method name called before getter
        pre_del (callable, str): Callable or class method name called before deleter
        post_del (callable, str): Callable or class method name called after deleter
        verbose (bool): Emit a warning for each failed conversion attempt (default false)

    Warning:
        Automatic type conversion (autoconv = true) is not guaranteed to work in
        all cases and is known to fail for non-Python objects such as numpy
        ndarray types: Setting **autoconv** to false is recommended for these cases.

    .. code-block:: python

        Typed(np.ndarray, autoconv=False)    # Do not attempt auto conversion
    """
    def __call__(self, name):
        """
        Construct the property.

        Args:
            name (str): Attribute (property) name

        Returns:
            prop (property): Custom property definition with support for typing
        """
        priv = "_" + name    # Reference to the variable's value
        # The following is a definition of a Python property. Properties have
        # get, set, and delete functions as well as documentation. The variable
        # "this" references the class object instance where the property exists;
        # it does not reference the instance of this ("Typed") class.
        def getter(this):
            # If the variable value (referenced by priv) does not exist
            # or is None AND the class has some automatic way of setting the value,
            # set the value first then proceed to getting it.
            if ((not hasattr(this, priv) or getattr(this, priv) is None) and
                hasattr(this, "_setters") and isinstance(this._setters, (list, tuple))):
                for prefix in this._setters:
                    cmd = "{}{}".format(prefix, priv)
                    if hasattr(this, cmd):
                        getattr(this, cmd)()    # Automatic method call
                        if hasattr(this, priv):
                            break
            # Perform pre-get actions (if any)
            if isinstance(self.pre_get, str):
                getattr(this, self.pre_get)()
            elif callable(self.pre_get):
                self.pre_get(this)
            return getattr(this, priv, None)    # Returns None by default
        def setter(this, value):
            # If auto-conversion is on and the value is not the correct type (and
            # also is not None), attempt to convert types
            if self.autoconv and not isinstance(value, self.types) and value is not None:
                for t in self.types:
                    try:
                        value = t(value)
                        break
                    except Exception as e:    # Catch all exceptions but if conversion fails ...
                        if self.verbose:
                            warnings.warn("Conversion of {} (with type {}) failed to type {}\n{}".format(name, type(value), t, str(e)))
                else:    # ... raise a TypeError (for/else: no conversion succeeded)
                    raise TypeError("Cannot convert object of type {} to any of {}.".format(type(value), self.types))
            # If the value is none and none is not allowed,
            # or the value is some other type (that is not none) and not of a type
            # that is allowed, raise an error.
            elif ((value is None and self.allow_none == False) or
                  (not isinstance(value, self.types) and value is not None)):
                raise TypeError("Object '{}' cannot have type {}, must be of type(s) {}.".format(name, type(value), self.types))
            # Perform pre-set actions (if any)
            if isinstance(self.pre_set, str):
                getattr(this, self.pre_set)()
            elif callable(self.pre_set):
                self.pre_set(this)
            # NOTE(review): pd.SparseDataFrame was removed in pandas 1.0 —
            # confirm the project's pandas version pin before relying on
            # this branch.
            if isinstance(this, (pd.DataFrame, pd.SparseDataFrame)):
                this[priv] = value
            else:
                setattr(this, priv, value)    # Set the property value
            # Perform post-set actions (if any)
            if isinstance(self.post_set, str):
                getattr(this, self.post_set)()
            elif callable(self.post_set):
                self.post_set(this)
        def deleter(this):
            # Perform pre-del actions (if any)
            if isinstance(self.pre_del, str):
                getattr(this, self.pre_del)()
            elif callable(self.pre_del):
                self.pre_del(this)
            delattr(this, priv)    # Delete the attribute (allows for dynamic naming)
            # Perform post-del actions (if any)
            if isinstance(self.post_del, str):
                getattr(this, self.post_del)()
            elif callable(self.post_del):
                self.post_del(this)
        return property(getter, setter, deleter, doc=self.doc)

    def __init__(self, types, doc=None, autoconv=True, pre_set=None, allow_none=True,
                 post_set=None, pre_get=None, pre_del=None, post_del=None, verbose=False):
        # Normalize to a tuple so isinstance checks and iteration always work.
        self.types = types if isinstance(types, (tuple, list)) else (types, )
        # The "__typed__" marker lets yield_typed recognize generated properties.
        self.doc = str(doc) + "\n\n__typed__"
        self.autoconv = autoconv
        self.allow_none = allow_none
        self.pre_set = pre_set
        self.post_set = post_set
        self.pre_get = pre_get
        self.pre_del = pre_del
        self.post_del = post_del
        self.verbose = verbose
class TypedMeta(type):
    """
    A metaclass for creating typed attributes; an alternative to the
    :func:`~exa.typed.typed` class decorator.

    .. code-block:: Python

        class Foo(six.with_metaclass(TypedMeta, object)):
            bar = Typed(int, doc="Always an int")

    See Also:
        :func:`~exa.typed.typed` and :mod:`~exa.core.data`
    """
    def __new__(mcs, clsname, bases, clsdict):
        # Swap Typed placeholders for real property objects before the class
        # object is assembled.
        props = _typed_from_items(clsdict.items())
        clsdict.update(props)
        return super(TypedMeta, mcs).__new__(mcs, clsname, bases, clsdict)
class TypedClass(six.with_metaclass(TypedMeta, object)):
    """
    A mixin class which can be used to create a class with strongly typed
    attributes.

    .. code-block:: Python

        class Foo(TypedClass):
            bar = Typed(int, doc="Still an int")

    See Also:
        :func:`~exa.typed.typed`
    """
    # Inheriting from this class applies TypedMeta to subclasses automatically.
    pass
| |
"""\
Code for restricted open-shell hartree fock programs in PyQuante.
A good reference for the equations here is 'The Self-Consistent Field
Equations for Generalized Valence Bond and Open-Shell Hartree-Fock
Wave Functions', F. W. Bobrowicz and W. A. Goddard, III. in 'Methods
of Electronic Structure Theory', H. F. Schaefer, III, ed., Plenum
Publishing Company, 1977.
This program is part of the PyQuante quantum chemistry program suite.
Status: Closed shell cases work with the open shell code.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
from PyQuante.Ints import getbasis,getints,getJ,getK,get2JmK
from PyQuante.LA2 import mkdens,geigh,trace2,simx
from PyQuante.NumWrap import zeros,transpose,matrixmultiply,eigh,dot
from PyQuante.NumWrap import identity,take
from PyQuante.hartree_fock import get_energy
from math import sqrt
def get_os_dens(orbs,f,noccsh):
    """Return one density matrix per shell.

    orbs    MO coefficient matrix
    f       occupation factors, one entry per shell
    noccsh  number of occupied orbitals in each shell
    """
    assert len(f) == len(noccsh)
    densities = []
    lo = 0
    for nocc in noccsh:
        hi = lo + nocc
        densities.append(mkdens(orbs,lo,hi))
        lo = hi
    return densities
def get_os_hams(Ints,Ds):
    """Return [J0,K0,J1,K1,...]: Coulomb and exchange matrices per shell.

    GVB2P5 did this a little more efficiently: it stored 2J-K for the core
    and J,K for each open shell. That didn't seem worth it here, so J,K are
    just stored separately for every shell.
    """
    Hs = []
    for D in Ds:
        Hs.extend([getJ(Ints,D), getK(Ints,D)])
    return Hs
def get_orbs_in_shell(ish,noccsh,norb):
    """Return the orbital indices in the active space of shell ish.

    The list contains the occupied orbitals of shell ish followed by all
    virtual orbitals (nocc..norb-1).

    Uses explicit list() so the function behaves identically under Python 2
    (where range() already returns a list and '+' concatenates) and
    Python 3 (where range() is lazy and does not support '+').
    """
    nocc = sum(noccsh)
    vstart,vend = nocc,norb
    istart = sum(noccsh[:ish])
    iend = sum(noccsh[:ish+1])
    return list(range(istart,iend)) + list(range(vstart,vend))
def get_os_fock(ish,nsh,f,a,b,h,Hs,**kwargs):
    """Form the Fock matrix for shell ish.

    ish,nsh  index of the current shell and total number of shells
    f,a,b    occupation/coupling coefficients (a,b indexable as [ish,jsh])
    h        one-electron Hamiltonian matrix
    Hs       [J0,K0,J1,K1,...] per-shell Coulomb/exchange matrices
    nof      (kwarg) if true, build F without the f[ish] prefactor,
             dividing the two-electron terms by f[ish] instead
    """
    nof = kwargs.get('nof',False)
    if nof:
        # Copy h (1.*h makes a new array) so the in-place accumulation below
        # cannot mutate the caller's Hamiltonian. The previous 'F = h'
        # aliased h and corrupted it via the F += ... statements.
        F = 1.*h
    else:
        F = f[ish]*h
    for jsh in range(nsh):
        if nof:
            F += a[ish,jsh]*Hs[2*jsh]/f[ish]+b[ish,jsh]*Hs[2*jsh+1]/f[ish]
        else:
            F += a[ish,jsh]*Hs[2*jsh]+b[ish,jsh]*Hs[2*jsh+1]
    return F
def update_orbe(orbs_in_shell,orbe,mo_orbe):
    """Scatter the MO-space eigenvalues back into the full orbital-energy
    array at the positions listed in orbs_in_shell."""
    for iorb, energy in zip(orbs_in_shell, mo_orbe):
        orbe[iorb] = energy
    return
def update_orbs(orbs_in_shell,orbs,new_orbs):
    """Copy the columns of new_orbs into the columns of orbs listed in
    orbs_in_shell (in order), modifying orbs in place."""
    col = 0
    for iorb in orbs_in_shell:
        orbs[:,iorb] = new_orbs[:,col]
        col += 1
    return
def ocbse(orbs,h,Hs,f,a,b,noccsh):
    """OCBSE step: for each shell, diagonalize that shell's Fock matrix in
    the space spanned by the shell's occupied orbitals plus all virtuals,
    then write the resulting energies/orbitals back in place.

    Returns (orbe, orbs): updated orbital energies and coefficients.
    """
    # Need to write this so that we don't need the orbs 3 times!
    nsh = len(noccsh)
    nbf = norb = h.shape[0]
    orbe = zeros(norb,'d')
    for ish in range(nsh):
        orbs_in_shell = get_orbs_in_shell(ish,noccsh,norb)
        F = get_os_fock(ish,nsh,f,a,b,h,Hs)
        # form the orbital space of all of the orbs in ish plus the virts
        T = orbs.take(orbs_in_shell,1)
        #print "take worked? ",(T==get_orbs(orbs,orbs_in_shell)).all()
        # Transform to MO space
        Fmo = ao2mo(F,T)
        mo_orbe,mo_orbs = eigh(Fmo)
        # Rotate the active-space orbitals by the MO eigenvectors
        T = matrixmultiply(T,mo_orbs)
        # Insert orbital energies into the right place
        update_orbe(orbs_in_shell,orbe,mo_orbe)
        update_orbs(orbs_in_shell,orbs,T)
    return orbe,orbs
def get_orbs(orbs,orbs_in_shell):
    "Equivalent to take(orbs,orbs_in_shell,1): gather the listed columns."
    out = zeros((orbs.shape[0],len(orbs_in_shell)),'d')
    col = 0
    for iorb in orbs_in_shell:
        out[:,col] = orbs[:,iorb]
        col += 1
    return out
def rotion(orbs,h,Hs,f,a,b,noccsh):
    """Rotate occupied orbitals between shells: build the pairwise rotation
    parameter matrix, exponentiate it, and apply it to the occupied block
    of orbs. No-op for single-shell (closed shell) systems.
    """
    nsh = len(noccsh)
    nocc = sum(noccsh)
    if nsh == 1: return orbs # No effect for closed shell systems
    rot = get_rot(h,Hs,f,a,b,noccsh)
    print "Rotation matrix:\n",rot
    erot = expmat(rot)
    print "Exp rotation matrix:\n",erot
    # Only the occupied-occupied block is rotated; virtuals are untouched.
    T = matrixmultiply(orbs[:,:nocc],erot)
    orbs[:,:nocc] = T
    return orbs
def expmats(A):
    """Matrix exponential via SciPy, for testing against expmat below.

    Uses the public scipy.linalg API; the former scipy.linalg.matfuncs
    module path is private/deprecated in modern SciPy.
    """
    from scipy.linalg import expm
    return expm(A)
def expmat(A,**kwargs):
    """Matrix exponential by truncated Taylor series.

    nmax (kwarg): maximum number of series terms (default 12)
    cut  (kwarg): convergence threshold on the largest term element (default 1e-8)
    """
    nmax = kwargs.get('nmax',12)
    cut = kwargs.get('cut',1e-8)
    E = identity(A.shape[0],'d')
    D = E
    for i in range(1,nmax):
        # D accumulates the series term A^i/i!
        D = matrixmultiply(D,A)/i
        E += D
        # NOTE(review): D.max() ignores large-magnitude negative entries;
        # abs(D).max() may be what was intended — confirm.
        maxel = D.max()
        if abs(maxel) < cut:
            break
    else:
        # for/else: only reached when the loop never broke (unconverged)
        print "Warning: expmat unconverged after %d iters: %g" % (nmax,maxel)
    return E
def get_sh(i,noccsh):
    """Return the index of the shell that occupied orbital i belongs to,
    or None when i is not an occupied orbital.

    Bug fix: previously returned the orbital index i instead of the shell
    index ish; callers such as get_rot use the result to index per-shell
    arrays (e.g. Hs[2*jsh]) and to compare shell membership.
    """
    nsh = len(noccsh)
    isum = 0
    for ish in range(nsh):
        isum += noccsh[ish]
        if i < isum:
            return ish
    return None
def get_rot(h,Hs,f,a,b,noccsh):
    """Build the pairwise rotation-parameter matrix between occupied
    orbitals belonging to different shells (cf. Bobrowicz & Goddard).
    """
    nocc = sum(noccsh)
    nsh = len(noccsh)
    rot = zeros((nocc,nocc),'d')
    for i in range(nocc):
        ish = get_sh(i,noccsh)
        for j in range(nocc):
            jsh = get_sh(j,noccsh)
            # Only mix orbitals that belong to different shells
            if jsh == ish: continue
            Wij = -0.5*(h[i,j]+Hs[0][i,j])
            Wii = -0.5*(h[i,i]+Hs[0][i,i])
            Wjj = -0.5*(h[j,j]+Hs[0][j,j])
            for k in range(nsh):
                # NOTE(review): the three lines below look like copy/paste
                # slips: Wii and Wjj are overwritten from Wij instead of
                # being accumulated from themselves, and the exchange matrix
                # is indexed with 2*i+1 (orbital index) where 2*k+1 (shell
                # index) appears intended — confirm against the GVB
                # reference before changing.
                Wij = Wij - 0.5*Hs[2*i+1][i,j]
                Wii = Wij - 0.5*Hs[2*i+1][i,i]
                Wjj = Wij - 0.5*Hs[2*i+1][j,j]
            Jij = Hs[2*jsh][i,i]
            Kij = Hs[2*jsh+1][i,i]
            gamma = Kij-0.5*(Kij+Jij)
            Xij = -Wij
            Bij = Wii-Wjj+gamma
            # Rotation parameter; sign handling depends on the sign of Bij
            if Bij > 0:
                Rij = -Xij/Bij
            else:
                Rij = Xij/Bij
            rot[i,j] = rot[j,i] = Rij
    return rot
def get_noccsh(nclosed,nopen):
    """Build the per-shell occupation list from the closed/open orbital
    counts; shells with zero occupation are omitted."""
    return [count for count in (nclosed, nopen) if count]
def get_fab(nclosed,nopen):
    """Occupation factors f and coupling matrices a,b for ROHF shells.

    Closed shells get f=1, open shells f=1/2. The defaults are
    a_ij = 2 f_i f_j and b_ij = -f_i f_j, with the open-open diagonal
    element special-cased for one vs. several open-shell orbitals.
    """
    f = []
    iopen_start = 0
    if nclosed:
        iopen_start = 1
        f.append(1.0)
    if nopen:
        f.append(0.5)
    nsh = len(f)
    a = zeros((nsh,nsh),'d')
    b = zeros((nsh,nsh),'d')
    for ish in range(nsh):
        fi = f[ish]
        for jsh in range(nsh):
            fifj = fi*f[jsh]
            a[ish,jsh] = 2.*fifj
            b[ish,jsh] = -fifj
    # Open-shell self-coupling corrections
    if nopen == 1:
        a[iopen_start,iopen_start] = 0
        b[iopen_start,iopen_start] = 0
    elif nopen > 1:
        b[iopen_start,iopen_start] = -0.5
    return f,a,b
def rohf_wag(atoms,noccsh=None,f=None,a=None,b=None,**kwargs):
    """\
    rohf(atoms,noccsh=None,f=None,a=None,b=None,**kwargs):
        Restricted open shell HF driving routine
    atoms       A Molecule object containing the system of interest
    noccsh      Occupied orbitals per shell (derived from atoms when None)
    f,a,b       Occupation and coupling coefficients (derived when None)

    Recognized kwargs: ConvCriteria, MaxIter, DoAveraging, verbose, bfs,
    basis_data, integrals, orbs.
    Returns (energy, orbe, orbs).
    """
    ConvCriteria = kwargs.get('ConvCriteria',1e-4)
    MaxIter = kwargs.get('MaxIter',25)
    DoAveraging = kwargs.get('DoAveraging',False)
    verbose = kwargs.get('verbose',True)
    bfs = kwargs.get('bfs',None)
    if not bfs:
        basis_data = kwargs.get('basis_data',None)
        bfs = getbasis(atoms,basis_data)
    integrals = kwargs.get('integrals', None)
    if integrals:
        S,h,Ints = integrals
    else:
        S,h,Ints = getints(bfs,atoms)
    nel = atoms.get_nel()
    orbs = kwargs.get('orbs',None)
    if orbs is None:
        # Initial guess from the one-electron (core) Hamiltonian
        orbe,orbs = geigh(h,S)
    nclosed,nopen = atoms.get_closedopen()
    nocc = nopen+nclosed
    if not noccsh: noccsh = get_noccsh(nclosed,nopen)
    nsh = len(noccsh)
    nbf = norb = len(bfs)
    if not f:
        f,a,b = get_fab(nclosed,nopen)
    if verbose:
        print "ROHF calculation"
        print "nsh = ",nsh
        print "noccsh = ",noccsh
        print "f = ",f
        print "a_ij: "
        for i in range(nsh):
            for j in range(i+1):
                print a[i,j],
            print
        print "b_ij: "
        for i in range(nsh):
            for j in range(i+1):
                print b[i,j],
            print
    enuke = atoms.get_enuke()
    energy = eold = 0.
    for i in range(MaxIter):
        Ds = get_os_dens(orbs,f,noccsh)
        Hs = get_os_hams(Ints,Ds)
        # Inter-shell rotation, then per-shell diagonalization (OCBSE)
        orbs = rotion(orbs,h,Hs,f,a,b,noccsh)
        orbe,orbs = ocbse(orbs,h,Hs,f,a,b,noccsh)
        orthogonalize(orbs,S)
        # Compute the energy
        eone = sum(f[ish]*trace2(Ds[ish],h) for ish in range(nsh))
        energy = enuke+eone+sum(orbe[:nocc])
        print energy,eone
        if abs(energy-eold) < ConvCriteria: break
        eold = energy
    return energy,orbe,orbs
def rohf(atoms,**opts):
"""\
rohf(atoms,**opts) - Restriced Open Shell Hartree Fock
atoms A Molecule object containing the molecule
"""
ConvCriteria = opts.get('ConvCriteria',1e-5)
MaxIter = opts.get('MaxIter',40)
DoAveraging = opts.get('DoAveraging',True)
averaging = opts.get('averaging',0.95)
verbose = opts.get('verbose',True)
bfs = opts.get('bfs',None)
if not bfs:
basis_data = opts.get('basis_data',None)
bfs = getbasis(atoms,basis_data)
nbf = len(bfs)
integrals = opts.get('integrals', None)
if integrals:
S,h,Ints = integrals
else:
S,h,Ints = getints(bfs,atoms)
nel = atoms.get_nel()
nalpha,nbeta = atoms.get_alphabeta()
S,h,Ints = getints(bfs,atoms)
orbs = opts.get('orbs',None)
if orbs is None:
orbe,orbs = geigh(h,S)
norbs = nbf
enuke = atoms.get_enuke()
eold = 0.
if verbose: print "ROHF calculation on %s" % atoms.name
if verbose: print "Nbf = %d" % nbf
if verbose: print "Nalpha = %d" % nalpha
if verbose: print "Nbeta = %d" % nbeta
if verbose: print "Averaging = %s" % DoAveraging
print "Optimization of HF orbitals"
for i in range(MaxIter):
if verbose: print "SCF Iteration:",i,"Starting Energy:",eold
Da = mkdens(orbs,0,nalpha)
Db = mkdens(orbs,0,nbeta)
if DoAveraging:
if i:
Da = averaging*Da + (1-averaging)*Da0
Db = averaging*Db + (1-averaging)*Db0
Da0 = Da
Db0 = Db
Ja = getJ(Ints,Da)
Jb = getJ(Ints,Db)
Ka = getK(Ints,Da)
Kb = getK(Ints,Db)
Fa = h+Ja+Jb-Ka
Fb = h+Ja+Jb-Kb
energya = get_energy(h,Fa,Da)
energyb = get_energy(h,Fb,Db)
eone = (trace2(Da,h) + trace2(Db,h))/2
etwo = (trace2(Da,Fa) + trace2(Db,Fb))/2
energy = (energya+energyb)/2 + enuke
print i,energy,eone,etwo,enuke
if abs(energy-eold) < ConvCriteria: break
eold = energy
Fa = ao2mo(Fa,orbs)
Fb = ao2mo(Fb,orbs)
# Building the approximate Fock matrices in the MO basis
F = 0.5*(Fa+Fb)
K = Fb-Fa
# The Fock matrix now looks like
# F-K | F + K/2 | F
# ---------------------------------
# F + K/2 | F | F - K/2
# ---------------------------------
# F | F - K/2 | F + K
# Make explicit slice objects to simplify this
do = slice(0,nbeta)
so = slice(nbeta,nalpha)
uo = slice(nalpha,norbs)
F[do,do] -= K[do,do]
F[uo,uo] += K[uo,uo]
F[do,so] += 0.5*K[do,so]
F[so,do] += 0.5*K[so,do]
F[so,uo] -= 0.5*K[so,uo]
F[uo,so] -= 0.5*K[uo,so]
orbe,mo_orbs = eigh(F)
orbs = matrixmultiply(orbs,mo_orbs)
if verbose:
print "Final ROHF energy for system %s is %f" % (atoms.name,energy)
return energy,orbe,orbs
def ao2mo(M,C):
    """Similarity-transform the AO-basis matrix M into the MO basis
    defined by the coefficient matrix C."""
    return simx(M,C)
def mo2ao(M,C,S):
    """Back-transform the MO-basis matrix M to the AO basis using the
    orbital coefficients C and overlap matrix S."""
    return simx(M,matrixmultiply(S,C),'t')
def symmetrize(A):
    """Return the symmetric part of A, i.e. (A + A^T)/2."""
    sym = A + A.T
    return sym/2
def printmat(mat,name='mat',**kwargs):
    """Print a labeled sub-block of a matrix.

    Rows istart:istop and columns jstart:jstop are shown (defaults give
    the leading 4x4 block).
    """
    istart = kwargs.get('istart',0)
    istop = kwargs.get('istop',4)
    jstart = kwargs.get('jstart',0)
    jstop = kwargs.get('jstop',4)
    # NOTE(review): 'suppress' is read but never used below — confirm intent.
    suppress = kwargs.get('suppress',True)
    print name,'\n',mat[istart:istop,jstart:jstop]
    return
def orthogonalize(orbs,S):
    """Orthonormalize the columns of orbs in place with respect to the
    overlap matrix S (Gram-Schmidt in the S metric), printing the largest
    overlap element that was removed.
    """
    nbf,norb = orbs.shape
    Smax = 0
    for i in range(norb):
        for j in range(i):
            # Overlap of orbital i with the already-processed orbital j
            Sij = dot(orbs[:,j],dot(S,orbs[:,i]))
            Smax = max(Smax,abs(Sij))
            orbs[:,i] -= Sij*orbs[:,j]
        # Renormalize orbital i in the S metric
        Sii = dot(orbs[:,i],dot(S,orbs[:,i]))
        orbs[:,i] /= sqrt(Sii)
    print "Max orthogonalized element = ",Smax
    return
if __name__ == '__main__':
    # Smoke test: run both ROHF drivers on a small open-shell atom and
    # print their energies/spectra for comparison.
    from PyQuante.Molecule import Molecule
    from PyQuante.hartree_fock import hf
    h = Molecule('H',[(1,(0,0,0))],multiplicity=2)
    he = Molecule('He',[(2,(0,0,0))])
    li = Molecule('Li',[(3,(0,0,0))],multiplicity=2)
    be = Molecule('Be',[(4,(0,0,0))],multiplicity=3)
    mol = li
    print "HF results (for comparison)"
    energy,orbe,orbs = rohf(mol)
    print "ROHF Energy = ",energy
    print "ROHF spectrum: \n",orbe
    #energy,orbe,orbs = rohf_wag(mol,orbs=orbs)
    energy,orbe,orbs = rohf_wag(mol)
    print "ROHF Energy = ",energy
    print "ROHF spectrum: \n",orbe
| |
# coding=utf-8
# Copyright 2022 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the sparse_optimizers file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from absl import flags
import absl.testing.parameterized as parameterized
import numpy as np
from rigl import sparse_optimizers
from rigl import sparse_utils
import tensorflow.compat.v1 as tf # tf
from tensorflow.contrib.model_pruning.python import pruning
from tensorflow.contrib.model_pruning.python.layers import layers
FLAGS = flags.FLAGS
class SparseSETOptimizerTest(tf.test.TestCase, parameterized.TestCase):
    """Tests for SparseSETOptimizer (TF1 graph mode).

    Note: pruning.get_weights()/get_masks() return variables in creation
    order, so graph-construction order below is significant.
    """

    def _setup_graph(self, n_inp, n_out, drop_frac, start_iter=1, end_iter=4,
                     freq_iter=2):
        """Sets up a trivial training procedure for sparse training.

        Args:
            n_inp: int, number of input channels.
            n_out: int, number of output channels.
            drop_frac: float, drop fraction passed to the sparse optimizer.
            start_iter: int, first iteration at which the mask may be updated.
            end_iter: int, last iteration at which the mask may be updated.
            freq_iter: int, mask update frequency.

        Returns:
            Tuple of (sess, train_op, mask, weight, global_step).
        """
        tf.reset_default_graph()
        optim = tf.train.GradientDescentOptimizer(0.1)
        sparse_optim = sparse_optimizers.SparseSETOptimizer(
            optim, start_iter, end_iter, freq_iter, drop_fraction=drop_frac)
        x = tf.random.uniform((1, n_inp))
        y = layers.masked_fully_connected(x, n_out, activation_fn=None)
        global_step = tf.train.get_or_create_global_step()
        weight = pruning.get_weights()[0]
        # There is one masked layer to be trained.
        mask = pruning.get_masks()[0]
        # Around half of the values of the mask is set to zero with `mask_update`.
        mask_update = tf.assign(
            mask,
            tf.constant(
                np.random.choice([0, 1], size=(n_inp, n_out), p=[1./2, 1./2]),
                dtype=tf.float32))
        loss = tf.reduce_mean(y)
        global_step = tf.train.get_or_create_global_step()
        train_op = sparse_optim.minimize(loss, global_step)
        # Init
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
        sess.run([mask_update])
        return sess, train_op, mask, weight, global_step

    @parameterized.parameters((15, 25, 0.5), (15, 25, 0.2), (3, 5, 0.2))
    def testMaskNonUpdateIterations(self, n_inp, n_out, drop_frac):
        """Trains a layer for 5 iterations and checks the mask is kept intact.

        The mask should be updated only in iterations 1 and 3 (since start_iter=1,
        end_iter=4, freq_iter=2).

        Args:
            n_inp: int, number of input channels.
            n_out: int, number of output channels
            drop_frac: float, passed to the sparse optimizer.
        """
        sess, train_op, mask, _, _ = self._setup_graph(
            n_inp, n_out, drop_frac, start_iter=1, end_iter=4, freq_iter=2)
        expected_updates = [1, 3]
        # Running 5 times to make sure the mask is not updated after end_iter.
        for i in range(1, 6):
            c_mask, = sess.run([mask])
            sess.run([train_op])
            c_mask2, = sess.run([mask])
            if i not in expected_updates:
                self.assertAllEqual(c_mask, c_mask2)

    @parameterized.parameters((15, 25, 0.5), (15, 25, 0.7), (30, 10, 0.9))
    def testUpdateIterations(self, n_inp, n_out, drop_frac):
        """Checks whether the mask is updated during the correct iterations.

        The mask should be updated only in iterations 1 and 3 (since start_iter=1,
        end_iter=4, freq_iter=2). Number of 1's in the mask should be equal.

        Args:
            n_inp: int, number of input channels.
            n_out: int, number of output channels
            drop_frac: float, passed to the sparse optimizer.
        """
        sess, train_op, mask, _, _ = self._setup_graph(
            n_inp, n_out, drop_frac, start_iter=1, end_iter=4, freq_iter=2)
        expected_updates = [1, 3]
        # Running 4 times since last update is at 3.
        for i in range(1, 5):
            c_mask, = sess.run([mask])
            sess.run([train_op])
            c_mask2, = sess.run([mask])
            if i in expected_updates:
                # Number of ones (connections) should be same.
                self.assertEqual(c_mask.sum(), c_mask2.sum())
                # Assert there is some change in the mask.
                self.assertNotAllClose(c_mask, c_mask2)

    @parameterized.parameters((3, 7, 2), (1, 5, 3), (0, 4, 1))
    def testNoDrop(self, start_iter, end_iter, freq_iter):
        """Checks that when the drop fraction is 0, no mask change is made.

        With nothing dropped (and therefore nothing grown) the mask must stay
        identical across every iteration, including scheduled update steps.

        Args:
            start_iter: int, start iteration for sparse training.
            end_iter: int, final iteration for sparse training.
            freq_iter: int, mask update frequency.
        """
        # Setting drop_fraction to 0; so there is nothing dropped, nothing changed.
        sess, train_op, mask, _, _ = self._setup_graph(
            3, 5, 0, start_iter=start_iter, end_iter=end_iter, freq_iter=freq_iter)
        for _ in range(end_iter+2):
            c_mask, = sess.run([mask])
            sess.run([train_op])
            c_mask2, = sess.run([mask])
            self.assertAllEqual(c_mask, c_mask2)

    def testNewConnectionZeroInit(self):
        """Checks whether new connections are correctly initialized to zeros."""
        end_iter = 4
        sess, train_op, mask, weight, _ = self._setup_graph(
            n_inp=3, n_out=5, drop_frac=0.5, start_iter=0, end_iter=end_iter,
            freq_iter=1)
        # Let's iterate until the mask updates are done.
        for _ in range(end_iter + 1):
            mask_tensor, = sess.run([mask])
            sess.run([train_op])
            new_mask_tensor, new_weight_tensor = sess.run([mask, weight])
            # Weights at positions that flipped 0 -> 1 must all be zero.
            new_weights = new_weight_tensor[np.logical_and(mask_tensor == 0,
                                                           new_mask_tensor == 1)]
            self.assertTrue(np.all(new_weights == 0))

    @parameterized.parameters(itertools.product(
        ((3, 7, 2), (5, 3), (1,)), ('zeros', 'random_normal', 'random_uniform')))
    def testShapeOfGetGrowTensor(self, shape, init_type):
        """Checks whether the new tensor is created with the correct shape."""
        optim = tf.train.GradientDescentOptimizer(0.1)
        sparse_optim = sparse_optimizers.SparseSETOptimizer(optim, 0, 0, 1,
                                                            use_stateless=False)
        weights = tf.random_uniform(shape)
        grow_tensor = sparse_optim.get_grow_tensor(weights, init_type)
        self.assertAllEqual(weights.shape, grow_tensor.shape)

    @parameterized.parameters(itertools.product(
        (tf.float32, tf.float64),
        ('zeros', 'random_normal', 'random_uniform')))
    def testDtypeOfGetGrowTensor(self, dtype, init_type):
        """Checks whether the new tensor is created with the correct data type."""
        optim = tf.train.GradientDescentOptimizer(0.1)
        sparse_optim = sparse_optimizers.SparseSETOptimizer(optim, 0, 0, 1,
                                                            use_stateless=False)
        weights = tf.random_uniform((3, 4), dtype=dtype, maxval=5)
        grow_tensor = sparse_optim.get_grow_tensor(weights, init_type)
        self.assertEqual(grow_tensor.dtype, weights.dtype)

    @parameterized.parameters('ones', 'zero', None, 0)
    def testValueErrorOfGetGrowTensor(self, method):
        """Checks get_grow_tensor raises ValueError for invalid init methods."""
        optim = tf.train.GradientDescentOptimizer(0.1)
        sparse_optim = sparse_optimizers.SparseSETOptimizer(optim, 0, 0, 1,
                                                            use_stateless=False)
        weights = tf.random_uniform((3, 4))
        with self.assertRaises(ValueError):
            sparse_optim.get_grow_tensor(weights, method)
class SparseStaticOptimizerTest(tf.test.TestCase, parameterized.TestCase):
    """Tests for SparseStaticOptimizer: connectivity must remain fixed."""

    def _setup_graph(self, n_inp, n_out, drop_frac, start_iter=1, end_iter=4,
                     freq_iter=2):
        """Sets up a trivial training procedure for sparse training.

        Returns:
            Tuple of (sess, train_op, mask, weight, global_step).
        """
        tf.reset_default_graph()
        optim = tf.train.GradientDescentOptimizer(0.1)
        sparse_optim = sparse_optimizers.SparseStaticOptimizer(
            optim, start_iter, end_iter, freq_iter, drop_fraction=drop_frac)
        x = tf.random.uniform((1, n_inp))
        y = layers.masked_fully_connected(x, n_out, activation_fn=None)
        global_step = tf.train.get_or_create_global_step()
        weight = pruning.get_weights()[0]
        # There is one masked layer to be trained.
        mask = pruning.get_masks()[0]
        # Around half of the values of the mask is set to zero with `mask_update`.
        mask_update = tf.assign(
            mask,
            tf.constant(
                np.random.choice([0, 1], size=(n_inp, n_out), p=[1./2, 1./2]),
                dtype=tf.float32))
        loss = tf.reduce_mean(y)
        global_step = tf.train.get_or_create_global_step()
        train_op = sparse_optim.minimize(loss, global_step)
        # Init
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
        sess.run([mask_update])
        return sess, train_op, mask, weight, global_step

    @parameterized.parameters((15, 25, 0.5), (15, 25, 0.2), (3, 5, 0.2))
    def testMaskStatic(self, n_inp, n_out, drop_frac):
        """Trains a layer for 5 iterations; the mask must never change.

        The static optimizer keeps the sparse connectivity fixed, so the
        mask is asserted equal before/after every training step, including
        the scheduled mask-update iterations.

        Args:
            n_inp: int, number of input channels.
            n_out: int, number of output channels
            drop_frac: float, passed to the sparse optimizer.
        """
        sess, train_op, mask, _, _ = self._setup_graph(
            n_inp, n_out, drop_frac, start_iter=1, end_iter=4, freq_iter=2)
        # Running 5 times to make sure the mask is not updated after end_iter.
        for _ in range(5):
            c_mask, = sess.run([mask])
            sess.run([train_op])
            c_mask2, = sess.run([mask])
            self.assertAllEqual(c_mask, c_mask2)
class SparseMomentumOptimizerTest(tf.test.TestCase, parameterized.TestCase):
    """Tests for SparseMomentumOptimizer's masked-gradient EMA bookkeeping."""

    def _setup_graph(self, n_inp, n_out, drop_frac, start_iter=1, end_iter=4,
                     freq_iter=2, momentum=0.5):
        """Sets up a trivial training procedure for sparse training.

        Returns:
            Tuple of (sess, train_op, masked_grad_ema).
        """
        tf.reset_default_graph()
        optim = tf.train.GradientDescentOptimizer(0.1)
        sparse_optim = sparse_optimizers.SparseMomentumOptimizer(
            optim, start_iter, end_iter, freq_iter, drop_fraction=drop_frac,
            momentum=momentum)
        x = tf.ones((1, n_inp))
        y = layers.masked_fully_connected(x, n_out, activation_fn=None)
        # Multiplying the output with range of constants to have constant but
        # different gradients at the masked weights.
        y = y * tf.reshape(tf.cast(tf.range(tf.size(y)), dtype=y.dtype), y.shape)
        loss = tf.reduce_sum(y)
        global_step = tf.train.get_or_create_global_step()
        train_op = sparse_optim.minimize(loss, global_step)
        weight = pruning.get_weights()[0]
        masked_grad = sparse_optim._weight2masked_grads[weight.name]
        masked_grad_ema = sparse_optim._ema_grads.average(masked_grad)
        # Init
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
        return sess, train_op, masked_grad_ema

    @parameterized.parameters((3, 4, 0.5), (5, 2, 0.), (2, 5, 1.))
    def testMomentumUpdate(self, n_inp, n_out, momentum):
        """Checks the masked-gradient EMA follows the momentum recurrence.

        Args:
            n_inp: int, number of input channels.
            n_out: int, number of output channels.
            momentum: float, EMA decay applied to the masked gradients.
        """
        sess, train_op, masked_grad_ema = self._setup_graph(
            n_inp, n_out, 0.5, start_iter=1, end_iter=4, freq_iter=2,
            momentum=momentum)
        # Running 6 times to make sure the momentum is always updated.
        current_momentum = np.zeros((n_inp, n_out))
        for _ in range(6):
            ema_masked_grad, = sess.run([masked_grad_ema])
            self.assertAllEqual(ema_masked_grad, current_momentum)
            sess.run([train_op])
            # This is since we multiply the output values with range(n_out)
            # Note the broadcast from n_out vector to (n_inp, n_out) matrix.
            current_momentum = (current_momentum * momentum +
                                (1 - momentum) * np.arange(n_out))
            ema_masked_grad, = sess.run([masked_grad_ema])
            self.assertAllEqual(ema_masked_grad, current_momentum)
class SparseRigLOptimizerTest(tf.test.TestCase, parameterized.TestCase):
    """Tests for SparseRigLOptimizer's masked-gradient and step accounting."""

    def _setup_graph(self, n_inp, n_out, drop_frac, start_iter=1, end_iter=4,
                     freq_iter=2):
        """Sets up a trivial training procedure for sparse training.

        Returns:
            Tuple of (sess, train_op, masked_grad, expected_gradient).
        """
        tf.reset_default_graph()
        optim = tf.train.GradientDescentOptimizer(1e-3)
        global_step = tf.train.get_or_create_global_step()
        sparse_optim = sparse_optimizers.SparseRigLOptimizer(
            optim, start_iter, end_iter, freq_iter, drop_fraction=drop_frac)
        x = tf.ones((1, n_inp))
        y = layers.masked_fully_connected(x, n_out, activation_fn=None)
        # Multiplying the output with range of constants to have constant but
        # different gradients at the masked weights. We also multiply the loss with
        # global_step to increase the gradient linearly with time.
        scale_vector = (
            tf.reshape(tf.cast(tf.range(tf.size(y)), dtype=y.dtype), y.shape) *
            tf.cast(global_step, dtype=y.dtype))
        y = y * scale_vector
        loss = tf.reduce_sum(y)
        global_step = tf.train.get_or_create_global_step()
        train_op = sparse_optim.minimize(loss, global_step)
        weight = pruning.get_weights()[0]
        expected_gradient = tf.broadcast_to(scale_vector, weight.shape)
        masked_grad = sparse_optim._weight2masked_grads[weight.name]
        # Init
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
        return sess, train_op, masked_grad, expected_gradient

    @parameterized.parameters((3, 4), (5, 2), (2, 5))
    def testMaskedGradientCalculation(self, n_inp, n_out):
        """Checking whether masked_grad is calculated after apply_gradients."""
        # No drop since we don't want to change the mask but check whether the grad
        # is calculated after the gradient step.
        sess, train_op, masked_grad, expected_gradient = self._setup_graph(
            n_inp, n_out, 0., start_iter=0, end_iter=3, freq_iter=1)
        # Since we only update the mask every 2 iterations, we will iterate 6 times.
        for i in range(6):
            is_mask_update = i % 2 == 0
            if is_mask_update:
                expected_gradient_tensor, = sess.run([expected_gradient])
                _, masked_grad_tensor = sess.run([train_op, masked_grad])
                self.assertAllEqual(masked_grad_tensor,
                                    expected_gradient_tensor)
            else:
                sess.run([train_op])

    @parameterized.parameters(
        (3, 7, 2, [1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1]),
        (1, 5, 3, [1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1]),
        (0, 4, 1, [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1]))
    def testApplyGradients(self, start_iter, end_iter, freq_iter, is_incremented):
        """Checks apply_gradient runs only in non-mask-update iterations.

        Args:
            start_iter: int, start iteration for sparse training.
            end_iter: int, final iteration for sparse training.
            freq_iter: int, mask update frequency.
            is_incremented: list of 0/1 flags per iteration; 1 means a regular
                gradient step (global step incremented), 0 means a mask-update
                step (global step unchanged).
        """
        sess, train_op, _, _ = self._setup_graph(
            3, 5, .5, start_iter=start_iter, end_iter=end_iter, freq_iter=freq_iter)
        global_step = tf.train.get_or_create_global_step()
        # Walk through the schedule encoded in is_incremented.
        for one_if_incremented in is_incremented:
            before, = sess.run([global_step])
            sess.run([train_op])
            after, = sess.run([global_step])
            if one_if_incremented == 1:
                self.assertEqual(before + 1, after)
            else:
                # Mask update step.
                self.assertEqual(before, after)
class SparseSnipOptimizerTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for SparseSnipOptimizer (single-shot pruning on the first step).

  SNIP snips the connections with the lowest |grad * weight| sensitivity
  scores once, at the first training step; afterwards the mask is expected
  to stay fixed while regular training continues.
  """
  def _setup_graph(self, default_sparsity, mask_init_method,
                   custom_sparsity_map, n_inp=3, n_out=5):
    """Setups a trivial training procedure for sparse training.

    Builds a single masked fully-connected layer y = W x scaled elementwise
    by a random vector, with loss = sum(y), so dL/dW is analytically known.

    Returns:
      (session, train_op, expected_grads, sparse_optim, mask, weights).
    """
    tf.reset_default_graph()
    optim = tf.train.GradientDescentOptimizer(1e-3)
    sparse_optim = sparse_optimizers.SparseSnipOptimizer(
        optim, default_sparsity, mask_init_method,
        custom_sparsity_map=custom_sparsity_map)
    inp_values = np.arange(1, n_inp+1)
    scale_vector_values = np.random.uniform(size=(n_out,)) - 0.5
    # The gradient is the outer product of input and the output gradients.
    # Since the loss is sample sum the output gradient is equal to the scale
    # vector.
    expected_grads = np.outer(inp_values, scale_vector_values)
    x = tf.reshape(tf.constant(inp_values, dtype=tf.float32), (1, n_inp))
    y = layers.masked_fully_connected(x, n_out, activation_fn=None)
    scale_vector = tf.constant(scale_vector_values, dtype=tf.float32)
    y = y * scale_vector
    loss = tf.reduce_sum(y)
    global_step = tf.train.get_or_create_global_step()
    train_op = sparse_optim.minimize(loss, global_step)
    # Init
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    mask = pruning.get_masks()[0]
    weights = pruning.get_weights()[0]
    return sess, train_op, expected_grads, sparse_optim, mask, weights
  @parameterized.parameters((3, 4, 0.5), (5, 3, 0.8), (8, 5, 0.8))
  def testSnipSparsity(self, n_inp, n_out, default_sparsity):
    """Checks the mask reaches the requested sparsity after one step."""
    sess, train_op, _, _, mask, _ = self._setup_graph(
        default_sparsity, 'random', {}, n_inp=n_inp, n_out=n_out)
    _ = sess.run([train_op])
    snipped_mask, = sess.run([mask])
    # Mask entries are 0/1, so the sum counts the surviving connections.
    n_ones = np.sum(snipped_mask)
    n_zeros = snipped_mask.size - n_ones
    n_zeros_expected = sparse_utils.get_n_zeros(snipped_mask.size,
                                                default_sparsity)
    self.assertEqual(n_zeros, n_zeros_expected)
  @parameterized.parameters((3, 4, 0.5), (5, 3, 0.8), (8, 5, 0.8))
  def testGradientUsed(self, n_inp, n_out, default_sparsity):
    """Checks snipped connections are those with lowest |grad * weight|."""
    sess, train_op, expected_grads, _, mask, weights = self._setup_graph(
        default_sparsity, 'random', {}, n_inp=n_inp, n_out=n_out)
    # Calculate sensitivity scores.
    weights, = sess.run([weights])
    expected_scores = np.abs(expected_grads*weights)
    _ = sess.run([train_op])
    snipped_mask, = sess.run([mask])
    kept_connection_scores = expected_scores[snipped_mask == 1]
    min_score_kept = np.min(kept_connection_scores)
    snipped_connection_scores = expected_scores[snipped_mask == 0]
    max_score_snipped = np.max(snipped_connection_scores)
    # Every surviving connection must score at least as high as any snipped
    # one.
    self.assertLessEqual(max_score_snipped, min_score_kept)
  @parameterized.parameters((3, 4, 0.5), (5, 3, 0.8), (8, 5, 0.8))
  def testInitialMaskIsDense(self, n_inp, n_out, default_sparsity):
    """Checks the mask is all ones before any training step."""
    sess, _, _, _, mask, _ = self._setup_graph(
        default_sparsity, 'random', {}, n_inp=n_inp, n_out=n_out)
    mask_start, = sess.run([mask])
    self.assertEqual(np.sum(mask_start), mask_start.size)
  @parameterized.parameters((3, 4, 0.5), (5, 3, 0.8), (8, 5, 0.8))
  def testAfterSnipTraining(self, n_inp, n_out, default_sparsity):
    """Checks the mask stays fixed on iterations after the snip step."""
    sess, train_op, _, sparse_optim, mask, _ = self._setup_graph(
        default_sparsity, 'random', {}, n_inp=n_inp, n_out=n_out)
    global_step = tf.train.get_or_create_global_step()
    # NOTE(review): sess.run([train_op]) returns a non-empty list, so this
    # assertTrue always passes regardless of the fetched value — confirm a
    # boolean 'is this the snip iteration' tensor was meant to be fetched.
    is_snip_iter = sess.run([train_op])
    self.assertTrue(is_snip_iter)
    # On other iterations mask should stay same. Let's do 3 more iterations.
    for i in range(3):
      mask_before, c_iter = sess.run([mask, global_step])
      self.assertEqual(i, c_iter)
      is_snip_iter, is_snipped = sess.run([train_op, sparse_optim.is_snipped])
      self.assertTrue(is_snipped)
      self.assertFalse(is_snip_iter)
      mask_after, = sess.run([mask])
      self.assertAllEqual(mask_after, mask_before)
class SparseDNWOptimizerTest(tf.test.TestCase, parameterized.TestCase):
  """Tests for SparseDNWOptimizer.

  DNW computes dense gradients but, after every step, keeps only the
  largest-magnitude weights active in the mask.
  """
  def _setup_graph(self,
                   default_sparsity,
                   mask_init_method,
                   custom_sparsity_map,
                   n_inp=3,
                   n_out=5):
    """Setups a trivial training procedure for sparse training.

    Same single masked layer construction as SparseSnipOptimizerTest, but
    driven through compute_gradients/apply_gradients so the raw gradient
    tensors are observable.

    Returns:
      (session, train_op, (expected_grads, grads_and_vars), mask, weights).
    """
    tf.reset_default_graph()
    optim = tf.train.GradientDescentOptimizer(1e-3)
    sparse_optim = sparse_optimizers.SparseDNWOptimizer(
        optim,
        default_sparsity,
        mask_init_method,
        custom_sparsity_map=custom_sparsity_map)
    inp_values = np.arange(1, n_inp + 1)
    scale_vector_values = np.random.uniform(size=(n_out,)) - 0.5
    # The gradient is the outer product of input and the output gradients.
    # Since the loss is sample sum the output gradient is equal to the scale
    # vector.
    expected_grads = np.outer(inp_values, scale_vector_values)
    x = tf.reshape(tf.constant(inp_values, dtype=tf.float32), (1, n_inp))
    y = layers.masked_fully_connected(x, n_out, activation_fn=None)
    scale_vector = tf.constant(scale_vector_values, dtype=tf.float32)
    y = y * scale_vector
    loss = tf.reduce_sum(y)
    global_step = tf.train.get_or_create_global_step()
    grads_and_vars = sparse_optim.compute_gradients(loss)
    train_op = sparse_optim.apply_gradients(
        grads_and_vars, global_step=global_step)
    # Init
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    mask = pruning.get_masks()[0]
    weights = pruning.get_weights()[0]
    return (sess, train_op, (expected_grads, grads_and_vars), mask, weights)
  @parameterized.parameters((3, 4, 0.5), (5, 3, 0.8), (8, 5, 0.8))
  def testDNWSparsity(self, n_inp, n_out, default_sparsity):
    """Checks the mask has the requested sparsity after a training step."""
    sess, train_op, _, mask, _ = self._setup_graph(
        default_sparsity, 'random', {}, n_inp=n_inp, n_out=n_out)
    _ = sess.run([train_op])
    dnw_mask, = sess.run([mask])
    # Mask entries are 0/1, so the sum counts the surviving connections.
    n_ones = np.sum(dnw_mask)
    n_zeros = dnw_mask.size - n_ones
    n_zeros_expected = sparse_utils.get_n_zeros(dnw_mask.size, default_sparsity)
    self.assertEqual(n_zeros, n_zeros_expected)
  @parameterized.parameters((3, 4, 0.5), (5, 3, 0.8), (8, 5, 0.8))
  def testWeightsUsed(self, n_inp, n_out, default_sparsity):
    """Checks the mask keeps the largest-magnitude weights."""
    sess, train_op, _, mask, weights = self._setup_graph(
        default_sparsity, 'random', {}, n_inp=n_inp, n_out=n_out)
    # Calculate sensitivity scores.
    weights, = sess.run([weights])
    expected_scores = np.abs(weights)
    _ = sess.run([train_op])
    dnw_mask, = sess.run([mask])
    kept_connection_scores = expected_scores[dnw_mask == 1]
    min_score_kept = np.min(kept_connection_scores)
    dnw_mask_connection_scores = expected_scores[dnw_mask == 0]
    max_score_removed = np.max(dnw_mask_connection_scores)
    # Every kept weight magnitude must be >= every removed one.
    self.assertLessEqual(max_score_removed, min_score_kept)
  @parameterized.parameters((3, 4, 0.5), (5, 3, 0.8), (8, 5, 0.8))
  def testGradientIsDense(self, n_inp, n_out, default_sparsity):
    """Checking whether calculated gradients are dense."""
    sess, _, grad_info, _, _ = self._setup_graph(
        default_sparsity, 'random', {}, n_inp=n_inp, n_out=n_out)
    expected_grad, grads_and_vars = grad_info
    # The first (grad, var) pair belongs to the masked layer's weights.
    grad, = sess.run([grads_and_vars[0][0]])
    self.assertAllClose(expected_grad, grad)
  @parameterized.parameters((3, 4, 0.5), (5, 3, 0.8), (8, 5, 0.8))
  def testDNWUpdates(self, n_inp, n_out, default_sparsity):
    """Checking whether mask is updated correctly."""
    sess, train_op, _, mask, weights = self._setup_graph(
        default_sparsity, 'random', {}, n_inp=n_inp, n_out=n_out)
    # On all iterations mask should have least magnitude connections.
    for _ in range(5):
      sess.run([train_op])
      mask_after, weights_after = sess.run([mask, weights])
      kept_connection_magnitudes = np.abs(weights_after[mask_after == 1])
      min_score_kept = np.min(kept_connection_magnitudes)
      removed_connection_magnitudes = np.abs(weights_after[mask_after == 0])
      max_score_removed = np.max(removed_connection_magnitudes)
      self.assertLessEqual(max_score_removed, min_score_kept)
  @parameterized.parameters((3, 4, 0.5), (5, 3, 0.8), (8, 5, 0.8))
  def testSparsityAfterDNWUpdates(self, n_inp, n_out, default_sparsity):
    """Checks sparsity is preserved across several training steps."""
    sess, train_op, _, mask, _ = self._setup_graph(
        default_sparsity, 'random', {}, n_inp=n_inp, n_out=n_out)
    # On all iterations mask should have least magnitude connections.
    for _ in range(5):
      sess.run([train_op])
      dnw_mask, = sess.run([mask])
      n_ones = np.sum(dnw_mask)
      n_zeros = dnw_mask.size - n_ones
      n_zeros_expected = sparse_utils.get_n_zeros(dnw_mask.size,
                                                  default_sparsity)
      self.assertEqual(n_zeros, n_zeros_expected)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| |
from ..Qt import QtCore, QtGui
from .DockDrop import *
from ..widgets.VerticalLabel import VerticalLabel
from ..python2_3 import asUnicode
class Dock(QtGui.QWidget, DockDrop):
    """A movable container widget used inside a DockArea.

    A Dock stacks user widgets in a grid layout underneath a draggable
    DockLabel title bar; drag-and-drop re-docking behavior comes from the
    DockDrop mixin.  sigStretchChanged is emitted whenever the target size
    set via setStretch() changes.
    """
    sigStretchChanged = QtCore.Signal()
    def __init__(self, name, area=None, size=(10, 10), widget=None, hideTitle=False, autoOrientation=True, closable=False):
        QtGui.QWidget.__init__(self)
        DockDrop.__init__(self)
        self.area = area
        self.label = DockLabel(name, self, closable)
        if closable:
            self.label.sigCloseClicked.connect(self.close)
        self.labelHidden = False
        self.moveLabel = True  ## If false, the dock is no longer allowed to move the label.
        self.autoOrient = autoOrientation
        self.orientation = 'horizontal'
        #self.label.setAlignment(QtCore.Qt.AlignHCenter)
        ## Outer layout: title label in row 0, widget area in row 1.
        self.topLayout = QtGui.QGridLayout()
        self.topLayout.setContentsMargins(0, 0, 0, 0)
        self.topLayout.setSpacing(0)
        self.setLayout(self.topLayout)
        self.topLayout.addWidget(self.label, 0, 1)
        self.widgetArea = QtGui.QWidget()
        self.topLayout.addWidget(self.widgetArea, 1, 1)
        ## Inner layout: user widgets added via addWidget().
        self.layout = QtGui.QGridLayout()
        self.layout.setContentsMargins(0, 0, 0, 0)
        self.layout.setSpacing(0)
        self.widgetArea.setLayout(self.layout)
        self.widgetArea.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        self.widgets = []
        self.currentRow = 0
        #self.titlePos = 'top'
        self.raiseOverlay()
        ## Style sheets for the three title-bar states (horizontal title,
        ## vertical title, no title) plus drag feedback.
        self.hStyle = """
        Dock > QWidget {
            border: 1px solid #000;
            border-radius: 5px;
            border-top-left-radius: 0px;
            border-top-right-radius: 0px;
            border-top-width: 0px;
        }"""
        self.vStyle = """
        Dock > QWidget {
            border: 1px solid #000;
            border-radius: 5px;
            border-top-left-radius: 0px;
            border-bottom-left-radius: 0px;
            border-left-width: 0px;
        }"""
        self.nStyle = """
        Dock > QWidget {
            border: 1px solid #000;
            border-radius: 5px;
        }"""
        self.dragStyle = """
        Dock > QWidget {
            border: 4px solid #00F;
            border-radius: 5px;
        }"""
        self.setAutoFillBackground(False)
        self.widgetArea.setStyleSheet(self.hStyle)
        self.setStretch(*size)
        if widget is not None:
            self.addWidget(widget)
        if hideTitle:
            self.hideTitleBar()
    def implements(self, name=None):
        """Used by DockArea machinery to identify this widget as a dock."""
        if name is None:
            return ['dock']
        else:
            return name == 'dock'
    def setStretch(self, x=None, y=None):
        """
        Set the 'target' size for this Dock.
        The actual size will be determined by comparing this Dock's
        stretch value to the rest of the docks it shares space with.
        """
        #print "setStretch", self, x, y
        #self._stretch = (x, y)
        if x is None:
            x = 0
        if y is None:
            y = 0
        #policy = self.sizePolicy()
        #policy.setHorizontalStretch(x)
        #policy.setVerticalStretch(y)
        #self.setSizePolicy(policy)
        self._stretch = (x, y)
        self.sigStretchChanged.emit()
        #print "setStretch", self, x, y, self.stretch()
    def stretch(self):
        """Return the (x, y) stretch values last set via setStretch()."""
        #policy = self.sizePolicy()
        #return policy.horizontalStretch(), policy.verticalStretch()
        return self._stretch
    #def stretch(self):
        #return self._stretch
    def hideTitleBar(self):
        """
        Hide the title bar for this Dock.
        This will prevent the Dock being moved by the user.
        """
        self.label.hide()
        self.labelHidden = True
        ## Without a visible label there is nothing to stack in a tab, so
        ## 'center' (tab) docking is disallowed.
        if 'center' in self.allowedAreas:
            self.allowedAreas.remove('center')
        self.updateStyle()
    def showTitleBar(self):
        """
        Show the title bar for this Dock.
        """
        self.label.show()
        self.labelHidden = False
        self.allowedAreas.add('center')
        self.updateStyle()
    def setOrientation(self, o='auto', force=False):
        """
        Sets the orientation of the title bar for this Dock.
        Must be one of 'auto', 'horizontal', or 'vertical'.
        By default ('auto'), the orientation is determined
        based on the aspect ratio of the Dock.
        """
        #print self.name(), "setOrientation", o, force
        if o == 'auto' and self.autoOrient:
            if self.container().type() == 'tab':
                o = 'horizontal'
            elif self.width() > self.height()*1.5:
                o = 'vertical'
            else:
                o = 'horizontal'
        if force or self.orientation != o:
            self.orientation = o
            self.label.setOrientation(o)
            self.updateStyle()
    def updateStyle(self):
        ## updates orientation and appearance of title bar
        #print self.name(), "update style:", self.orientation, self.moveLabel, self.label.isVisible()
        if self.labelHidden:
            self.widgetArea.setStyleSheet(self.nStyle)
        elif self.orientation == 'vertical':
            self.label.setOrientation('vertical')
            if self.moveLabel:
                #print self.name(), "reclaim label"
                self.topLayout.addWidget(self.label, 1, 0)
            self.widgetArea.setStyleSheet(self.vStyle)
        else:
            self.label.setOrientation('horizontal')
            if self.moveLabel:
                #print self.name(), "reclaim label"
                self.topLayout.addWidget(self.label, 0, 1)
            self.widgetArea.setStyleSheet(self.hStyle)
    def resizeEvent(self, ev):
        ## Re-evaluate 'auto' title orientation whenever the dock is resized.
        self.setOrientation()
        self.resizeOverlay(self.size())
    def name(self):
        """Return the text shown in this Dock's title bar."""
        return asUnicode(self.label.text())
    def container(self):
        """Return the container this Dock currently lives in."""
        return self._container
    def addWidget(self, widget, row=None, col=0, rowspan=1, colspan=1):
        """
        Add a new widget to the interior of this Dock.
        Each Dock uses a QGridLayout to arrange widgets within.
        """
        if row is None:
            row = self.currentRow
        self.currentRow = max(row+1, self.currentRow)
        self.widgets.append(widget)
        self.layout.addWidget(widget, row, col, rowspan, colspan)
        self.raiseOverlay()
    def startDrag(self):
        """Begin dragging this Dock (invoked by its DockLabel)."""
        self.drag = QtGui.QDrag(self)
        mime = QtCore.QMimeData()
        #mime.setPlainText("asd")
        self.drag.setMimeData(mime)
        self.widgetArea.setStyleSheet(self.dragStyle)
        self.update()
        ## exec_() blocks until the drag/drop completes.
        action = self.drag.exec_()
        self.updateStyle()
    def float(self):
        """Detach this Dock into a floating window (delegated to the area)."""
        self.area.floatDock(self)
    def containerChanged(self, c):
        #print self.name(), "container changed"
        self._container = c
        ## In a tab container the label acts as the tab and is positioned by
        ## the container, so this Dock must not reclaim it.
        if c.type() != 'tab':
            self.moveLabel = True
            self.label.setDim(False)
        else:
            self.moveLabel = False
        self.setOrientation(force=True)
    def raiseDock(self):
        """If this Dock is stacked underneath others, raise it to the top."""
        self.container().raiseDock(self)
    def close(self):
        """Remove this dock from the DockArea it lives inside."""
        self.setParent(None)
        self.label.setParent(None)
        self._container.apoptose()
        self._container = None
    def __repr__(self):
        return "<Dock %s %s>" % (self.name(), self.stretch())
    ## PySide bug: We need to explicitly redefine these methods
    ## or else drag/drop events will not be delivered.
    def dragEnterEvent(self, *args):
        DockDrop.dragEnterEvent(self, *args)
    def dragMoveEvent(self, *args):
        DockDrop.dragMoveEvent(self, *args)
    def dragLeaveEvent(self, *args):
        DockDrop.dragLeaveEvent(self, *args)
    def dropEvent(self, *args):
        DockDrop.dropEvent(self, *args)
class DockLabel(VerticalLabel):
    """Title-bar label for a Dock: click to raise, drag to move the dock,
    with an optional close button.

    Signals:
        sigClicked(label, ev): mouse released on the label without a drag.
        sigCloseClicked(): the close button was pressed.
    """
    sigClicked = QtCore.Signal(object, object)
    sigCloseClicked = QtCore.Signal()
    def __init__(self, text, dock, showCloseButton):
        self.dim = False
        self.fixedWidth = False
        VerticalLabel.__init__(self, text, orientation='horizontal', forceWidth=False)
        self.setAlignment(QtCore.Qt.AlignTop|QtCore.Qt.AlignHCenter)
        self.dock = dock
        self.updateStyle()
        self.setAutoFillBackground(False)
        self.startedDrag = False
        # Position of the last left-button press; None until one occurs.
        # Bug fix: previously this attribute was only created inside
        # mousePressEvent (left button only), so a move event arriving
        # without a prior left-button press (e.g. right-button drag) raised
        # AttributeError in mouseMoveEvent.
        self.pressPos = None
        self.closeButton = None
        if showCloseButton:
            self.closeButton = QtGui.QToolButton(self)
            self.closeButton.clicked.connect(self.sigCloseClicked)
            self.closeButton.setIcon(QtGui.QApplication.style().standardIcon(QtGui.QStyle.SP_TitleBarCloseButton))
    def updateStyle(self):
        """Rebuild and apply the style sheet for the current orientation and
        dim state (dim is used for unselected tabs)."""
        r = '3px'
        if self.dim:
            fg = '#aaa'
            bg = '#44a'
            border = '#339'
        else:
            fg = '#fff'
            bg = '#66c'
            border = '#55B'
        if self.orientation == 'vertical':
            self.vStyle = """DockLabel {
                background-color : %s;
                color : %s;
                border-top-right-radius: 0px;
                border-top-left-radius: %s;
                border-bottom-right-radius: 0px;
                border-bottom-left-radius: %s;
                border-width: 0px;
                border-right: 2px solid %s;
                padding-top: 3px;
                padding-bottom: 3px;
            }""" % (bg, fg, r, r, border)
            self.setStyleSheet(self.vStyle)
        else:
            self.hStyle = """DockLabel {
                background-color : %s;
                color : %s;
                border-top-right-radius: %s;
                border-top-left-radius: %s;
                border-bottom-right-radius: 0px;
                border-bottom-left-radius: 0px;
                border-width: 0px;
                border-bottom: 2px solid %s;
                padding-left: 3px;
                padding-right: 3px;
            }""" % (bg, fg, r, r, border)
            self.setStyleSheet(self.hStyle)
    def setDim(self, d):
        """Switch the dimmed appearance on/off, restyling only on change."""
        if self.dim != d:
            self.dim = d
            self.updateStyle()
    def setOrientation(self, o):
        """Change label orientation and refresh the matching style sheet."""
        VerticalLabel.setOrientation(self, o)
        self.updateStyle()
    def mousePressEvent(self, ev):
        if ev.button() == QtCore.Qt.LeftButton:
            self.pressPos = ev.pos()
            self.startedDrag = False
            ev.accept()
    def mouseMoveEvent(self, ev):
        # Only start a dock drag once a left-button press has been recorded
        # and the cursor has moved beyond the platform drag threshold.
        if not self.startedDrag and self.pressPos is not None and (ev.pos() - self.pressPos).manhattanLength() > QtGui.QApplication.startDragDistance():
            self.dock.startDrag()
        ev.accept()
    def mouseReleaseEvent(self, ev):
        if not self.startedDrag:
            self.sigClicked.emit(self, ev)
        ev.accept()
    def mouseDoubleClickEvent(self, ev):
        # Double-click floats the dock out of its area.
        if ev.button() == QtCore.Qt.LeftButton:
            self.dock.float()
    def resizeEvent (self, ev):
        # Keep the close button square and pinned to the top-right (or
        # top-left when the label is vertical).
        if self.closeButton:
            if self.orientation == 'vertical':
                size = ev.size().width()
                pos = QtCore.QPoint(0, 0)
            else:
                size = ev.size().height()
                pos = QtCore.QPoint(ev.size().width() - size, 0)
            self.closeButton.setFixedSize(QtCore.QSize(size, size))
            self.closeButton.move(pos)
        super(DockLabel,self).resizeEvent(ev)
| |
import os
import shutil
import sys
import tempfile
import six
import tests
# Source text for three cooperating modules that the tests write to a temp
# directory: 'base' imports the stdlib socket/urllib, 'patching' injects the
# green versions into 'base' via patcher.inject, and 'importing' observes
# which versions each module ended up with.
base_module_contents = """
import socket
import urllib
print("base {0} {1}".format(socket, urllib))
"""
patching_module_contents = """
from eventlet.green import socket
from eventlet.green import urllib
from eventlet import patcher
print('patcher {0} {1}'.format(socket, urllib))
patcher.inject('base', globals(), ('socket', socket), ('urllib', urllib))
del patcher
"""
import_module_contents = """
import patching
import socket
print("importing {0} {1} {2} {3}".format(patching, socket, patching.socket, patching.urllib))
"""
class ProcessBase(tests.LimitedTestCase):
    """Base class for tests that write generated modules to a temp directory
    and execute them in a child Python process."""
    TEST_TIMEOUT = 3  # starting processes is time-consuming
    def setUp(self):
        super(ProcessBase, self).setUp()
        # Bug fix: save a *copy* of sys.path.  The old code saved a bare
        # reference, so any in-place mutation (e.g. sys.path.append) made by
        # a test survived the "restore" in tearDown.
        self._saved_syspath = list(sys.path)
        self.tempdir = tempfile.mkdtemp('_patcher_test')
    def tearDown(self):
        super(ProcessBase, self).tearDown()
        # Restore in place so other references to sys.path see the rollback.
        sys.path[:] = self._saved_syspath
        shutil.rmtree(self.tempdir)
    def write_to_tempfile(self, name, contents):
        """Write *contents* to <tempdir>/<name>.py and return the full path."""
        filename = os.path.join(self.tempdir, name)
        if not filename.endswith('.py'):
            filename = filename + '.py'
        with open(filename, "w") as fd:
            fd.write(contents)
        return filename
    def launch_subprocess(self, filename):
        """Run *filename* (relative to the tempdir) in a child interpreter.

        Returns:
            (output, lines): the decoded child output and its split lines.
        """
        path = os.path.join(self.tempdir, filename)
        output = tests.run_python(path)
        if six.PY3:
            output = output.decode('utf-8')
            separator = '\n'
        else:
            # On Python 2 the output is a byte string; split on bytes.
            separator = b'\n'
        lines = output.split(separator)
        return output, lines
    def run_script(self, contents, modname=None):
        """Write *contents* as a module and immediately execute it."""
        if modname is None:
            modname = "testmod"
        self.write_to_tempfile(modname, contents)
        return self.launch_subprocess(modname)
class ImportPatched(ProcessBase):
    """Tests for patcher.inject-based module patching."""
    def test_patch_a_module(self):
        # Lay down the three cooperating modules, then run the importer.
        module_sources = (("base", base_module_contents),
                          ("patching", patching_module_contents),
                          ("importing", import_module_contents))
        for modname, contents in module_sources:
            self.write_to_tempfile(modname, contents)
        output, lines = self.launch_subprocess('importing.py')
        # 'patching' prints first (it runs during import), then 'base',
        # then the importer itself.
        for lineno, prefix in ((0, 'patcher'), (1, 'base'), (2, 'importing')):
            assert lines[lineno].startswith(prefix), repr(output)
        # The injected green modules must be visible both inside 'base' and
        # via the attributes patcher left on 'patching'.
        for lineno in (1, 2):
            assert 'eventlet.green.socket' in lines[lineno], repr(output)
            assert 'eventlet.green.urllib' in lines[lineno], repr(output)
        # Only the explicitly injected modules should be green.
        assert 'eventlet.green.httplib' not in lines[2], repr(output)
def test_import_patched_defaults():
    # Runs in an isolated child process so patching cannot leak into ours.
    tests.run_isolated('patcher_import_patched_defaults.py')
class MonkeyPatch(ProcessBase):
    """Subprocess-based tests for eventlet.patcher.monkey_patch()."""
    def test_patched_modules(self):
        # Both the socket module and urllib's internal socket reference must
        # be green after patching.
        new_mod = """
from eventlet import patcher
patcher.monkey_patch()
import socket
try:
    import urllib.request as urllib
except ImportError:
    import urllib
print("newmod {0} {1}".format(socket.socket, urllib.socket.socket))
"""
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        assert lines[0].startswith('newmod'), repr(output)
        self.assertEqual(lines[0].count('GreenSocket'), 2, repr(output))
    def test_early_patching(self):
        # Patching before importing eventlet itself must not break sleep().
        new_mod = """
from eventlet import patcher
patcher.monkey_patch()
import eventlet
eventlet.sleep(0.01)
print("newmod")
"""
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 2, repr(output))
        assert lines[0].startswith('newmod'), repr(output)
    def test_late_patching(self):
        # Patching after the hub is already running must also work.
        new_mod = """
import eventlet
eventlet.sleep(0.01)
from eventlet import patcher
patcher.monkey_patch()
eventlet.sleep(0.01)
print("newmod")
"""
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 2, repr(output))
        assert lines[0].startswith('newmod'), repr(output)
    def test_typeerror(self):
        # Unknown keyword arguments should raise TypeError naming them.
        new_mod = """
from eventlet import patcher
patcher.monkey_patch(finagle=True)
"""
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        assert lines[-2].startswith('TypeError'), repr(output)
        assert 'finagle' in lines[-2], repr(output)
    def assert_boolean_logic(self, call, expected, not_expected=''):
        """Run *call* in a subprocess and verify which modules it patched.

        *expected*/*not_expected* are comma-separated module-name lists.
        """
        expected_list = ", ".join(['"%s"' % x for x in expected.split(',') if len(x)])
        not_expected_list = ", ".join(['"%s"' % x for x in not_expected.split(',') if len(x)])
        new_mod = """
from eventlet import patcher
%s
for mod in [%s]:
    assert patcher.is_monkey_patched(mod), mod
for mod in [%s]:
    assert not patcher.is_monkey_patched(mod), mod
print("already_patched {0}".format(",".join(sorted(patcher.already_patched.keys()))))
""" % (call, expected_list, not_expected_list)
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        ap = 'already_patched'
        assert lines[0].startswith(ap), repr(output)
        patched_modules = lines[0][len(ap):].strip()
        # psycopg might or might not be patched based on installed modules
        patched_modules = patched_modules.replace("psycopg,", "")
        # ditto for MySQLdb
        patched_modules = patched_modules.replace("MySQLdb,", "")
        self.assertEqual(
            patched_modules, expected,
            "Logic:%s\nExpected: %s != %s" % (call, expected, patched_modules))
    def test_boolean(self):
        self.assert_boolean_logic("patcher.monkey_patch()",
                                  'os,select,socket,subprocess,thread,time')
    def test_boolean_all(self):
        self.assert_boolean_logic("patcher.monkey_patch(all=True)",
                                  'os,select,socket,subprocess,thread,time')
    def test_boolean_all_single(self):
        self.assert_boolean_logic("patcher.monkey_patch(all=True, socket=True)",
                                  'os,select,socket,subprocess,thread,time')
    def test_boolean_all_negative(self):
        self.assert_boolean_logic(
            "patcher.monkey_patch(all=False, socket=False, select=True)",
            'select')
    def test_boolean_single(self):
        self.assert_boolean_logic("patcher.monkey_patch(socket=True)",
                                  'socket')
    def test_boolean_double(self):
        self.assert_boolean_logic("patcher.monkey_patch(socket=True, select=True)",
                                  'select,socket')
    def test_boolean_negative(self):
        self.assert_boolean_logic("patcher.monkey_patch(socket=False)",
                                  'os,select,subprocess,thread,time')
    def test_boolean_negative2(self):
        self.assert_boolean_logic("patcher.monkey_patch(socket=False, time=False)",
                                  'os,select,subprocess,thread')
    def test_conflicting_specifications(self):
        # An explicit True wins over an explicit False for other modules.
        self.assert_boolean_logic("patcher.monkey_patch(socket=False, select=True)",
                                  'select')
test_monkey_patch_threading = """
def test_monkey_patch_threading():
tickcount = [0]
def tick():
import six
for i in six.moves.range(1000):
tickcount[0] += 1
eventlet.sleep()
def do_sleep():
tpool.execute(time.sleep, 0.5)
eventlet.spawn(tick)
w1 = eventlet.spawn(do_sleep)
w1.wait()
print(tickcount[0])
assert tickcount[0] > 900
tpool.killall()
"""
class Tpool(ProcessBase):
    """Interaction of monkey patching with the OS thread pool (tpool)."""
    TEST_TIMEOUT = 3
    @tests.skip_with_pyevent
    def test_simple(self):
        # tpool.execute must proxy plain calls and return their results.
        new_mod = """
import eventlet
from eventlet import patcher
patcher.monkey_patch()
from eventlet import tpool
print("newmod {0}".format(tpool.execute(len, "hi")))
print("newmod {0}".format(tpool.execute(len, "hi2")))
tpool.killall()
"""
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 3, output)
        assert lines[0].startswith('newmod'), repr(output)
        assert '2' in lines[0], repr(output)
        assert '3' in lines[1], repr(output)
    @tests.skip_with_pyevent
    def test_unpatched_thread(self):
        # The ticker must keep running during tpool.execute even when
        # 'time' and 'thread' are NOT monkey patched.
        new_mod = """import eventlet
eventlet.monkey_patch(time=False, thread=False)
from eventlet import tpool
import time
"""
        new_mod += test_monkey_patch_threading
        new_mod += "\ntest_monkey_patch_threading()\n"
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 2, lines)
    @tests.skip_with_pyevent
    def test_patched_thread(self):
        # Same check, but with 'thread' monkey patched.
        new_mod = """import eventlet
eventlet.monkey_patch(time=False, thread=True)
from eventlet import tpool
import time
"""
        new_mod += test_monkey_patch_threading
        new_mod += "\ntest_monkey_patch_threading()\n"
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 2, "\n".join(lines))
def test_subprocess_after_monkey_patch():
    """green subprocess must still work after a full monkey_patch()."""
    program = '''\
import sys
import eventlet
eventlet.monkey_patch()
from eventlet.green import subprocess
subprocess.Popen([sys.executable, '-c', ''], stdin=subprocess.PIPE).wait()
print('pass')
'''
    # Run via '-c' so the child patches itself before using subprocess.
    result = tests.run_python(path=None, args=['-c', program])
    assert result.rstrip() == b'pass'
class Threading(ProcessBase):
    """How the patched 'threading' module accounts for green threads."""
    def test_orig_thread(self):
        # A thread started via the *original* threading module must appear
        # in that module's _active registry, leaving the green registry
        # with just the main thread.
        new_mod = """import eventlet
eventlet.monkey_patch()
from eventlet import patcher
import threading
_threading = patcher.original('threading')
def test():
    print(repr(threading.currentThread()))
t = _threading.Thread(target=test)
t.start()
t.join()
print(len(threading._active))
print(len(_threading._active))
"""
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 4, "\n".join(lines))
        assert lines[0].startswith('<Thread'), lines[0]
        assert lines[1] == '1', lines
        assert lines[2] == '1', lines
    def test_tpool(self):
        # Code running inside a tpool worker reports as a regular Thread and
        # must not leak entries into threading._active.
        new_mod = """import eventlet
eventlet.monkey_patch()
from eventlet import tpool
import threading
def test():
    print(repr(threading.currentThread()))
tpool.execute(test)
print(len(threading._active))
"""
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 3, "\n".join(lines))
        assert lines[0].startswith('<Thread'), lines[0]
        self.assertEqual(lines[1], "1", lines[1])
    def test_greenlet(self):
        # Raw spawn_n greenlets are not wrapped: currentThread() reports the
        # main thread and the registry stays at one entry.
        new_mod = """import eventlet
eventlet.monkey_patch()
from eventlet import event
import threading
evt = event.Event()
def test():
    print(repr(threading.currentThread()))
    evt.send()
eventlet.spawn_n(test)
evt.wait()
print(len(threading._active))
"""
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 3, "\n".join(lines))
        assert lines[0].startswith('<_MainThread'), lines[0]
        self.assertEqual(lines[1], "1", lines[1])
    def test_greenthread(self):
        # eventlet.spawn green threads show up via the _GreenThread wrapper
        # and are removed from _active once finished.
        new_mod = """import eventlet
eventlet.monkey_patch()
import threading
def test():
    print(repr(threading.currentThread()))
t = eventlet.spawn(test)
t.wait()
print(len(threading._active))
"""
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 3, "\n".join(lines))
        assert lines[0].startswith('<_GreenThread'), lines[0]
        self.assertEqual(lines[1], "1", lines[1])
    def test_keyerror(self):
        # Patching alone must produce no output (e.g. no KeyError noise
        # during interpreter shutdown).
        new_mod = """import eventlet
eventlet.monkey_patch()
"""
        self.write_to_tempfile("newmod", new_mod)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 1, "\n".join(lines))
class Os(ProcessBase):
    """Tests for os-level monkey patching."""
    def test_waitpid(self):
        # The shell child exits with status 1; the green waitpid used by
        # Popen.wait() must report that exit code.
        script = """import subprocess
import eventlet
eventlet.monkey_patch(all=False, os=True)
process = subprocess.Popen("sleep 0.1 && false", shell=True)
print(process.wait())"""
        self.write_to_tempfile("newmod", script)
        output, printed_lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(printed_lines), 2, "\n".join(printed_lines))
        self.assertEqual('1', printed_lines[0], repr(output))
class GreenThreadWrapper(ProcessBase):
    """Tests the Thread-like API exposed by the green-thread wrapper."""
    # Common scaffolding: 'test' runs in a green thread and binds the
    # wrapper object to 't'; test bodies are inserted between prologue and
    # epilogue (indented to sit inside 'def test():').
    prologue = """import eventlet
eventlet.monkey_patch()
import threading
def test():
    t = threading.currentThread()
"""
    epilogue = """
t = eventlet.spawn(test)
t.wait()
"""
    def test_join(self):
        self.write_to_tempfile("newmod", self.prologue + """
    def test2():
        global t2
        t2 = threading.currentThread()
    eventlet.spawn(test2)
""" + self.epilogue + """
print(repr(t2))
t2.join()
""")
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 2, "\n".join(lines))
        assert lines[0].startswith('<_GreenThread'), lines[0]
    def test_name(self):
        # name/getName/get_name must agree, and both setter spellings must
        # update all three accessors.
        self.write_to_tempfile("newmod", self.prologue + """
    print(t.name)
    print(t.getName())
    print(t.get_name())
    t.name = 'foo'
    print(t.name)
    print(t.getName())
    print(t.get_name())
    t.setName('bar')
    print(t.name)
    print(t.getName())
    print(t.get_name())
""" + self.epilogue)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 10, "\n".join(lines))
        for i in range(0, 3):
            self.assertEqual(lines[i], "GreenThread-1", lines[i])
        for i in range(3, 6):
            self.assertEqual(lines[i], "foo", lines[i])
        for i in range(6, 9):
            self.assertEqual(lines[i], "bar", lines[i])
    def test_ident(self):
        # The wrapper's ident is the id() of the underlying greenlet.
        self.write_to_tempfile("newmod", self.prologue + """
    print(id(t._g))
    print(t.ident)
""" + self.epilogue)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 3, "\n".join(lines))
        self.assertEqual(lines[0], lines[1])
    def test_is_alive(self):
        self.write_to_tempfile("newmod", self.prologue + """
    print(t.is_alive())
    print(t.isAlive())
""" + self.epilogue)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 3, "\n".join(lines))
        self.assertEqual(lines[0], "True", lines[0])
        self.assertEqual(lines[1], "True", lines[1])
    def test_is_daemon(self):
        self.write_to_tempfile("newmod", self.prologue + """
    print(t.is_daemon())
    print(t.isDaemon())
""" + self.epilogue)
        output, lines = self.launch_subprocess('newmod.py')
        self.assertEqual(len(lines), 3, "\n".join(lines))
        self.assertEqual(lines[0], "True", lines[0])
        self.assertEqual(lines[1], "True", lines[1])
# Each of the following tests runs a dedicated script in an isolated child
# process (tests.run_isolated) so that monkey patching can never leak into
# the test runner's own interpreter.
def test_patcher_existing_locks_early():
    tests.run_isolated('patcher_existing_locks_early.py')
def test_patcher_existing_locks_late():
    tests.run_isolated('patcher_existing_locks_late.py')
def test_patcher_existing_locks_locked():
    tests.run_isolated('patcher_existing_locks_locked.py')
@tests.skip_if_CRLock_exist
def test_patcher_existing_locks_unlocked():
    tests.run_isolated('patcher_existing_locks_unlocked.py')
def test_importlib_lock():
    tests.run_isolated('patcher_importlib_lock.py')
def test_threading_condition():
    tests.run_isolated('patcher_threading_condition.py')
def test_threading_join():
    tests.run_isolated('patcher_threading_join.py')
def test_socketserver_selectors():
    tests.run_isolated('patcher_socketserver_selectors.py')
def test_blocking_select_methods_are_deleted():
    tests.run_isolated('patcher_blocking_select_methods_are_deleted.py')
def test_regular_file_readall():
    tests.run_isolated('regular_file_readall.py')
def test_threading_current():
    tests.run_isolated('patcher_threading_current.py')
| |
"""
Mesh refinement for triangular grids.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.tri.triangulation import Triangulation
import matplotlib.tri.triinterpolate
class TriRefiner(object):
    """
    Abstract base class for classes implementing mesh refinement.

    A TriRefiner encapsulates a Triangulation object and provides tools for
    mesh refinement and interpolation.

    Derived classes must implement:

    - ``refine_triangulation(return_tri_index=False, **kwargs)``, which
      returns a refined triangulation and, when *return_tri_index* is set,
      the index of the initial triangle containing each refined point; the
      extra keyword arguments *kwargs* are specific to each concrete
      implementation.

    - ``refine_field(z, triinterpolator=None, **kwargs)``, which takes the
      field values *z* at the base triangulation nodes and an optional
      :class:`~matplotlib.tri.TriInterpolator`, and returns (as a tuple) a
      refined triangular mesh together with the field interpolated at its
      nodes; the extra keyword arguments *kwargs* are specific to each
      concrete implementation.
    """
    def __init__(self, triangulation):
        # Only genuine Triangulation instances may be encapsulated.
        if isinstance(triangulation, Triangulation):
            self._triangulation = triangulation
        else:
            raise ValueError("Expected a Triangulation object")
class UniformTriRefiner(TriRefiner):
    """
    Uniform mesh refinement by recursive subdivisions.
    Parameters
    ----------
    triangulation : :class:`~matplotlib.tri.Triangulation`
        The encapsulated triangulation (to be refined)
    """
    # See Also
    # --------
    # :class:`~matplotlib.tri.CubicTriInterpolator` and
    # :class:`~matplotlib.tri.TriAnalyzer`.
    # """
    def __init__(self, triangulation):
        TriRefiner.__init__(self, triangulation)
    def refine_triangulation(self, return_tri_index=False, subdiv=3):
        """
        Computes an uniformly refined triangulation *refi_triangulation* of
        the encapsulated :attr:`triangulation`.
        This function refines the encapsulated triangulation by splitting each
        father triangle into 4 child sub-triangles built on the edges midside
        nodes, recursively (level of recursion *subdiv*).
        In the end, each triangle is hence divided into ``4**subdiv``
        child triangles.
        The default value for *subdiv* is 3 resulting in 64 refined
        subtriangles for each triangle of the initial triangulation.
        Parameters
        ----------
        return_tri_index : boolean, optional
            Boolean indicating whether an index table indicating the father
            triangle index of each point will be returned. Default value
            False.
        subdiv : integer, optional
            Recursion level for the subdivision. Defaults value 3.
            Each triangle will be divided into ``4**subdiv`` child triangles.
        Returns
        -------
        refi_triangulation : :class:`~matplotlib.tri.Triangulation`
            The returned refined triangulation
        found_index : array-like of integers
            Index of the initial triangulation containing triangle, for each
            point of *refi_triangulation*.
            Returned only if *return_tri_index* is set to True.
        """
        refi_triangulation = self._triangulation
        ntri = refi_triangulation.triangles.shape[0]
        # Computes the triangulation ancestors numbers in the reference
        # triangulation.
        ancestors = np.arange(ntri, dtype=np.int32)
        # Each pass splits every triangle in 4; ancestors keeps, for every
        # refined triangle, the index of its original father triangle.
        for _ in range(subdiv):
            refi_triangulation, ancestors = self._refine_triangulation_once(
                refi_triangulation, ancestors)
        refi_npts = refi_triangulation.x.shape[0]
        refi_triangles = refi_triangulation.triangles
        # Now we compute found_index table if needed
        if return_tri_index:
            # We have to initialize found_index with -1 because some nodes
            # may very well belong to no triangle at all, e.g., in case of
            # Delaunay Triangulation with DuplicatePointWarning.
            found_index = - np.ones(refi_npts, dtype=np.int32)
            tri_mask = self._triangulation.mask
            if tri_mask is None:
                found_index[refi_triangles] = np.repeat(ancestors,
                                                        3).reshape(-1, 3)
            else:
                # There is a subtlety here: we want to avoid whenever possible
                # that refined points container is a masked triangle (which
                # would result in artifacts in plots).
                # So we impose the numbering from masked ancestors first,
                # then overwrite it with unmasked ancestor numbers.
                ancestor_mask = tri_mask[ancestors]
                found_index[refi_triangles[ancestor_mask, :]
                            ] = np.repeat(ancestors[ancestor_mask],
                                          3).reshape(-1, 3)
                found_index[refi_triangles[~ancestor_mask, :]
                            ] = np.repeat(ancestors[~ancestor_mask],
                                          3).reshape(-1, 3)
            return refi_triangulation, found_index
        else:
            return refi_triangulation
    def refine_field(self, z, triinterpolator=None, subdiv=3):
        """
        Refines a field defined on the encapsulated triangulation.
        Returns *refi_tri* (refined triangulation), *refi_z* (interpolated
        values of the field at the node of the refined triangulation).
        Parameters
        ----------
        z : 1d-array-like of length ``n_points``
            Values of the field to refine, defined at the nodes of the
            encapsulated triangulation. (``n_points`` is the number of points
            in the initial triangulation)
        triinterpolator : :class:`~matplotlib.tri.TriInterpolator`, optional
            Interpolator used for field interpolation. If not specified,
            a :class:`~matplotlib.tri.CubicTriInterpolator` will
            be used.
        subdiv : integer, optional
            Recursion level for the subdivision. Defaults to 3.
            Each triangle will be divided into ``4**subdiv`` child triangles.
        Returns
        -------
        refi_tri : :class:`~matplotlib.tri.Triangulation` object
            The returned refined triangulation
        refi_z : 1d array of length: *refi_tri* node count.
            The returned interpolated field (at *refi_tri* nodes)
        Examples
        --------
        The main application of this method is to plot high-quality
        iso-contours on a coarse triangular grid (e.g., triangulation built
        from relatively sparse test data):
        .. plot:: mpl_examples/pylab_examples/tricontour_smooth_user.py
        """
        if triinterpolator is None:
            # The bare `matplotlib` name resolves here via the module-level
            # `import matplotlib.tri.triinterpolate` at the top of the file.
            interp = matplotlib.tri.CubicTriInterpolator(
                self._triangulation, z)
        else:
            if not isinstance(triinterpolator,
                              matplotlib.tri.TriInterpolator):
                raise ValueError("Expected a TriInterpolator object")
            interp = triinterpolator
        refi_tri, found_index = self.refine_triangulation(
            subdiv=subdiv, return_tri_index=True)
        # Private interpolator API: found_index lets the interpolator skip
        # the expensive point-location step for each refined node.
        refi_z = interp._interpolate_multikeys(
            refi_tri.x, refi_tri.y, tri_index=found_index)[0]
        return refi_tri, refi_z
    @staticmethod
    def _refine_triangulation_once(triangulation, ancestors=None):
        """
        This function refines a matplotlib.tri *triangulation* by splitting
        each triangle into 4 child-masked_triangles built on the edges midside
        nodes.
        The masked triangles, if present, are also splitted but their children
        returned masked.
        If *ancestors* is not provided, returns only a new triangulation:
        child_triangulation.
        If the array-like key table *ancestor* is given, it shall be of shape
        (ntri,) where ntri is the number of *triangulation* masked_triangles.
        In this case, the function returns
        (child_triangulation, child_ancestors)
        child_ancestors is defined so that the 4 child masked_triangles share
        the same index as their father: child_ancestors.shape = (4 * ntri,).
        """
        x = triangulation.x
        y = triangulation.y
        # According to tri.triangulation doc:
        # neighbors[i,j] is the triangle that is the neighbor
        # to the edge from point index masked_triangles[i,j] to point
        # index masked_triangles[i,(j+1)%3].
        neighbors = triangulation.neighbors
        triangles = triangulation.triangles
        npts = np.shape(x)[0]
        ntri = np.shape(triangles)[0]
        if ancestors is not None:
            ancestors = np.asarray(ancestors)
            if np.shape(ancestors) != (ntri,):
                raise ValueError(
                    "Incompatible shapes provide for triangulation"
                    ".masked_triangles and ancestors: {0} and {1}".format(
                        np.shape(triangles), np.shape(ancestors)))
        # Initiating tables refi_x and refi_y of the refined triangulation
        # points
        # hint: each apex is shared by 2 masked_triangles except the borders.
        borders = np.sum(neighbors == -1)
        added_pts = (3*ntri + borders) // 2
        refi_npts = npts + added_pts
        refi_x = np.zeros(refi_npts)
        refi_y = np.zeros(refi_npts)
        # First part of refi_x, refi_y is just the initial points
        refi_x[:npts] = x
        refi_y[:npts] = y
        # Second part contains the edge midside nodes.
        # Each edge belongs to 1 triangle (if border edge) or is shared by 2
        # masked_triangles (interior edge).
        # We first build 2 * ntri arrays of edge starting nodes (edge_elems,
        # edge_apexes) ; we then extract only the masters to avoid overlaps.
        # The so-called 'master' is the triangle with biggest index
        # The 'slave' is the triangle with lower index
        # (can be -1 if border edge)
        # For slave and master we will identify the apex pointing to the edge
        # start
        edge_elems = np.ravel(np.vstack([np.arange(ntri, dtype=np.int32),
                                         np.arange(ntri, dtype=np.int32),
                                         np.arange(ntri, dtype=np.int32)]))
        edge_apexes = np.ravel(np.vstack([np.zeros(ntri, dtype=np.int32),
                                          np.ones(ntri, dtype=np.int32),
                                          np.ones(ntri, dtype=np.int32)*2]))
        edge_neighbors = neighbors[edge_elems, edge_apexes]
        mask_masters = (edge_elems > edge_neighbors)
        # Identifying the "masters" and adding to refi_x, refi_y vec
        masters = edge_elems[mask_masters]
        apex_masters = edge_apexes[mask_masters]
        # Midside node = average of the edge's two endpoints.
        x_add = (x[triangles[masters, apex_masters]] +
                 x[triangles[masters, (apex_masters+1) % 3]]) * 0.5
        y_add = (y[triangles[masters, apex_masters]] +
                 y[triangles[masters, (apex_masters+1) % 3]]) * 0.5
        refi_x[npts:] = x_add
        refi_y[npts:] = y_add
        # Building the new masked_triangles ; each old masked_triangles hosts
        # 4 new masked_triangles
        # there are 6 pts to identify per 'old' triangle, 3 new_pt_corner and
        # 3 new_pt_midside
        new_pt_corner = triangles
        # What is the index in refi_x, refi_y of point at middle of apex iapex
        # of elem ielem ?
        # If ielem is the apex master: simple count, given the way refi_x was
        # built.
        # If ielem is the apex slave: yet we do not know ; but we will soon
        # using the neighbors table.
        new_pt_midside = np.empty([ntri, 3], dtype=np.int32)
        cum_sum = npts
        for imid in range(3):
            mask_st_loc = (imid == apex_masters)
            n_masters_loc = np.sum(mask_st_loc)
            elem_masters_loc = masters[mask_st_loc]
            new_pt_midside[:, imid][elem_masters_loc] = np.arange(
                n_masters_loc, dtype=np.int32) + cum_sum
            cum_sum += n_masters_loc
        # Now dealing with slave elems.
        # for each slave element we identify the master and then the inode
        # onces slave_masters is indentified, slave_masters_apex is such that:
        # neighbors[slaves_masters, slave_masters_apex] == slaves
        mask_slaves = np.logical_not(mask_masters)
        slaves = edge_elems[mask_slaves]
        slaves_masters = edge_neighbors[mask_slaves]
        diff_table = np.abs(neighbors[slaves_masters, :] -
                            np.outer(slaves, np.ones(3, dtype=np.int32)))
        slave_masters_apex = np.argmin(diff_table, axis=1)
        slaves_apex = edge_apexes[mask_slaves]
        new_pt_midside[slaves, slaves_apex] = new_pt_midside[
            slaves_masters, slave_masters_apex]
        # Builds the 4 child masked_triangles
        child_triangles = np.empty([ntri*4, 3], dtype=np.int32)
        child_triangles[0::4, :] = np.vstack([
            new_pt_corner[:, 0], new_pt_midside[:, 0],
            new_pt_midside[:, 2]]).T
        child_triangles[1::4, :] = np.vstack([
            new_pt_corner[:, 1], new_pt_midside[:, 1],
            new_pt_midside[:, 0]]).T
        child_triangles[2::4, :] = np.vstack([
            new_pt_corner[:, 2], new_pt_midside[:, 2],
            new_pt_midside[:, 1]]).T
        child_triangles[3::4, :] = np.vstack([
            new_pt_midside[:, 0], new_pt_midside[:, 1],
            new_pt_midside[:, 2]]).T
        child_triangulation = Triangulation(refi_x, refi_y, child_triangles)
        # Builds the child mask
        if triangulation.mask is not None:
            child_triangulation.set_mask(np.repeat(triangulation.mask, 4))
        if ancestors is None:
            return child_triangulation
        else:
            return child_triangulation, np.repeat(ancestors, 4)
| |
#!/usr/bin/env python2
# Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved.
"""
Functions for creating temporary LMDBs
Used in test_views
"""
from __future__ import absolute_import
import argparse
import os
import sys
import time
# Find the best implementation available
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import lmdb
import numpy as np
import PIL.Image
if __name__ == '__main__':
    # When run as a script, make the DIGITS repository root importable
    # before pulling in digits.config / caffe_pb2.
    dirname = os.path.dirname(os.path.realpath(__file__))
    sys.path.insert(0, os.path.join(dirname, '..', '..', '..', '..'))
    import digits.config  # noqa
    # Import digits.config first to set the path to Caffe
    import caffe_pb2  # noqa
# Default image edge length (pixels) and default dataset sizes.
IMAGE_SIZE = 10
TRAIN_IMAGE_COUNT = 100
VAL_IMAGE_COUNT = 250
def create_lmdbs(folder, image_width=None, image_height=None, image_count=None):
    """
    Creates LMDBs for generic inference
    Returns the filename for a test image
    Creates these files in "folder":
        train_images/
        train_labels/
        val_images/
        val_labels/
        train_mean.png, train_mean.binaryproto
        val_mean.png, val_mean.binaryproto
        test.png
    """
    # Fall back to module-level defaults when sizes/counts are not given.
    if image_width is None:
        image_width = IMAGE_SIZE
    if image_height is None:
        image_height = IMAGE_SIZE
    if image_count is None:
        train_image_count = TRAIN_IMAGE_COUNT
    else:
        train_image_count = image_count
    # NOTE(review): the validation-set size is never overridden by
    # image_count -- confirm this asymmetry is intended.
    val_image_count = VAL_IMAGE_COUNT
    # Used to calculate the gradients later
    yy, xx = np.mgrid[:image_height, :image_width].astype('float')
    for phase, image_count in [
            ('train', train_image_count),
            ('val', val_image_count)]:
        image_db = lmdb.open(os.path.join(folder, '%s_images' % phase),
                             map_async=True,
                             max_dbs=0)
        label_db = lmdb.open(os.path.join(folder, '%s_labels' % phase),
                             map_async=True,
                             max_dbs=0)
        # Running sum of all generated images, used for the mean image below.
        image_sum = np.zeros((image_height, image_width), 'float64')
        for i in xrange(image_count):
            # Random 2D gradient direction for this image.
            xslope, yslope = np.random.random_sample(2) - 0.5
            a = xslope * 255 / image_width
            b = yslope * 255 / image_height
            # NOTE(review): Python 2 -- image_width / 2 is integer division
            # when the size is an int; confirm intended for odd sizes.
            image = a * (xx - image_width / 2) + b * (yy - image_height / 2) + 127.5
            image_sum += image
            image = image.astype('uint8')
            pil_img = PIL.Image.fromarray(image)
            # create image Datum
            image_datum = caffe_pb2.Datum()
            image_datum.height = image.shape[0]
            image_datum.width = image.shape[1]
            image_datum.channels = 1
            s = StringIO()
            pil_img.save(s, format='PNG')
            image_datum.data = s.getvalue()
            image_datum.encoded = True
            _write_to_lmdb(image_db, str(i), image_datum.SerializeToString())
            # create label Datum: the label is the pair of slopes.
            label_datum = caffe_pb2.Datum()
            label_datum.channels, label_datum.height, label_datum.width = 1, 1, 2
            label_datum.float_data.extend(np.array([xslope, yslope]).flat)
            _write_to_lmdb(label_db, str(i), label_datum.SerializeToString())
        # close databases
        image_db.close()
        label_db.close()
        # save mean
        mean_image = (image_sum / image_count).astype('uint8')
        _save_mean(mean_image, os.path.join(folder, '%s_mean.png' % phase))
        _save_mean(mean_image, os.path.join(folder, '%s_mean.binaryproto' % phase))
    # create test image
    # The network should be able to easily produce two numbers >1
    xslope, yslope = 0.5, 0.5
    a = xslope * 255 / image_width
    b = yslope * 255 / image_height
    test_image = a * (xx - image_width / 2) + b * (yy - image_height / 2) + 127.5
    test_image = test_image.astype('uint8')
    pil_img = PIL.Image.fromarray(test_image)
    test_image_filename = os.path.join(folder, 'test.png')
    pil_img.save(test_image_filename)
    return test_image_filename
def _write_to_lmdb(db, key, value):
    """Store *value* under *key* in LMDB environment *db*.

    If the database reports it is full, the memory map size is doubled
    and the write is retried until it succeeds.
    """
    while True:
        txn = db.begin(write=True)
        try:
            txn.put(key, value)
            txn.commit()
            return
        except lmdb.MapFullError:
            txn.abort()
            # Grow the map (double the current limit) and try again.
            db.set_mapsize(db.info()['map_size'] * 2)
def _save_mean(mean, filename):
    """Save the mean image *mean* (an np.ndarray) to *filename*.

    The output format is chosen from the extension: a Caffe BlobProto
    for '.binaryproto', a plain image for '.jpg'/'.jpeg'/'.png'.
    Raises ValueError for any other extension.
    """
    if filename.endswith('.binaryproto'):
        # Serialize as a single-channel Caffe blob.
        proto = caffe_pb2.BlobProto()
        proto.num = 1
        proto.channels = 1
        proto.height, proto.width = mean.shape
        proto.data.extend(mean.astype(float).flat)
        with open(filename, 'wb') as outfile:
            outfile.write(proto.SerializeToString())
    elif filename.endswith(('.jpg', '.jpeg', '.png')):
        PIL.Image.fromarray(mean).save(filename)
    else:
        raise ValueError('unrecognized file extension')
if __name__ == '__main__':
    # Command-line entry point (Python 2: note the print statements below).
    parser = argparse.ArgumentParser(description='Create-LMDB tool - DIGITS')
    # Positional arguments
    parser.add_argument('folder',
                        help='Where to save the images'
                        )
    # Optional arguments
    parser.add_argument('-x', '--image_width',
                        type=int,
                        help='Width of the images')
    parser.add_argument('-y', '--image_height',
                        type=int,
                        help='Height of the images')
    parser.add_argument('-c', '--image_count',
                        type=int,
                        help='How many images')
    args = vars(parser.parse_args())
    # Refuse to overwrite an existing dataset folder.
    if os.path.exists(args['folder']):
        print 'ERROR: Folder already exists'
        sys.exit(1)
    else:
        os.makedirs(args['folder'])
    print 'Creating images at "%s" ...' % args['folder']
    start_time = time.time()
    create_lmdbs(args['folder'],
                 image_width=args['image_width'],
                 image_height=args['image_height'],
                 image_count=args['image_count'],
                 )
    print 'Done after %s seconds' % (time.time() - start_time,)
| |
"""
Support for Epson projector.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/media_player.epson/
"""
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
DOMAIN, MEDIA_PLAYER_SCHEMA, PLATFORM_SCHEMA, SUPPORT_NEXT_TRACK,
SUPPORT_PREVIOUS_TRACK, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP,
MediaPlayerDevice)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_HOST, CONF_NAME, CONF_PORT, CONF_SSL, STATE_OFF,
STATE_ON)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['epson-projector==0.1.3']
_LOGGER = logging.getLogger(__name__)
# Attribute of the epson_select_cmode service carrying the color mode name.
ATTR_CMODE = 'cmode'
# Key under hass.data where the configured projector entities are kept.
DATA_EPSON = 'epson'
DEFAULT_NAME = 'EPSON Projector'
SERVICE_SELECT_CMODE = 'epson_select_cmode'
# NOTE(review): custom feature bit outside the standard SUPPORT_* flags;
# confirm it does not collide with upstream media_player constants.
SUPPORT_CMODE = 33001
SUPPORT_EPSON = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE |\
    SUPPORT_CMODE | SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_STEP | \
    SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK
# Platform configuration: host is required; name/port/ssl are optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=80): cv.port,
    vol.Optional(CONF_SSL, default=False): cv.boolean,
})
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the Epson media player platform."""
    from epson_projector.const import (CMODE_LIST_SET)
    # Keep a registry of every configured projector so the custom
    # epson_select_cmode service below can address them by entity_id.
    if DATA_EPSON not in hass.data:
        hass.data[DATA_EPSON] = []
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    ssl = config.get(CONF_SSL)
    epson = EpsonProjector(async_get_clientsession(
        hass, verify_ssl=False), name, host, port, ssl)
    hass.data[DATA_EPSON].append(epson)
    async_add_entities([epson], update_before_add=True)
    async def async_service_handler(service):
        """Handle for services."""
        # Restrict to the requested entity_ids, or target every projector.
        entity_ids = service.data.get(ATTR_ENTITY_ID)
        if entity_ids:
            devices = [device for device in hass.data[DATA_EPSON]
                       if device.entity_id in entity_ids]
        else:
            devices = hass.data[DATA_EPSON]
        for device in devices:
            if service.service == SERVICE_SELECT_CMODE:
                cmode = service.data.get(ATTR_CMODE)
                await device.select_cmode(cmode)
            # Schedule a state refresh after handling the command.
            device.async_schedule_update_ha_state(True)
    # Service payload: a cmode string restricted to the library's set.
    epson_schema = MEDIA_PLAYER_SCHEMA.extend({
        vol.Required(ATTR_CMODE): vol.All(cv.string, vol.Any(*CMODE_LIST_SET))
    })
    hass.services.async_register(
        DOMAIN, SERVICE_SELECT_CMODE, async_service_handler,
        schema=epson_schema)
class EpsonProjector(MediaPlayerDevice):
    """Representation of Epson Projector Device."""
    def __init__(self, websession, name, host, port, encryption):
        """Initialize entity to control Epson projector."""
        import epson_projector as epson
        from epson_projector.const import DEFAULT_SOURCES
        self._name = name
        # NOTE(review): `encryption` is accepted but never used here; the
        # projector client is created without it -- confirm intended.
        self._projector = epson.Projector(
            host, websession=websession, port=port)
        self._cmode = None
        self._source_list = list(DEFAULT_SOURCES.values())
        self._source = None
        self._volume = None
        self._state = None
    async def async_update(self):
        """Update state of device."""
        from epson_projector.const import (
            EPSON_CODES, POWER, CMODE, CMODE_LIST, SOURCE, VOLUME, BUSY,
            SOURCE_LIST)
        is_turned_on = await self._projector.get_property(POWER)
        _LOGGER.debug("Project turn on/off status: %s", is_turned_on)
        if is_turned_on and is_turned_on == EPSON_CODES[POWER]:
            self._state = STATE_ON
            # While powered on, refresh color mode, input source and volume;
            # previous values are kept when a property lookup misses.
            cmode = await self._projector.get_property(CMODE)
            self._cmode = CMODE_LIST.get(cmode, self._cmode)
            source = await self._projector.get_property(SOURCE)
            self._source = SOURCE_LIST.get(source, self._source)
            volume = await self._projector.get_property(VOLUME)
            if volume:
                self._volume = volume
        elif is_turned_on == BUSY:
            # BUSY (presumably warming up / cooling down) is reported as ON.
            self._state = STATE_ON
        else:
            self._state = STATE_OFF
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        return SUPPORT_EPSON
    async def async_turn_on(self):
        """Turn on epson."""
        from epson_projector.const import TURN_ON
        await self._projector.send_command(TURN_ON)
    async def async_turn_off(self):
        """Turn off epson."""
        from epson_projector.const import TURN_OFF
        await self._projector.send_command(TURN_OFF)
    @property
    def source_list(self):
        """List of available input sources."""
        return self._source_list
    @property
    def source(self):
        """Get current input sources."""
        return self._source
    @property
    def volume_level(self):
        """Return the volume level of the media player (0..1)."""
        return self._volume
    async def select_cmode(self, cmode):
        """Set color mode in Epson."""
        from epson_projector.const import (CMODE_LIST_SET)
        await self._projector.send_command(CMODE_LIST_SET[cmode])
    async def async_select_source(self, source):
        """Select input source."""
        from epson_projector.const import INV_SOURCES
        selected_source = INV_SOURCES[source]
        await self._projector.send_command(selected_source)
    async def async_mute_volume(self, mute):
        """Mute (true) or unmute (false) sound."""
        from epson_projector.const import MUTE
        # NOTE(review): `mute` is ignored and MUTE is sent unconditionally;
        # presumably the device command toggles -- confirm against the API.
        await self._projector.send_command(MUTE)
    async def async_volume_up(self):
        """Increase volume."""
        from epson_projector.const import VOL_UP
        await self._projector.send_command(VOL_UP)
    async def async_volume_down(self):
        """Decrease volume."""
        from epson_projector.const import VOL_DOWN
        await self._projector.send_command(VOL_DOWN)
    async def async_media_play(self):
        """Play media via Epson."""
        from epson_projector.const import PLAY
        await self._projector.send_command(PLAY)
    async def async_media_pause(self):
        """Pause media via Epson."""
        from epson_projector.const import PAUSE
        await self._projector.send_command(PAUSE)
    async def async_media_next_track(self):
        """Skip to next."""
        from epson_projector.const import FAST
        await self._projector.send_command(FAST)
    async def async_media_previous_track(self):
        """Skip to previous."""
        from epson_projector.const import BACK
        await self._projector.send_command(BACK)
    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        attributes = {}
        if self._cmode is not None:
            attributes[ATTR_CMODE] = self._cmode
        return attributes
| |
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ron Dreslinski
# Simple test script
#
# "m5 test.py"
import os
import optparse
import sys
import m5
from m5.objects import *
m5.util.addToPath('../common')
# --------------------
# Define Command Line Options
# ====================
parser = optparse.OptionParser()
parser.add_option("-d", "--detailed", action="store_true")
parser.add_option("-t", "--timing", action="store_true")
parser.add_option("-m", "--maxtick", type="int")
parser.add_option("-c", "--numclusters",
help="Number of clusters", type="int")
parser.add_option("-n", "--numcpus",
help="Number of cpus in total", type="int")
parser.add_option("-f", "--frequency",
default = "1GHz",
help="Frequency of each CPU")
parser.add_option("--l1size",
default = "32kB")
parser.add_option("--l1latency",
default = 1)
parser.add_option("--l2size",
default = "256kB")
parser.add_option("--l2latency",
default = 10)
parser.add_option("--rootdir",
help="ROot directory of Splash2",
default="/dist/splash2/codes/")
parser.add_option("-b", "--benchmark",
help="Splash 2 benchmark to run")
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
# --------------------
# Define Splash2 Benchmarks
# ====================
class Cholesky(LiveProcess):
    """Splash2 Cholesky kernel, run on the tk23.O input file."""
    # NOTE(review): leading '/' differs from the other workloads below;
    # harmless as long as rootdir keeps its trailing slash.
    executable = options.rootdir + '/kernels/cholesky/CHOLESKY'
    cmd = 'CHOLESKY -p' + str(options.numcpus) + ' '\
          + options.rootdir + '/kernels/cholesky/inputs/tk23.O'
class FFT(LiveProcess):
    """Splash2 FFT kernel workload."""
    executable = options.rootdir + 'kernels/fft/FFT'
    cmd = 'FFT -p' + str(options.numcpus) + ' -m18'
class LU_contig(LiveProcess):
    """Splash2 LU kernel, contiguous-blocks version."""
    executable = options.rootdir + 'kernels/lu/contiguous_blocks/LU'
    cmd = 'LU -p' + str(options.numcpus)
class LU_noncontig(LiveProcess):
    """Splash2 LU kernel, non-contiguous-blocks version."""
    executable = options.rootdir + 'kernels/lu/non_contiguous_blocks/LU'
    cmd = 'LU -p' + str(options.numcpus)
class Radix(LiveProcess):
    """Splash2 radix-sort kernel (524288 keys via -n524288)."""
    executable = options.rootdir + 'kernels/radix/RADIX'
    cmd = 'RADIX -n524288 -p' + str(options.numcpus)
class Barnes(LiveProcess):
    """Splash2 Barnes application; input file selected by cpu count."""
    executable = options.rootdir + 'apps/barnes/BARNES'
    cmd = 'BARNES'
    input = options.rootdir + 'apps/barnes/input.p' + str(options.numcpus)
class FMM(LiveProcess):
    """Splash2 FMM application; 2048-body input selected by cpu count."""
    executable = options.rootdir + 'apps/fmm/FMM'
    cmd = 'FMM'
    input = options.rootdir + 'apps/fmm/inputs/input.2048.p' + str(options.numcpus)
class Ocean_contig(LiveProcess):
    """Splash2 Ocean application, contiguous-partitions version."""
    executable = options.rootdir + 'apps/ocean/contiguous_partitions/OCEAN'
    cmd = 'OCEAN -p' + str(options.numcpus)
class Ocean_noncontig(LiveProcess):
    """Splash2 Ocean application, non-contiguous-partitions version."""
    executable = options.rootdir + 'apps/ocean/non_contiguous_partitions/OCEAN'
    cmd = 'OCEAN -p' + str(options.numcpus)
class Raytrace(LiveProcess):
    """Splash2 Raytrace application, rendering the teapot scene."""
    executable = options.rootdir + 'apps/raytrace/RAYTRACE'
    cmd = 'RAYTRACE -p' + str(options.numcpus) + ' ' \
          + options.rootdir + 'apps/raytrace/inputs/teapot.env'
class Water_nsquared(LiveProcess):
    """Splash2 Water (n-squared) application; input selected by cpu count."""
    executable = options.rootdir + 'apps/water-nsquared/WATER-NSQUARED'
    cmd = 'WATER-NSQUARED'
    input = options.rootdir + 'apps/water-nsquared/input.p' + str(options.numcpus)
class Water_spatial(LiveProcess):
    """Splash2 Water (spatial) application; input selected by cpu count."""
    executable = options.rootdir + 'apps/water-spatial/WATER-SPATIAL'
    cmd = 'WATER-SPATIAL'
    input = options.rootdir + 'apps/water-spatial/input.p' + str(options.numcpus)
# --------------------
# Base L1 Cache Definition
# ====================
class L1(Cache):
    """Base per-cluster L1 cache; latency comes from --l1latency."""
    latency = options.l1latency
    mshrs = 12
    tgts_per_mshr = 8
# ----------------------
# Base L2 Cache Definition
# ----------------------
class L2(Cache):
    """Shared L2 cache; latency comes from --l2latency."""
    latency = options.l2latency
    mshrs = 92
    tgts_per_mshr = 16
    write_buffers = 8
# ----------------------
# Define the clusters with their cpus
# ----------------------
class Cluster:
    """Plain container grouping one cluster's bus, cpus and L1 cache."""
    pass
# Number of cpus per cluster (Python 2: integer division, as before).
cpusPerCluster = options.numcpus/options.numclusters
# Bus clock scales with the number of cpus sharing it.
busFrequency = Frequency(options.frequency)
busFrequency *= cpusPerCluster
all_cpus = []
all_l1s = []
all_l1buses = []
# The three CPU-model branches previously duplicated the whole
# cluster-building loop; select the CPU class once instead and build the
# clusters in a single shared loop (behavior unchanged).
if options.timing:
    cpu_class = TimingSimpleCPU
elif options.detailed:
    cpu_class = DerivO3CPU
else:
    cpu_class = AtomicSimpleCPU
clusters = [ Cluster() for i in xrange(options.numclusters)]
for j in xrange(options.numclusters):
    clusters[j].id = j
for cluster in clusters:
    # Per-cluster bus connecting the cpus to the cluster's L1.
    cluster.clusterbus = L2XBar(clock=busFrequency)
    all_l1buses += [cluster.clusterbus]
    # NOTE(review): cpu_id = i + cluster.id reproduces the original
    # numbering; ids are not globally unique when cpusPerCluster > 1.
    cluster.cpus = [cpu_class(cpu_id = i + cluster.id,
                              clock=options.frequency)
                    for i in xrange(cpusPerCluster)]
    all_cpus += cluster.cpus
    cluster.l1 = L1(size=options.l1size, assoc = 4)
    all_l1s += [cluster.l1]
# ----------------------
# Create a system, and add system wide objects
# ----------------------
system = System(cpu = all_cpus, l1_ = all_l1s, l1bus_ = all_l1buses,
                physmem = SimpleMemory(),
                membus = SystemXBar(clock = busFrequency))
system.clock = '1GHz'
system.toL2bus = L2XBar(clock = busFrequency)
system.l2 = L2(size = options.l2size, assoc = 8)
# ----------------------
# Connect the L2 cache and memory together
# ----------------------
system.physmem.port = system.membus.master
system.l2.cpu_side = system.toL2bus.slave
system.l2.mem_side = system.membus.master
# ----------------------
# Connect the L2 cache and clusters together
# ----------------------
for cluster in clusters:
    # Each cluster's L1 sits between the cluster-local bus and the
    # shared L2 bus; cpus attach both cache ports to the cluster bus.
    cluster.l1.cpu_side = cluster.clusterbus.master
    cluster.l1.mem_side = system.toL2bus.slave
    for cpu in cluster.cpus:
        cpu.icache_port = cluster.clusterbus.slave
        cpu.dcache_port = cluster.clusterbus.slave
# ----------------------
# Define the root
# ----------------------
root = Root(full_system = False, system = system)
# --------------------
# Pick the correct Splash2 Benchmarks
# ====================
# Dispatch table mapping the accepted --benchmark names to workload
# classes; replaces a 12-way if/elif chain with identical behavior.
_benchmarks = {
    'Cholesky': Cholesky,
    'FFT': FFT,
    'LUContig': LU_contig,
    'LUNoncontig': LU_noncontig,
    'Radix': Radix,
    'Barnes': Barnes,
    'FMM': FMM,
    'OceanContig': Ocean_contig,
    'OceanNoncontig': Ocean_noncontig,
    'Raytrace': Raytrace,
    'WaterNSquared': Water_nsquared,
    'WaterSpatial': Water_spatial,
}
if options.benchmark in _benchmarks:
    root.workload = _benchmarks[options.benchmark]()
else:
    # Fixed error text: --benchmark is a command-line option, not an
    # environment variable.
    m5.util.panic("""
The --benchmark option was set to something improper.
Use Cholesky, FFT, LUContig, LUNoncontig, Radix, Barnes, FMM, OceanContig,
OceanNoncontig, Raytrace, WaterNSquared, or WaterSpatial
""")
# --------------------
# Assign the workload to the cpus
# ====================
for cluster in clusters:
    for cpu in cluster.cpus:
        cpu.workload = root.workload
# ----------------------
# Run the simulation
# ----------------------
# Both the timing and detailed CPU models use the timing memory mode.
if options.timing or options.detailed:
    root.system.mem_mode = 'timing'
# instantiate configuration
m5.instantiate()
# simulate until program terminates (or until --maxtick, when given)
if options.maxtick:
    exit_event = m5.simulate(options.maxtick)
else:
    exit_event = m5.simulate(m5.MaxTick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
| |
"""
kombu.entity
================
Exchange and Queue declarations.
"""
from __future__ import absolute_import
from .abstract import MaybeChannelBound
# AMQP delivery modes: transient messages live in memory only, persistent
# messages are also stored on disk (see the Exchange docstring below).
TRANSIENT_DELIVERY_MODE = 1
PERSISTENT_DELIVERY_MODE = 2
# Maps the string aliases accepted by the API to the numeric modes.
DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE,
                  'persistent': PERSISTENT_DELIVERY_MODE}
# Public API of this module.
__all__ = ['Exchange', 'Queue']
def pretty_bindings(bindings):
    """Render *bindings* as a bracketed, comma-separated string."""
    return '[{0}]'.format(', '.join(str(binding) for binding in bindings))
class Exchange(MaybeChannelBound):
    """An Exchange declaration.
    :keyword name: See :attr:`name`.
    :keyword type: See :attr:`type`.
    :keyword channel: See :attr:`channel`.
    :keyword durable: See :attr:`durable`.
    :keyword auto_delete: See :attr:`auto_delete`.
    :keyword delivery_mode: See :attr:`delivery_mode`.
    :keyword arguments: See :attr:`arguments`.
    .. attribute:: name
        Name of the exchange. Default is no name (the default exchange).
    .. attribute:: type
        AMQP defines four default exchange types (routing algorithms) that
        covers most of the common messaging use cases. An AMQP broker can
        also define additional exchange types, so see your broker
        manual for more information about available exchange types.
            * `direct` (*default*)
                Direct match between the routing key in the message, and the
                routing criteria used when a queue is bound to this exchange.
            * `topic`
                Wildcard match between the routing key and the routing pattern
                specified in the exchange/queue binding. The routing key is
                treated as zero or more words delimited by `"."` and
                supports special wildcard characters. `"*"` matches a
                single word and `"#"` matches zero or more words.
            * `fanout`
                Queues are bound to this exchange with no arguments. Hence any
                message sent to this exchange will be forwarded to all queues
                bound to this exchange.
            * `headers`
                Queues are bound to this exchange with a table of arguments
                containing headers and values (optional). A special argument
                named "x-match" determines the matching algorithm, where
                `"all"` implies an `AND` (all pairs must match) and
                `"any"` implies `OR` (at least one pair must match).
                :attr:`arguments` is used to specify the arguments.
            This description of AMQP exchange types was shamelessly stolen
            from the blog post `AMQP in 10 minutes: Part 4`_ by
            Rajith Attapattu. This article is recommended reading.
            .. _`AMQP in 10 minutes: Part 4`:
                http://bit.ly/amqp-exchange-types
    .. attribute:: channel
        The channel the exchange is bound to (if bound).
    .. attribute:: durable
        Durable exchanges remain active when a server restarts. Non-durable
        exchanges (transient exchanges) are purged when a server restarts.
        Default is :const:`True`.
    .. attribute:: auto_delete
        If set, the exchange is deleted when all queues have finished
        using it. Default is :const:`False`.
    .. attribute:: delivery_mode
        The default delivery mode used for messages. The value is an integer,
        or alias string.
            * 1 or `"transient"`
                The message is transient. Which means it is stored in
                memory only, and is lost if the server dies or restarts.
            * 2 or "persistent" (*default*)
                The message is persistent. Which means the message is
                stored both in-memory, and on disk, and therefore
                preserved if the server dies or restarts.
        The default value is 2 (persistent).
    .. attribute:: arguments
        Additional arguments to specify when the exchange is declared.
    """
    # Re-export the module-level constants on the class for convenience.
    TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE
    PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE
    # Class-level defaults; instances override these via __init__ / kwargs.
    name = ''
    type = 'direct'
    durable = True
    auto_delete = False
    passive = False
    delivery_mode = PERSISTENT_DELIVERY_MODE
    # (attribute-name, coercion) pairs — presumably consumed by
    # MaybeChannelBound for (de)serializing declaration attributes;
    # confirm against the .abstract base class.
    attrs = (
        ('name', None),
        ('type', None),
        ('arguments', None),
        ('durable', bool),
        ('passive', bool),
        ('auto_delete', bool),
        # Resolve alias strings to integer codes; unknown values pass through.
        ('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m),
    )
    def __init__(self, name='', type='', channel=None, **kwargs):
        super(Exchange, self).__init__(**kwargs)
        self.name = name or self.name
        self.type = type or self.type
        # Bind to the channel now if one was provided.
        self.maybe_bind(channel)
    def __hash__(self):
        # 'E|' prefix keeps Exchange hashes distinct from Queue hashes
        # (which use 'Q|') for the same name.
        return hash('E|%s' % (self.name, ))
    def declare(self, nowait=False, passive=None):
        """Declare the exchange.
        Creates the exchange on the broker.
        :keyword nowait: If set the server will not respond, and a
            response will not be waited for. Default is :const:`False`.
        """
        passive = self.passive if passive is None else passive
        # The default (nameless) exchange always exists on the broker and
        # must not be declared explicitly.
        if self.name:
            return self.channel.exchange_declare(
                exchange=self.name, type=self.type, durable=self.durable,
                auto_delete=self.auto_delete, arguments=self.arguments,
                nowait=nowait, passive=passive,
            )
    def bind_to(self, exchange='', routing_key='',
                arguments=None, nowait=False, **kwargs):
        """Binds the exchange to another exchange.
        :keyword nowait: If set the server will not respond, and the call
            will not block waiting for a response. Default is :const:`False`.
        """
        # Accept either an Exchange instance or a plain exchange name.
        if isinstance(exchange, Exchange):
            exchange = exchange.name
        return self.channel.exchange_bind(destination=self.name,
                                          source=exchange,
                                          routing_key=routing_key,
                                          nowait=nowait,
                                          arguments=arguments)
    def unbind_from(self, source='', routing_key='',
                    nowait=False, arguments=None):
        """Delete previously created exchange binding from the server."""
        # Accept either an Exchange instance or a plain exchange name.
        if isinstance(source, Exchange):
            source = source.name
        return self.channel.exchange_unbind(destination=self.name,
                                            source=source,
                                            routing_key=routing_key,
                                            nowait=nowait,
                                            arguments=arguments)
    def Message(self, body, delivery_mode=None, priority=None,
                content_type=None, content_encoding=None,
                properties=None, headers=None):
        """Create message instance to be sent with :meth:`publish`.
        :param body: Message body.
        :keyword delivery_mode: Set custom delivery mode. Defaults
            to :attr:`delivery_mode`.
        :keyword priority: Message priority, 0 to 9. (currently not
            supported by RabbitMQ).
        :keyword content_type: The messages content_type. If content_type
            is set, no serialization occurs as it is assumed this is either
            a binary object, or you've done your own serialization.
            Leave blank if using built-in serialization as our library
            properly sets content_type.
        :keyword content_encoding: The character set in which this object
            is encoded. Use "binary" if sending in raw binary objects.
            Leave blank if using built-in serialization as our library
            properly sets content_encoding.
        :keyword properties: Message properties.
        :keyword headers: Message headers.
        """
        properties = {} if properties is None else properties
        dm = delivery_mode or self.delivery_mode
        # Resolve alias strings ('transient'/'persistent') to their integer
        # codes; numeric modes (1 or 2) pass through unchanged.
        properties['delivery_mode'] = \
            DELIVERY_MODES[dm] if (dm != 2 and dm != 1) else dm
        return self.channel.prepare_message(body,
                                            properties=properties,
                                            priority=priority,
                                            content_type=content_type,
                                            content_encoding=content_encoding,
                                            headers=headers)
    def publish(self, message, routing_key=None, mandatory=False,
                immediate=False, exchange=None):
        """Publish message.
        :param message: :meth:`Message` instance to publish.
        :param routing_key: Routing key.
        :param mandatory: Currently not supported.
        :param immediate: Currently not supported.
        """
        # Publish through this exchange unless an explicit name is given.
        exchange = exchange or self.name
        return self.channel.basic_publish(message,
                                          exchange=exchange,
                                          routing_key=routing_key,
                                          mandatory=mandatory,
                                          immediate=immediate)
    def delete(self, if_unused=False, nowait=False):
        """Delete the exchange declaration on server.
        :keyword if_unused: Delete only if the exchange has no bindings.
            Default is :const:`False`.
        :keyword nowait: If set the server will not respond, and a
            response will not be waited for. Default is :const:`False`.
        """
        return self.channel.exchange_delete(exchange=self.name,
                                            if_unused=if_unused,
                                            nowait=nowait)
    def __eq__(self, other):
        # Two exchanges are equal when every declaration attribute matches;
        # channel binding is deliberately not part of equality.
        if isinstance(other, Exchange):
            return (self.name == other.name and
                    self.type == other.type and
                    self.arguments == other.arguments and
                    self.durable == other.durable and
                    self.auto_delete == other.auto_delete and
                    self.delivery_mode == other.delivery_mode)
        return False
    def __repr__(self):
        return super(Exchange, self).__repr__(str(self))
    def __str__(self):
        return 'Exchange %s(%s)' % (self.name or repr(''), self.type)
    @property
    def can_cache_declaration(self):
        # Non-durable exchanges are purged on broker restart (see class
        # docstring), so only durable declarations are safe to cache.
        return self.durable
class binding(object):
    """Represents a queue or exchange binding.
    :keyword exchange: Exchange to bind to.
    :keyword routing_key: Routing key used as binding key.
    :keyword arguments: Arguments for bind operation.
    :keyword unbind_arguments: Arguments for unbind operation.
    """
    def __init__(self, exchange=None, routing_key='',
                 arguments=None, unbind_arguments=None):
        self.exchange = exchange
        self.routing_key = routing_key
        self.arguments = arguments
        self.unbind_arguments = unbind_arguments
    def declare(self, channel, nowait=False):
        """Declare destination exchange."""
        exchange = self.exchange
        # The nameless default exchange always exists; nothing to declare.
        if exchange and exchange.name:
            exchange(channel).declare(nowait=nowait)
    def bind(self, entity, nowait=False):
        """Bind entity to this binding."""
        entity.bind_to(
            exchange=self.exchange, routing_key=self.routing_key,
            arguments=self.arguments, nowait=nowait)
    def unbind(self, entity, nowait=False):
        """Unbind entity from this binding."""
        entity.unbind_from(
            self.exchange, routing_key=self.routing_key,
            arguments=self.unbind_arguments, nowait=nowait)
    def __repr__(self):
        return '<binding: {0}>'.format(self)
    def __str__(self):
        return '{0}->{1}'.format(self.exchange.name, self.routing_key)
class Queue(MaybeChannelBound):
    """A Queue declaration.
    :keyword name: See :attr:`name`.
    :keyword exchange: See :attr:`exchange`.
    :keyword routing_key: See :attr:`routing_key`.
    :keyword channel: See :attr:`channel`.
    :keyword durable: See :attr:`durable`.
    :keyword exclusive: See :attr:`exclusive`.
    :keyword auto_delete: See :attr:`auto_delete`.
    :keyword queue_arguments: See :attr:`queue_arguments`.
    :keyword binding_arguments: See :attr:`binding_arguments`.
    :keyword on_declared: See :attr:`on_declared`
    .. attribute:: name
        Name of the queue. Default is no name (default queue destination).
    .. attribute:: exchange
        The :class:`Exchange` the queue binds to.
    .. attribute:: routing_key
        The routing key (if any), also called *binding key*.
        The interpretation of the routing key depends on
        the :attr:`Exchange.type`.
            * direct exchange
                Matches if the routing key property of the message and
                the :attr:`routing_key` attribute are identical.
            * fanout exchange
                Always matches, even if the binding does not have a key.
            * topic exchange
                Matches the routing key property of the message by a primitive
                pattern matching scheme. The message routing key then consists
                of words separated by dots (`"."`, like domain names), and
                two special characters are available; star (`"*"`) and hash
                (`"#"`). The star matches any word, and the hash matches
                zero or more words. For example `"*.stock.#"` matches the
                routing keys `"usd.stock"` and `"eur.stock.db"` but not
                `"stock.nasdaq"`.
    .. attribute:: channel
        The channel the Queue is bound to (if bound).
    .. attribute:: durable
        Durable queues remain active when a server restarts.
        Non-durable queues (transient queues) are purged if/when
        a server restarts.
        Note that durable queues do not necessarily hold persistent
        messages, although it does not make sense to send
        persistent messages to a transient queue.
        Default is :const:`True`.
    .. attribute:: exclusive
        Exclusive queues may only be consumed from by the
        current connection. Setting the 'exclusive' flag
        always implies 'auto-delete'.
        Default is :const:`False`.
    .. attribute:: auto_delete
        If set, the queue is deleted when all consumers have
        finished using it. Last consumer can be cancelled
        either explicitly or because its channel is closed. If
        there was no consumer ever on the queue, it won't be
        deleted.
    .. attribute:: queue_arguments
        Additional arguments used when declaring the queue.
    .. attribute:: binding_arguments
        Additional arguments used when binding the queue.
    .. attribute:: alias
        Unused in Kombu, but applications can take advantage of this.
        For example to give alternate names to queues with automatically
        generated queue names.
    .. attribute:: on_declared
        Optional callback to be applied when the queue has been
        declared (the ``queue_declare`` method returns).
        This must be function with a signature that accepts at least 3
        positional arguments: ``(name, messages, consumers)``.
    """
    # Class-level defaults; instances override these via __init__ / kwargs.
    name = ''
    exchange = Exchange('')
    routing_key = ''
    durable = True
    exclusive = False
    auto_delete = False
    no_ack = False
    # (attribute-name, coercion) pairs — presumably consumed by
    # MaybeChannelBound for (de)serializing declaration attributes;
    # confirm against the .abstract base class.
    attrs = (
        ('name', None),
        ('exchange', None),
        ('routing_key', None),
        ('queue_arguments', None),
        ('binding_arguments', None),
        ('durable', bool),
        ('exclusive', bool),
        ('auto_delete', bool),
        ('no_ack', None),
        ('alias', None),
        ('bindings', list),
    )
    def __init__(self, name='', exchange=None, routing_key='',
                 channel=None, bindings=None, on_declared=None,
                 **kwargs):
        super(Queue, self).__init__(**kwargs)
        self.name = name or self.name
        self.exchange = exchange or self.exchange
        self.routing_key = routing_key or self.routing_key
        self.bindings = set(bindings or [])
        self.on_declared = on_declared
        # allows Queue('name', [binding(...), binding(...), ...])
        if isinstance(exchange, (list, tuple, set)):
            self.bindings |= set(exchange)
        # Multi-bindings replace the single main exchange binding.
        if self.bindings:
            self.exchange = None
        # exclusive implies auto-delete.
        if self.exclusive:
            self.auto_delete = True
        self.maybe_bind(channel)
    def bind(self, channel):
        # Preserve the on_declared callback across rebinding, since the
        # bound copy produced by the base class would otherwise lose it.
        on_declared = self.on_declared
        bound = super(Queue, self).bind(channel)
        bound.on_declared = on_declared
        return bound
    def __hash__(self):
        # 'Q|' prefix keeps Queue hashes distinct from Exchange hashes
        # (which use 'E|') for the same name.
        return hash('Q|%s' % (self.name, ))
    def when_bound(self):
        # Bind our exchange to the same channel as soon as we are bound.
        if self.exchange:
            self.exchange = self.exchange(self.channel)
    def declare(self, nowait=False):
        """Declares the queue, the exchange and binds the queue to
        the exchange."""
        # - declare main binding.
        if self.exchange:
            self.exchange.declare(nowait)
        self.queue_declare(nowait, passive=False)
        if self.exchange and self.exchange.name:
            self.queue_bind(nowait)
        # - declare extra/multi-bindings.
        for B in self.bindings:
            B.declare(self.channel)
            B.bind(self, nowait=nowait)
        return self.name
    def queue_declare(self, nowait=False, passive=False):
        """Declare queue on the server.
        :keyword nowait: Do not wait for a reply.
        :keyword passive: If set, the server will not create the queue.
            The client can use this to check whether a queue exists
            without modifying the server state.
        """
        ret = self.channel.queue_declare(queue=self.name,
                                         passive=passive,
                                         durable=self.durable,
                                         exclusive=self.exclusive,
                                         auto_delete=self.auto_delete,
                                         arguments=self.queue_arguments,
                                         nowait=nowait)
        # Anonymous queues get their server-generated name from the reply.
        if not self.name:
            self.name = ret[0]
        if self.on_declared:
            self.on_declared(*ret)
        return ret
    def queue_bind(self, nowait=False):
        """Create the queue binding on the server."""
        return self.bind_to(self.exchange, self.routing_key,
                            self.binding_arguments, nowait=nowait)
    def bind_to(self, exchange='', routing_key='',
                arguments=None, nowait=False):
        """Bind this queue to an exchange (instance or name)."""
        if isinstance(exchange, Exchange):
            exchange = exchange.name
        return self.channel.queue_bind(queue=self.name,
                                       exchange=exchange,
                                       routing_key=routing_key,
                                       arguments=arguments,
                                       nowait=nowait)
    def get(self, no_ack=None):
        """Poll the server for a new message.
        Returns the message instance if a message was available,
        or :const:`None` otherwise.
        :keyword no_ack: If enabled the broker will automatically
            ack messages.
        This method provides direct access to the messages in a
        queue using a synchronous dialogue, designed for
        specific types of applications where synchronous functionality
        is more important than performance.
        """
        no_ack = self.no_ack if no_ack is None else no_ack
        message = self.channel.basic_get(queue=self.name, no_ack=no_ack)
        if message is not None:
            # Some transports expose a converter from their native message
            # format; apply it when present.
            m2p = getattr(self.channel, 'message_to_python', None)
            if m2p:
                message = m2p(message)
        return message
    def purge(self, nowait=False):
        """Remove all ready messages from the queue."""
        return self.channel.queue_purge(queue=self.name,
                                        nowait=nowait) or 0
    def consume(self, consumer_tag='', callback=None,
                no_ack=None, nowait=False):
        """Start a queue consumer.
        Consumers last as long as the channel they were created on, or
        until the client cancels them.
        :keyword consumer_tag: Unique identifier for the consumer. The
            consumer tag is local to a connection, so two clients
            can use the same consumer tags. If this field is empty
            the server will generate a unique tag.
        :keyword no_ack: If enabled the broker will automatically ack
            messages.
        :keyword nowait: Do not wait for a reply.
        :keyword callback: callback called for each delivered message
        """
        if no_ack is None:
            no_ack = self.no_ack
        return self.channel.basic_consume(queue=self.name,
                                          no_ack=no_ack,
                                          consumer_tag=consumer_tag or '',
                                          callback=callback,
                                          nowait=nowait)
    def cancel(self, consumer_tag):
        """Cancel a consumer by consumer tag."""
        return self.channel.basic_cancel(consumer_tag)
    def delete(self, if_unused=False, if_empty=False, nowait=False):
        """Delete the queue.
        :keyword if_unused: If set, the server will only delete the queue
            if it has no consumers. A channel error will be raised
            if the queue has consumers.
        :keyword if_empty: If set, the server will only delete the queue
            if it is empty. If it is not empty a channel error will be raised.
        :keyword nowait: Do not wait for a reply.
        """
        return self.channel.queue_delete(queue=self.name,
                                         if_unused=if_unused,
                                         if_empty=if_empty,
                                         nowait=nowait)
    def queue_unbind(self, arguments=None, nowait=False):
        """Delete this queue's main binding on the server."""
        return self.unbind_from(self.exchange, self.routing_key,
                                arguments, nowait)
    def unbind_from(self, exchange='', routing_key='',
                    arguments=None, nowait=False):
        """Unbind queue by deleting the binding from the server."""
        # Accept either an Exchange instance or a plain exchange name,
        # mirroring bind_to; previously a plain string raised
        # AttributeError on the unconditional `exchange.name` access.
        if isinstance(exchange, Exchange):
            exchange = exchange.name
        return self.channel.queue_unbind(queue=self.name,
                                         exchange=exchange,
                                         routing_key=routing_key,
                                         arguments=arguments,
                                         nowait=nowait)
    def __eq__(self, other):
        # Equality covers every declaration attribute; channel binding and
        # on_declared are deliberately excluded.
        if isinstance(other, Queue):
            return (self.name == other.name and
                    self.exchange == other.exchange and
                    self.routing_key == other.routing_key and
                    self.queue_arguments == other.queue_arguments and
                    self.binding_arguments == other.binding_arguments and
                    self.durable == other.durable and
                    self.exclusive == other.exclusive and
                    self.auto_delete == other.auto_delete)
        return False
    def __repr__(self):
        s = super(Queue, self).__repr__
        if self.bindings:
            return s('Queue %r -> %s' % (
                self.name,
                pretty_bindings(self.bindings),
            ))
        return s('Queue %r -> %s -> %r' % (
            self.name,
            self.exchange,
            self.routing_key or '',
        ))
    @property
    def can_cache_declaration(self):
        # Non-durable queues are purged on broker restart (see class
        # docstring), so only durable declarations are safe to cache.
        return self.durable
    @classmethod
    def from_dict(cls, queue, **options):
        """Create a queue (and its exchange) from a flat options dict.
        :param queue: queue name.
        :keyword options: flat mapping of exchange/queue/binding options;
            `exchange_*`/`queue_*` keys take precedence over the shared
            `durable`/`auto_delete` keys.
        """
        binding_key = options.get('binding_key') or options.get('routing_key')
        e_durable = options.get('exchange_durable')
        if e_durable is None:
            e_durable = options.get('durable')
        e_auto_delete = options.get('exchange_auto_delete')
        if e_auto_delete is None:
            e_auto_delete = options.get('auto_delete')
        q_durable = options.get('queue_durable')
        if q_durable is None:
            q_durable = options.get('durable')
        q_auto_delete = options.get('queue_auto_delete')
        if q_auto_delete is None:
            q_auto_delete = options.get('auto_delete')
        e_arguments = options.get('exchange_arguments')
        q_arguments = options.get('queue_arguments')
        b_arguments = options.get('binding_arguments')
        bindings = options.get('bindings')
        exchange = Exchange(options.get('exchange'),
                            type=options.get('exchange_type'),
                            delivery_mode=options.get('delivery_mode'),
                            routing_key=options.get('routing_key'),
                            durable=e_durable,
                            auto_delete=e_auto_delete,
                            arguments=e_arguments)
        # Construct via cls so subclasses of Queue get instances of
        # themselves (first parameter renamed from the misleading `self`).
        return cls(queue,
                   exchange=exchange,
                   routing_key=binding_key,
                   durable=q_durable,
                   exclusive=options.get('exclusive'),
                   auto_delete=q_auto_delete,
                   no_ack=options.get('no_ack'),
                   queue_arguments=q_arguments,
                   binding_arguments=b_arguments,
                   bindings=bindings)
| |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import boto3
from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.models import Connection
from tests.compat import mock
try:
from moto import mock_emr, mock_dynamodb2, mock_sts, mock_iam
except ImportError:
mock_emr = None
mock_dynamodb2 = None
mock_sts = None
mock_iam = None
class TestAwsHook(unittest.TestCase):
    """Tests for AwsHook credential resolution and boto3 client/resource
    construction, using moto mocks when available."""

    @unittest.skipIf(mock_emr is None, 'mock_emr package not present')
    @mock_emr
    def test_get_client_type_returns_a_boto3_client_of_the_requested_type(self):
        client = boto3.client('emr', region_name='us-east-1')
        # Sanity check: the mocked EMR account must start out empty.
        if len(client.list_clusters()['Clusters']):
            raise ValueError('AWS not properly mocked')
        hook = AwsHook(aws_conn_id='aws_default')
        client_from_hook = hook.get_client_type('emr')
        self.assertEqual(client_from_hook.list_clusters()['Clusters'], [])

    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamo2 package not present')
    @mock_dynamodb2
    def test_get_resource_type_returns_a_boto3_resource_of_the_requested_type(self):
        hook = AwsHook(aws_conn_id='aws_default')
        resource_from_hook = hook.get_resource_type('dynamodb')
        # this table needs to be created in production
        table = resource_from_hook.create_table(
            TableName='test_airflow',
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'name',
                    'AttributeType': 'S'
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            }
        )
        table.meta.client.get_waiter(
            'table_exists').wait(TableName='test_airflow')
        self.assertEqual(table.item_count, 0)

    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamo2 package not present')
    @mock_dynamodb2
    def test_get_session_returns_a_boto3_session(self):
        hook = AwsHook(aws_conn_id='aws_default')
        session_from_hook = hook.get_session()
        resource_from_session = session_from_hook.resource('dynamodb')
        table = resource_from_session.create_table(
            TableName='test_airflow',
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'name',
                    'AttributeType': 'S'
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            }
        )
        table.meta.client.get_waiter(
            'table_exists').wait(TableName='test_airflow')
        self.assertEqual(table.item_count, 0)

    @mock.patch.object(AwsHook, 'get_connection')
    def test_get_credentials_from_login_with_token(self, mock_get_connection):
        mock_connection = Connection(login='aws_access_key_id',
                                     password='aws_secret_access_key',
                                     extra='{"aws_session_token": "test_token"}'
                                     )
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        credentials_from_hook = hook.get_credentials()
        self.assertEqual(credentials_from_hook.access_key, 'aws_access_key_id')
        self.assertEqual(credentials_from_hook.secret_key, 'aws_secret_access_key')
        self.assertEqual(credentials_from_hook.token, 'test_token')

    @mock.patch.object(AwsHook, 'get_connection')
    def test_get_credentials_from_login_without_token(self, mock_get_connection):
        mock_connection = Connection(login='aws_access_key_id',
                                     password='aws_secret_access_key',
                                     )
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        credentials_from_hook = hook.get_credentials()
        self.assertEqual(credentials_from_hook.access_key, 'aws_access_key_id')
        self.assertEqual(credentials_from_hook.secret_key, 'aws_secret_access_key')
        self.assertIsNone(credentials_from_hook.token)

    @mock.patch.object(AwsHook, 'get_connection')
    def test_get_credentials_from_extra_with_token(self, mock_get_connection):
        mock_connection = Connection(
            extra='{"aws_access_key_id": "aws_access_key_id",'
                  '"aws_secret_access_key": "aws_secret_access_key",'
                  ' "aws_session_token": "session_token"}'
        )
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        credentials_from_hook = hook.get_credentials()
        self.assertEqual(credentials_from_hook.access_key, 'aws_access_key_id')
        self.assertEqual(credentials_from_hook.secret_key, 'aws_secret_access_key')
        # Fixed: was the deprecated `assertEquals` alias (removed in
        # Python 3.12); every other assertion here uses assertEqual.
        self.assertEqual(credentials_from_hook.token, 'session_token')

    @mock.patch.object(AwsHook, 'get_connection')
    def test_get_credentials_from_extra_without_token(self, mock_get_connection):
        mock_connection = Connection(
            extra='{"aws_access_key_id": "aws_access_key_id",'
                  '"aws_secret_access_key": "aws_secret_access_key"}'
        )
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        credentials_from_hook = hook.get_credentials()
        self.assertEqual(credentials_from_hook.access_key, 'aws_access_key_id')
        self.assertEqual(credentials_from_hook.secret_key, 'aws_secret_access_key')
        self.assertIsNone(credentials_from_hook.token)

    @mock.patch('airflow.contrib.hooks.aws_hook._parse_s3_config',
                return_value=('aws_access_key_id', 'aws_secret_access_key'))
    @mock.patch.object(AwsHook, 'get_connection')
    def test_get_credentials_from_extra_with_s3_config_and_profile(
        self, mock_get_connection, mock_parse_s3_config
    ):
        mock_connection = Connection(
            extra='{"s3_config_format": "aws", '
                  '"profile": "test", '
                  '"s3_config_file": "aws-credentials", '
                  '"region_name": "us-east-1"}')
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        hook._get_credentials(region_name=None)
        mock_parse_s3_config.assert_called_once_with(
            'aws-credentials',
            'aws',
            'test'
        )

    @unittest.skipIf(mock_sts is None, 'mock_sts package not present')
    @mock.patch.object(AwsHook, 'get_connection')
    @mock_sts
    def test_get_credentials_from_role_arn(self, mock_get_connection):
        mock_connection = Connection(
            extra='{"role_arn":"arn:aws:iam::123456:role/role_arn"}')
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        credentials_from_hook = hook.get_credentials()
        # Fixed credentials returned by moto's mocked STS AssumeRole.
        self.assertEqual(credentials_from_hook.access_key, 'AKIAIOSFODNN7EXAMPLE')
        self.assertEqual(credentials_from_hook.secret_key,
                         'aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY')
        self.assertEqual(credentials_from_hook.token,
                         'BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh'
                         '3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4I'
                         'gRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15'
                         'fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE')

    @unittest.skipIf(mock_sts is None, 'mock_sts package not present')
    @mock.patch.object(AwsHook, 'get_connection')
    @mock_sts
    def test_get_credentials_from_role_arn_with_external_id(self, mock_get_connection):
        mock_connection = Connection(
            extra='{"role_arn":"arn:aws:iam::123456:role/role_arn",'
                  ' "external_id":"external_id"}')
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        credentials_from_hook = hook.get_credentials()
        self.assertEqual(credentials_from_hook.access_key, 'AKIAIOSFODNN7EXAMPLE')
        self.assertEqual(credentials_from_hook.secret_key,
                         'aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY')
        self.assertEqual(credentials_from_hook.token,
                         'BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh'
                         '3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4I'
                         'gRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15'
                         'fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE')

    @unittest.skipIf(mock_iam is None, 'mock_iam package not present')
    @mock_iam
    def test_expand_role(self):
        conn = boto3.client('iam', region_name='us-east-1')
        conn.create_role(RoleName='test-role', AssumeRolePolicyDocument='some policy')
        hook = AwsHook()
        arn = hook.expand_role('test-role')
        expect_arn = conn.get_role(RoleName='test-role').get('Role').get('Arn')
        self.assertEqual(arn, expect_arn)
# Allow running this test module directly (`python <module>.py`).
if __name__ == '__main__':
    unittest.main()
| |
from __future__ import absolute_import
import copy
import inspect
import types
import numpy as np
from ..utils.np_utils import to_categorical
from ..models import Sequential
class BaseWrapper(object):
    '''Base class for the Keras scikit-learn wrapper.
    Warning: This class should not be used directly.
    Use descendant classes instead.
    # Arguments
        build_fn: callable function or class instance
        sk_params: model parameters & fitting parameters
    The build_fn should construct, compile and return a Keras model, which
    will then be used to fit/predict. One of the following
    three values could be passed to build_fn:
    1. A function
    2. An instance of a class that implements the __call__ method
    3. None. This means you implement a class that inherits from either
    `KerasClassifier` or `KerasRegressor`. The __call__ method of the
    present class will then be treated as the default build_fn.
    `sk_params` takes both model parameters and fitting parameters. Legal model
    parameters are the arguments of `build_fn`. Note that like all other
    estimators in scikit-learn, 'build_fn' should provide default values for
    its arguments, so that you could create the estimator without passing any
    values to `sk_params`.
    `sk_params` could also accept parameters for calling `fit`, `predict`,
    `predict_proba`, and `score` methods (e.g., `nb_epoch`, `batch_size`).
    fitting (predicting) parameters are selected in the following order:
    1. Values passed to the dictionary arguments of
    `fit`, `predict`, `predict_proba`, and `score` methods
    2. Values passed to `sk_params`
    3. The default values of the `keras.models.Sequential`
    `fit`, `predict`, `predict_proba` and `score` methods
    When using scikit-learn's `grid_search` API, legal tunable parameters are
    those you could pass to `sk_params`, including fitting parameters.
    In other words, you could use `grid_search` to search for the best
    `batch_size` or `nb_epoch` as well as the model parameters.
    '''
    def __init__(self, build_fn=None, **sk_params):
        self.build_fn = build_fn
        self.sk_params = sk_params
    def get_params(self, deep=True):
        '''Get parameters for this estimator.
        # Arguments
            deep: boolean, optional
                If True, will return the parameters for this estimator and
                contained sub-objects that are estimators.
        # Returns
            params : dict
                Dictionary of parameter names mapped to their values.
        '''
        # Deep-copy so callers mutating the result cannot corrupt sk_params.
        res = copy.deepcopy(self.sk_params)
        res.update({'build_fn': self.build_fn})
        return res
    def set_params(self, **params):
        '''Set the parameters of this estimator.
        # Arguments
            params: dict
                Dictionary of parameter names mapped to their values.
        # Returns
            self
        '''
        self.sk_params.update(params)
        return self
    def fit(self, X, y, **kwargs):
        '''Construct a new model with build_fn and fit the model according
        to the given training data.
        # Arguments
            X : array-like, shape `(n_samples, n_features)`
                Training samples where n_samples in the number of samples
                and n_features is the number of features.
            y : array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
                True labels for X.
            kwargs: dictionary arguments
                Legal arguments are the arguments of `Sequential.fit`
        # Returns
            history : object
                details about the training history at each epoch.
        '''
        # Three build_fn conventions: None -> subclass __call__,
        # callable instance -> its __call__, plain function -> itself.
        if self.build_fn is None:
            self.model = self.__call__(**self.filter_sk_params(self.__call__))
        elif not isinstance(self.build_fn, types.FunctionType):
            self.model = self.build_fn(
                **self.filter_sk_params(self.build_fn.__call__))
        else:
            self.model = self.build_fn(**self.filter_sk_params(self.build_fn))
        # One-hot encode integer labels when the model's loss expects
        # 2-D targets (assumes model.loss is a function — TODO confirm
        # for string-identified losses).
        if self.model.loss.__name__ == 'categorical_crossentropy' and len(y.shape) != 2:
            y = to_categorical(y)
        fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit))
        fit_args.update(kwargs)
        history = self.model.fit(X, y, **fit_args)
        return history
    def filter_sk_params(self, fn, override=None):
        '''Filter sk_params and return those in fn's arguments
        # Arguments
            fn : arbitrary function
            override: dictionary, values to override sk_params
        # Returns
            res : dictionary containing variables
                in both sk_params and fn's arguments.
        '''
        # `override=None` instead of a mutable `{}` default (shared-state
        # anti-pattern); behavior for callers is unchanged.
        override = {} if override is None else override
        res = {}
        # inspect.getargspec was removed in Python 3.11; prefer
        # getfullargspec when available while staying py2-compatible.
        arg_spec_fn = getattr(inspect, 'getfullargspec', None) or \
            inspect.getargspec
        fn_args = arg_spec_fn(fn)[0]
        for name, value in self.sk_params.items():
            if name in fn_args:
                res.update({name: value})
        res.update(override)
        return res
class KerasClassifier(BaseWrapper):
    '''Implementation of the scikit-learn classifier API for Keras.
    '''
    def predict(self, X, **kwargs):
        '''Returns the class predictions for the given test data.
        # Arguments
            X: array-like, shape `(n_samples, n_features)`
                Test samples where n_samples in the number of samples
                and n_features is the number of features.
            kwargs: dictionary arguments
                Legal arguments are the arguments of `Sequential.predict_classes`.
        # Returns
            preds: array-like, shape `(n_samples,)`
                Class predictions.
        '''
        predict_args = self.filter_sk_params(Sequential.predict_classes, kwargs)
        return self.model.predict_classes(X, **predict_args)
    def predict_proba(self, X, **kwargs):
        '''Returns class probability estimates for the given test data.
        # Arguments
            X: array-like, shape `(n_samples, n_features)`
                Test samples where n_samples in the number of samples
                and n_features is the number of features.
            kwargs: dictionary arguments
                Legal arguments are the arguments of `Sequential.predict_classes`.
        # Returns
            proba: array-like, shape `(n_samples, n_outputs)`
                Class probability estimates.
        '''
        proba_args = self.filter_sk_params(Sequential.predict_proba, kwargs)
        return self.model.predict_proba(X, **proba_args)
    def score(self, X, y, **kwargs):
        '''Returns the mean accuracy on the given test data and labels.
        # Arguments
            X: array-like, shape `(n_samples, n_features)`
                Test samples where n_samples in the number of samples
                and n_features is the number of features.
            y: array-like, shape `(n_samples,)` or `(n_samples, n_outputs)`
                True labels for X.
            kwargs: dictionary arguments
                Legal arguments are the arguments of `Sequential.evaluate`.
        # Returns
            score: float
                Mean accuracy of predictions on X wrt. y.
        '''
        eval_args = self.filter_sk_params(Sequential.evaluate, kwargs)
        # Force accuracy reporting so evaluate returns (loss, accuracy).
        eval_args['show_accuracy'] = True
        loss, accuracy = self.model.evaluate(X, y, **eval_args)
        return accuracy
class KerasRegressor(BaseWrapper):
    '''Implementation of the scikit-learn regressor API for Keras.
    '''
    def predict(self, X, **kwargs):
        '''Returns predictions for the given test data.
        # Arguments
            X: array-like, shape `(n_samples, n_features)`
                Test samples where n_samples in the number of samples
                and n_features is the number of features.
            kwargs: dictionary arguments
                Legal arguments are the arguments of `Sequential.predict`.
        # Returns
            preds: array-like, shape `(n_samples,)`
                Predictions.
        '''
        predict_args = self.filter_sk_params(Sequential.predict, kwargs)
        return self.model.predict(X, **predict_args)
    def score(self, X, y, **kwargs):
        '''Returns the mean accuracy on the given test data and labels.
        # Arguments
            X: array-like, shape `(n_samples, n_features)`
                Test samples where n_samples in the number of samples
                and n_features is the number of features.
            y: array-like, shape `(n_samples,)`
                True labels for X.
            kwargs: dictionary arguments
                Legal arguments are the arguments of `Sequential.evaluate`.
        # Returns
            score: float
                Mean accuracy of predictions on X wrt. y.
        '''
        eval_args = self.filter_sk_params(Sequential.evaluate, kwargs)
        # Accuracy reporting off: evaluate then returns the loss only.
        eval_args['show_accuracy'] = False
        loss = self.model.evaluate(X, y, **eval_args)
        return loss
| |
import csv
import logging
import os
import eartime_table
import wrangler
logger = logging.getLogger('meeting_parser').getChild(__name__)
def find_header_match(keys, candidates):
    """Return the first candidate (stripped, lowercased) present in keys.

    The matching key is removed from `keys` (mutated in place) so that a
    column cannot be matched twice.  Returns None when nothing matches.
    TODO: normalise `keys` as well, not only the candidates (callers
    currently lowercase the fieldnames before calling).
    """
    for candidate in candidates:
        normalised = candidate.strip().lower()
        if normalised in keys:
            keys.remove(normalised)
            return normalised
    return None
def csv_identify(db_pathname, csv_pathname, header, rows):
    """Record a CSV file whose header could not be used for parsing.

    If the header row is not a recognised table type, all `rows` rows are
    counted as discarded; the file is registered in the database either way.
    """
    table = eartime_table.Table('', [header])
    if table.tabletype == 'Unknown':
        logger.error('Aborted csv parse because of unrecognised header: ' + str(header))
        discarded = rows
    else:
        discarded = 0
    wrangler.add_file_to_db(db_pathname, csv_pathname, discarded)
def header_type(header):
    """Classify a raw CSV line by building a one-row Table from it."""
    return eartime_table.Table('', [header.split(',')]).tabletype
def find_header1():
    # NOTE(review): dead, broken helper.  `sample` and `csvfile` are not
    # defined in this scope (no parameters, no globals in this file), so
    # calling this raises NameError.  It appears to be an earlier attempt
    # at the header-skipping logic now provided by find_header() below.
    # Kept as-is; consider deleting.
    header_detected = csv.Sniffer().has_header(sample)
    skips = 0
    seek_dist = 0  # NOTE(review): written but never read
    # Try dropping up to 10 leading lines until Sniffer accepts a header.
    while not header_detected and (skips < 10):
        skips += 1
        try:
            header_detected = csv.Sniffer().has_header(sample.split('\n', maxsplit=skips)[-1])
        except csv.Error:
            logger.debug('csv parse issue')
            header_detected = False
            break
    if header_detected:
        header_str = sample
        if skips > 0:
            header_str = sample.split('\n', maxsplit=skips + 1)[-2]
        logger.info('found a header ' + header_str)
    # logger.warning('CSV file missing header row, might be a format issue')
    # fieldnames = ['Date of meeting', 'Minister', 'Name of organisation', 'Purpose of meeting']
    csvfile.seek(0)
    #skipped = 0
    ##for line in csvfile:
    #    skipped += 1
    #    seek_dist += len(line)
    #    if skipped == skips:
    #        break
    #csvfile.seek(seek_dist)
    # Advance the file past the skipped junk lines.
    if header_detected:
        for i in range(skips):
            next(csvfile)
def find_header(csvfile):
    """Advance `csvfile` just past any junk lines preceding a 'meeting' header.

    Scans at most 10 lines looking for a row classified as 'meeting', then
    rewinds and re-consumes exactly the junk lines so the next read returns
    the header.  If no header is found within 10 lines the file is simply
    rewound to the start.
    """
    skipped = 0
    for line in csvfile:
        if header_type(line) == 'meeting':
            break
        skipped += 1
        if skipped > 10:
            break
    csvfile.seek(0)
    if skipped <= 10:
        for _ in range(skipped):
            next(csvfile)
def next_header(csvfile):
    """Advance `csvfile` past junk lines preceding a 'meeting' header.

    This was a byte-for-byte duplicate of find_header(); delegate to it so
    the two implementations cannot drift apart.  Returns None, like the
    original.
    """
    find_header(csvfile)
def extract_tables(csv_pathname):
    """Split a CSV file into per-table line lists.

    A new table starts at every line whose header_type() is not 'Unknown';
    all other lines are appended to the current table.  tables[0] collects
    any preamble before the first recognised header (and may be empty).
    Lines are stripped of trailing spaces, commas and newlines.  On a
    decode error the tables collected so far are returned.
    """
    tables = [[]]
    try:
        with open(csv_pathname, 'r') as csvfile:
            for line in csvfile:
                stripped = line.rstrip(' ,\n')
                # Classify the raw line (including its trailing junk).
                if header_type(line) != 'Unknown':
                    tables.append([stripped])
                else:
                    tables[-1].append(stripped)
    except UnicodeDecodeError:
        logger.error("Can't decode csv file")
    return tables
def process(db_pathname, csv_pathname):
    """Parse all 'meeting' tables in a CSV file and load rows into the db.

    The department name is derived from the CSV file's parent directory.
    Returns whatever wrangler.insert_table_rows returns (presumably a row
    count -- confirm in wrangler).
    """
    logger.info('Starting to process csv ' + csv_pathname)
    dept = os.path.basename(os.path.dirname(csv_pathname))
    tables = extract_tables(csv_pathname)
    discards = 0
    db_rows = []
    for index, t in enumerate(tables):
        #for t in tables:
        if len(t) > 0 and header_type(t[0]) == 'meeting':
            try:
                dialect = csv.Sniffer().sniff(str(t))
            except csv.Error:
                logger.error('Aborted csv parse because of unrecognised format: ' + csv_pathname)
                discards += len(t)
                continue
            fieldnames = None
            reader = csv.DictReader(t, fieldnames, dialect)
            if reader.fieldnames is None:
                # TODO problem with bis-ministerial-gifts-april-to-june.csv
                discards += len(t)
                continue
            # Normalise column names so find_header_match can compare them.
            reader.fieldnames = list(map(lambda s: s.strip().lower(), reader.fieldnames))
            csv_keys = reader.fieldnames.copy()
            keymap = {}
            # complete generic method taking the csv_keys and the list of candidate lists
            rep = 'Unknown'
            keymap['rep'] = find_header_match(csv_keys, eartime_table.rep_keys)
            # No minister column: try to recover the name from the tail of the
            # previous table (often a quoted heading line above the table).
            if keymap['rep'] is None and index > 0 and len(tables[index - 1]) > 0:
                if len(tables[index - 1][-1].split('"')) > 1:
                    rep = tables[index - 1][-1].split('"')[1]
                else:
                    rep = tables[index - 1][-1].split(',')[0]
                if rep is None or rep == '':
                    # Last line was blank-ish; look one line further up.
                    if len(tables[index - 1][-2].split('"')) > 1:
                        rep = tables[index - 1][-2].split('"')[1]
                    else:
                        rep = tables[index - 1][-2].split(',')[0]
                if rep is None or rep == '':
                    rep = 'Unknown'
            keymap['date'] = find_header_match(csv_keys, eartime_table.date_keys)
            if keymap['date'] is None:
                discards += len(t)
                continue
            keymap['org'] = find_header_match(csv_keys, eartime_table.org_keys)
            if keymap['org'] is None:
                discards += len(t)
                continue
            keymap['meet'] = find_header_match(csv_keys, eartime_table.meet_keys)
            if keymap['meet'] is None:
                discards += len(t)
                continue
            for row in reader:
                #if all(map(lambda x: row[x] is None or row[x] == '', keymap.values())):
                # Carry the last seen minister forward over blank cells.
                # NOTE(review): when keymap['rep'] is None this stores the
                # value under the key None -- presumably harmless, confirm.
                if keymap['rep'] is None or row[keymap['rep']] == '':
                    row[keymap['rep']] = rep
                else:
                    rep = row[keymap['rep']]
                # Entirely empty row: drop silently.
                if (row[keymap['date']] == '' or row[keymap['date']] is None) and (row[keymap['org']] == '' or row[keymap['org']] is None) and (row[keymap['meet']] == '' or row[keymap['meet']] is None):
                    # logger.debug('Discarded partial csv row: ' + str(row))
                    continue
                elif row[keymap['date']] == '' and row[keymap['meet']] is None and len(db_rows) > 0:
                    # Continuation line: append the extra organisation to the
                    # previous row's purpose field (index 3).
                    db_rows[-1][3] += ', '
                    db_rows[-1][3] += row[keymap['org']]
                elif (row[keymap['meet']] is None or row[keymap['meet']] == '') and (row[keymap['date']] is not None and len(row[keymap['date']]) > 0) and (row[keymap['org']] is not None and len(row[keymap['org']]) > 0):
                    # Date and org present but no purpose recorded.
                    db_rows.append([row[keymap['rep']], row[keymap['date']], row[keymap['org']], 'Not recorded by reporting department', dept])
                elif (row[keymap['date']] is None or row[keymap['date']] == '') and (row[keymap['meet']] is not None and len(row[keymap['meet']]) > 0) and (row[keymap['org']] is not None and len(row[keymap['org']]) > 0) and len(db_rows) > 0:
                    # Missing date: reuse a value from the previous row.
                    # NOTE(review): db_rows[-1][2] is the *organisation*
                    # column, not the date (index 1) -- looks suspicious,
                    # confirm intended.
                    db_rows.append([row[keymap['rep']], db_rows[-1][2], row[keymap['org']], row[keymap['meet']], dept])
                elif row[keymap['date']] is None or row[keymap['rep']] is None or row[keymap['org']] is None:
                    logger.warning('Discarded partial csv row: ' + str(row))
                    discards += 1
                elif row[keymap['date']] == '' or len(row[keymap['date']]) > 27:
                    # Overlong 'date' cells are almost certainly not dates.
                    logger.warning('Discarded partial csv row: ' + str(row))
                    discards += 1
                else:
                    db_rows.append([row[keymap['rep']], row[keymap['date']], row[keymap['org']], row[keymap['meet']], dept])
    file_id = wrangler.add_file_to_db(db_pathname, csv_pathname, discards)
    # Tag every row with the originating file before inserting.
    for row in db_rows:
        row.append(file_id)
    return wrangler.insert_table_rows(db_pathname, db_rows)
def process2(db_pathname, csv_pathname):
    """Older single-table variant of process(): parse one CSV into the db.

    Unlike process(), this sniffs the dialect from the first 1024 bytes and
    expects a single 'meeting' table after at most 10 junk lines.  Returns
    0 on any parse failure, otherwise the result of
    wrangler.insert_table_rows.
    """
    logger.info('Starting to process csv ' + csv_pathname)
    # Department name comes from the CSV file's parent directory.
    dept = os.path.basename(os.path.dirname(csv_pathname))
    with open(csv_pathname) as csvfile:
        discards = 0
        sample = csvfile.read(1024)
        csvfile.seek(0)
        try:
            dialect = csv.Sniffer().sniff(sample)
        except csv.Error:
            logger.error('Aborted csv parse because of unrecognised format: ' + csv_pathname)
            # -1 discards marks "format not recognised at all".
            wrangler.add_file_to_db(db_pathname, csv_pathname, -1)
            return 0
        fieldnames = None
        # Skip any junk lines so DictReader sees the real header first.
        find_header(csvfile)
        reader = csv.DictReader(csvfile, fieldnames, dialect)
        db_rows = []
        if reader.fieldnames is None:
            #TODO problem with bis-ministerial-gifts-april-to-june.csv
            #csvfile.seek(0)
            #csv_identify(db_pathname, csv_pathname, next(csvfile), discards)
            wrangler.add_file_to_db(db_pathname, csv_pathname, discards)
            return 0
        # Normalise column names for find_header_match.
        reader.fieldnames = list(map(lambda s: s.strip().lower(), reader.fieldnames))
        csv_keys = reader.fieldnames.copy()
        keymap = {}
        # complete generic method taking the csv_keys and the list of candidate lists
        keymap['rep'] = find_header_match(csv_keys, eartime_table.rep_keys)
        #if keymap['rep'] is None:
        #    for row in reader:
        #        discards += 1
        #    csv_identify(db_pathname, csv_pathname, reader.fieldnames, discards)
        #    return 0
        keymap['date'] = find_header_match(csv_keys, eartime_table.date_keys)
        if keymap['date'] is None:
            # Count remaining rows as discards, then record the file.
            for row in reader:
                discards += 1
            csv_identify(db_pathname, csv_pathname, reader.fieldnames, discards)
            return 0
        keymap['org'] = find_header_match(csv_keys, eartime_table.org_keys)
        if keymap['org'] is None:
            for row in reader:
                discards += 1
            csv_identify(db_pathname, csv_pathname, reader.fieldnames, discards)
            return 0
        keymap['meet'] = find_header_match(csv_keys, eartime_table.meet_keys)
        if keymap['meet'] is None:
            for row in reader:
                discards += 1
            csv_identify(db_pathname, csv_pathname, reader.fieldnames, discards)
            return 0
        #keymap['org'] = difflib.get_close_matches('organisation', csv_keys, n=1, cutoff=0.6)[0]
        #csv_keys.remove(keymap['org'])
        #keymap['date'] = difflib.get_close_matches('date', csv_keys, n=1, cutoff=0.3)[0]
        #csv_keys.remove(keymap['date'])
        #keymap['rep'] = difflib.get_close_matches('Minister', csv_keys, n=1, cutoff=0.6)[0]
        #csv_keys.remove(keymap['rep'])
        #keymap['meet'] = difflib.get_close_matches('Purpose', csv_keys, n=1, cutoff=0.6)[0]
        rep = 'Unknown'
        for row in reader:
            #if all(map(lambda x: row[x] is None or row[x] == '', keymap.values())):
            # Carry the last seen minister forward over blank cells.
            if keymap['rep'] is None or row[keymap['rep']] == '':
                row[keymap['rep']] = rep
            else:
                rep = row[keymap['rep']]
            if row[keymap['date']] == '' and row[keymap['org']] == '' and row[keymap['meet']] == '':
                # logger.debug('Discarded partial csv row: ' + str(row))
                continue
            elif row[keymap['date']] == '' and row[keymap['meet']] == '':
                # Continuation line: extra organisation for the previous row.
                # NOTE(review): unlike process(), this does not guard against
                # db_rows being empty -- IndexError possible, confirm inputs.
                db_rows[-1][3] += ', '
                db_rows[-1][3] += row[keymap['org']]
            elif row[keymap['date']] is None or row[keymap['rep']] is None or row[keymap['org']] is None or row[keymap['meet']] is None:
                logger.warning('Discarded partial csv row: ' + str(row))
                discards += 1
            elif row[keymap['date']] == '' or len(row[keymap['date']]) > 20:
                # Overlong 'date' cells are almost certainly not dates.
                logger.warning('Discarded partial csv row: ' + str(row))
                discards += 1
            else:
                db_rows.append([row[keymap['rep']], row[keymap['date']], row[keymap['org']], row[keymap['meet']], dept])
        file_id = wrangler.add_file_to_db(db_pathname, csv_pathname, discards)
        # Tag every row with the originating file before inserting.
        for row in db_rows:
            row.append(file_id)
        return wrangler.insert_table_rows(db_pathname, db_rows)
| |
from __future__ import division, print_function
import os
import shutil
import pytest
from tempfile import mkstemp, mkdtemp
from subprocess import Popen, PIPE
from distutils.errors import DistutilsError
from numpy.distutils import ccompiler, customized_ccompiler
from numpy.testing import assert_, assert_equal
from numpy.distutils.system_info import system_info, ConfigParser
from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
def get_class(name, notfound_action=1):
    """
    notfound_action:
      0 - do nothing
      1 - display warning message
      2 - raise error
    """
    # Map section names to their test info classes; anything unknown gets
    # the generic _system_info.
    registry = {'temp1': Temp1Info, 'temp2': Temp2Info}
    klass = registry.get(name.lower(), _system_info)
    return klass()
simple_site = """
[ALL]
library_dirs = {dir1:s}{pathsep:s}{dir2:s}
libraries = {lib1:s},{lib2:s}
extra_compile_args = -I/fake/directory
runtime_library_dirs = {dir1:s}
[temp1]
library_dirs = {dir1:s}
libraries = {lib1:s}
runtime_library_dirs = {dir1:s}
[temp2]
library_dirs = {dir2:s}
libraries = {lib2:s}
extra_link_args = -Wl,-rpath={lib2:s}
rpath = {dir2:s}
"""
site_cfg = simple_site
fakelib_c_text = """
/* This file is generated from numpy/distutils/testing/test_system_info.py */
#include<stdio.h>
void foo(void) {
printf("Hello foo");
}
void bar(void) {
printf("Hello bar");
}
"""
def have_compiler():
    """Best-effort check that an executable C compiler is available."""
    compiler = customized_ccompiler()
    try:
        # Unix-style compilers expose the command line directly.
        cmd = compiler.compiler
    except AttributeError:
        # MSVC must be initialized before .cc becomes available.
        try:
            if not compiler.initialized:
                compiler.initialize()
        except (DistutilsError, ValueError):
            return False
        cmd = [compiler.cc]
    # Finally, check the command actually runs.
    try:
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        proc.stdout.close()
        proc.stderr.close()
        proc.wait()
    except OSError:
        return False
    return True
# Evaluated once at import time so the pytest skipif decorators below can use it.
HAVE_COMPILER = have_compiler()
class _system_info(system_info):
    """Test double for system_info with a controllable ConfigParser.

    Deliberately does NOT call system_info.__init__: the config files are
    parsed later (via parse_config_files) once the temporary site.cfg path
    is known.
    """

    def __init__(self,
                 default_lib_dirs=default_lib_dirs,
                 default_include_dirs=default_include_dirs,
                 verbosity=1,
                 ):
        # Reset shared class-level cache so each instance starts clean.
        self.__class__.info = {}
        self.local_prefixes = []
        defaults = {'library_dirs': '',
                    'include_dirs': '',
                    'runtime_library_dirs': '',
                    'rpath': '',
                    'src_dirs': '',
                    'search_static_first': "0",
                    'extra_compile_args': '',
                    'extra_link_args': ''}
        self.cp = ConfigParser(defaults)
        # We have to parse the config files afterwards
        # to have a consistent temporary filepath

    def _check_libs(self, lib_dirs, libs, opt_libs, exts):
        """Override _check_libs to return with all dirs """
        # Skip the on-disk existence checks of the real implementation.
        info = {'libraries': libs, 'library_dirs': lib_dirs}
        return info
class Temp1Info(_system_info):
    """For testing purposes"""
    # Reads the [temp1] section of the generated site.cfg.
    section = 'temp1'
class Temp2Info(_system_info):
    """For testing purposes"""
    # Reads the [temp2] section of the generated site.cfg.
    section = 'temp2'
class TestSystemInfoReading(object):
    """Tests that library/include configuration is read from site.cfg."""

    def setup(self):
        """ Create the libraries """
        # Create 2 sources and 2 libraries
        self._dir1 = mkdtemp()
        self._src1 = os.path.join(self._dir1, 'foo.c')
        self._lib1 = os.path.join(self._dir1, 'libfoo.so')
        self._dir2 = mkdtemp()
        self._src2 = os.path.join(self._dir2, 'bar.c')
        self._lib2 = os.path.join(self._dir2, 'libbar.so')
        # Update local site.cfg
        global simple_site, site_cfg
        site_cfg = simple_site.format(**{
            'dir1': self._dir1,
            'lib1': self._lib1,
            'dir2': self._dir2,
            'lib2': self._lib2,
            'pathsep': os.pathsep
        })
        # Write site.cfg
        fd, self._sitecfg = mkstemp()
        os.close(fd)
        with open(self._sitecfg, 'w') as fd:
            fd.write(site_cfg)
        # Write the sources
        with open(self._src1, 'w') as fd:
            fd.write(fakelib_c_text)
        with open(self._src2, 'w') as fd:
            fd.write(fakelib_c_text)
        # We create all class-instances
        def site_and_parse(c, site_cfg):
            # Point the instance at the generated site.cfg and parse it.
            c.files = [site_cfg]
            c.parse_config_files()
            return c
        self.c_default = site_and_parse(get_class('default'), self._sitecfg)
        self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)
        self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)

    def teardown(self):
        # Do each removal separately
        try:
            shutil.rmtree(self._dir1)
        except Exception:
            pass
        try:
            shutil.rmtree(self._dir2)
        except Exception:
            pass
        try:
            os.remove(self._sitecfg)
        except Exception:
            pass

    def test_all(self):
        # Read in all information in the ALL block
        tsi = self.c_default
        assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
        extra = tsi.calc_extra_info()
        assert_equal(extra['extra_compile_args'], ['-I/fake/directory'])

    def test_temp1(self):
        # Read in all information in the temp1 block
        tsi = self.c_temp1
        assert_equal(tsi.get_lib_dirs(), [self._dir1])
        assert_equal(tsi.get_libraries(), [self._lib1])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])

    def test_temp2(self):
        # Read in all information in the temp2 block
        tsi = self.c_temp2
        assert_equal(tsi.get_lib_dirs(), [self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib2])
        # Now from rpath and not runtime_library_dirs
        assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2])
        extra = tsi.calc_extra_info()
        assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])

    @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
    def test_compile1(self):
        # Compile source and link the first source
        c = customized_ccompiler()
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir1)
            c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
            # Ensure that the object exists (.o on unix, .obj on MSVC)
            assert_(os.path.isfile(self._src1.replace('.c', '.o')) or
                    os.path.isfile(self._src1.replace('.c', '.obj')))
        finally:
            os.chdir(previousDir)

    @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
    @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()),
                        reason="Fails with MSVC compiler ")
    def test_compile2(self):
        # Compile source and link the second source
        tsi = self.c_temp2
        c = customized_ccompiler()
        extra_link_args = tsi.calc_extra_info()['extra_link_args']
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir2)
            c.compile([os.path.basename(self._src2)], output_dir=self._dir2,
                      extra_postargs=extra_link_args)
            # Ensure that the object exists
            assert_(os.path.isfile(self._src2.replace('.c', '.o')))
        finally:
            os.chdir(previousDir)
| |
from landlab import Component
from ...utils.decorators import use_file_name_or_kwds
import numpy as np
_VALID_METHODS = set(['Grid'])
def _assert_method_is_valid(method):
if method not in _VALID_METHODS:
raise ValueError('%s: Invalid method name' % method)
class Radiation(Component):
    """Compute 1D and 2D total incident shortwave radiation.

    Landlab component that computes 1D and 2D total incident shortwave
    radiation. This code also computes relative incidence shortwave radiation
    compared to a flat surface.

    .. codeauthor:: Sai Nudurupati & Erkan Istanbulluoglu

    Construction::

        Radiation(grid, method='Grid', cloudiness=0.2, latitude=34.,
                  albedo=0.2, solar_constant=1366.67,
                  clearsky_turbidity=2., opt_airmass=0.)

    Parameters
    ----------
    grid: RasterModelGrid
        A grid.
    method: {'Grid'}, optional
        Currently, only default is available.
    cloudiness: float, optional
        Cloudiness.
    latitude: float, optional
        Latitude (radians).
    albedo: float, optional
        Albedo.
    solar_constant: float, optional
        Solar Constant (W/m^2).
    clearsky_turbidity: float, optional
        Clear sky turbidity.
    opt_airmass: float, optional
        Optical air mass.

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> from landlab.components import Radiation
    >>> import numpy as np
    >>> grid = RasterModelGrid((5, 4), spacing=(0.2, 0.2))
    >>> rad = Radiation(grid)
    >>> rad.name
    'Radiation'
    >>> rad.input_var_names
    ('topographic__elevation',)
    >>> sorted(rad.output_var_names) # doctest: +NORMALIZE_WHITESPACE
    ['radiation__incoming_shortwave_flux',
     'radiation__net_shortwave_flux',
     'radiation__ratio_to_flat_surface']
    >>> sorted(rad.units) # doctest: +NORMALIZE_WHITESPACE
    [('radiation__incoming_shortwave_flux', 'W/m^2'),
     ('radiation__net_shortwave_flux', 'W/m^2'),
     ('radiation__ratio_to_flat_surface', 'None'),
     ('topographic__elevation', 'm')]
    >>> rad.grid.number_of_node_rows
    5
    >>> rad.grid.number_of_node_columns
    4
    >>> rad.grid is grid
    True
    >>> np.all(grid.at_cell['radiation__ratio_to_flat_surface'] == 0.)
    True
    >>> np.all(grid.at_node['topographic__elevation'] == 0.)
    True
    >>> grid['node']['topographic__elevation'] = np.array([
    ...       0., 0., 0., 0.,
    ...       1., 1., 1., 1.,
    ...       2., 2., 2., 2.,
    ...       3., 4., 4., 3.,
    ...       4., 4., 4., 4.])
    >>> current_time = 0.5
    >>> rad.update(current_time)
    >>> np.all(grid.at_cell['radiation__ratio_to_flat_surface'] == 0.)
    False
    """

    _name = 'Radiation'

    _input_var_names = (
        'topographic__elevation',
    )

    _output_var_names = (
        'radiation__incoming_shortwave_flux',
        'radiation__ratio_to_flat_surface',
        'radiation__net_shortwave_flux',
    )

    _var_units = {
        'topographic__elevation': 'm',
        'radiation__incoming_shortwave_flux': 'W/m^2',
        'radiation__ratio_to_flat_surface': 'None',
        'radiation__net_shortwave_flux': 'W/m^2',
    }

    _var_mapping = {
        'topographic__elevation': 'node',
        'radiation__incoming_shortwave_flux': 'cell',
        'radiation__ratio_to_flat_surface': 'cell',
        'radiation__net_shortwave_flux': 'cell',
    }

    _var_doc = {
        'topographic__elevation':
            'elevation of the ground surface relative to some datum',
        'radiation__incoming_shortwave_flux':
            'total incident shortwave radiation over the time step',
        'radiation__ratio_to_flat_surface':
            'ratio of total incident shortwave radiation on sloped surface \
to flat surface',
        'radiation__net_shortwave_flux':
            'net incident shortwave radiation over the time step',
    }

    @use_file_name_or_kwds
    def __init__(self, grid, method='Grid', cloudiness=0.2,
                 latitude=34., albedo=0.2, solar_constant=1366.67,
                 clearsky_turbidity=2., opt_airmass=0., **kwds):
        """
        Parameters
        ----------
        grid : RasterModelGrid
            A grid.
        method : {'Grid'}, optional
            Currently, only default is available.
        cloudiness: float, optional
            Cloudiness.
        latitude: float, optional
            Latitude (Radians).
        albedo: float, optional
            Albedo.
        solar_constant: float, optional
            Solar Constant (W/m^2).
        clearsky_turbidity: float, optional
            Clear sky turbidity.
        opt_airmass: float, optional
            Optical air mass.
        """
        self._method = method
        # Short physics names: N=cloudiness, A=albedo, Io=solar constant,
        # n=turbidity, m=optical air mass.
        self._N = cloudiness
        self._latitude = latitude
        self._A = albedo
        self._Io = solar_constant
        self._n = clearsky_turbidity
        self._m = opt_airmass

        _assert_method_is_valid(self._method)

        super(Radiation, self).__init__(grid, **kwds)

        # Create any missing input (node) and output (cell) fields as zeros.
        for name in self._input_var_names:
            if name not in self.grid.at_node:
                self.grid.add_zeros(name, at='node',
                                    units=self._var_units[name])

        for name in self._output_var_names:
            if name not in self.grid.at_cell:
                self.grid.add_zeros(name, at='cell',
                                    units=self._var_units[name])

        if 'Slope' not in self.grid.at_cell:
            self.grid.add_zeros('Slope', at='cell', units='radians')

        if 'Aspect' not in self.grid.at_cell:
            self.grid.add_zeros('Aspect', at='cell', units='radians')

        self._nodal_values = self.grid['node']
        self._cell_values = self.grid['cell']
        # Slope/aspect computed once here from the initial topography;
        # update() does not recompute them.
        self._slope, self._aspect = \
            grid.calculate_slope_aspect_at_nodes_burrough(
                vals='topographic__elevation')
        # self._slope = grid.calc_slope_of_node( \
        #    elevs = 'topographic__elevation')
        # self._aspect =
        self._cell_values['Slope'] = self._slope
        self._cell_values['Aspect'] = self._aspect

    def update(self, current_time, hour=12., **kwds):
        """Update fields with current loading conditions.

        Parameters
        ----------
        current_time: float
            Current time (years).
        hour: float, optional
            Hour of the day.
        """
        self._t = hour
        self._radf = self._cell_values['radiation__ratio_to_flat_surface']
        self._Rs = self._cell_values['radiation__incoming_shortwave_flux']
        self._Rnet = self._cell_values['radiation__net_shortwave_flux']

        # Fractional part of the year converted to a day number.
        self._julian = np.floor((current_time - np.floor(current_time)) *
                                365.25)  # Julian day

        self._phi = np.radians(self._latitude)  # Latitude in Radians

        self._delta = 23.45 * np.radians(
            np.cos(2*np.pi / 365 * (172 - self._julian)))  # Declination angle

        self._tau = (self._t + 12.0) * np.pi / 12.0  # Hour angle

        self._alpha = np.arcsin(np.sin(self._delta) * np.sin(self._phi) +
                                np.cos(self._delta) * np.cos(self._phi) *
                                np.cos(self._tau))  # Solar Altitude

        if self._alpha <= 0.25 * np.pi / 180.0:  # If altitude is -ve,
            self._alpha = 0.25 * np.pi / 180.0  # sun is beyond the horizon

        # Clear-sky global radiation attenuated by turbidity and path length.
        self._Rgl = (self._Io * np.exp((-1) * self._n * (
            0.128 - 0.054 * np.log10(1. / np.sin(self._alpha)))*(
                1. / np.sin(self._alpha))))

        # Counting for Albedo, Cloudiness and Atmospheric turbidity
        self._phisun = (np.arctan(- np.sin(self._tau) / (np.tan(self._delta) *
                        np.cos(self._phi) - np.sin(self._phi) *
                        np.cos(self._tau))))  # Sun's Azhimuth
        # Shift azimuth into the correct quadrant.
        if (self._phisun >= 0 and - np.sin(self._tau) <= 0):
            self._phisun = self._phisun + np.pi
        elif (self._phisun <= 0 and - np.sin(self._tau) >= 0):
            self._phisun = self._phisun + np.pi

        self._flat = (np.cos(np.arctan(0)) * np.sin(self._alpha) +
                      np.sin(np.arctan(0)) * np.cos(self._alpha) *
                      np.cos(self._phisun - 0))  # flat surface reference

        self._Rsflat = self._Rgl * self._flat
        # flat surface total incoming shortwave radiation

        self._Rnetflat = ((1 - self._A) * (1 - 0.65 * (self._N ** 2)) *
                          self._Rsflat)
        # flat surface Net incoming shortwave radiation

        self._sloped = (np.cos(self._slope) * np.sin(self._alpha) +
                        np.sin(self._slope) * np.cos(self._alpha) *
                        np.cos(self._phisun - self._aspect))

        # Ratio of sloped- to flat-surface radiation, clipped to [0, 6].
        self._radf = self._sloped / self._flat
        self._radf[self._radf <= 0.] = 0.
        self._radf[self._radf > 6.] = 6.

        self._Rs = self._Rsflat * self._radf
        # Sloped surface Total Incoming Shortwave Radn
        self._Rnet = self._Rnetflat * self._radf

        self._cell_values['radiation__ratio_to_flat_surface'] = self._radf
        self._cell_values['radiation__incoming_shortwave_flux'] = self._Rs
        self._cell_values['radiation__net_shortwave_flux'] = self._Rnet
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses a JSON request log created by log_requests.py."""
import collections
import json
import operator
import urlparse
# Per-request network timing metrics.  NOTE(review): field meanings are
# presumed to mirror the timing data captured by log_requests.py (plus
# 'loadingFinished') -- confirm against that script.
Timing = collections.namedtuple(
    'Timing',
    ['connectEnd', 'connectStart', 'dnsEnd', 'dnsStart', 'proxyEnd',
     'proxyStart', 'receiveHeadersEnd', 'requestTime', 'sendEnd', 'sendStart',
     'sslEnd', 'sslStart', 'workerReady', 'workerStart', 'loadingFinished'])
class Resource(object):
  """Describes a resource."""

  def __init__(self, url, content_type):
    """Creates an instance of Resource.

    Args:
      url: URL of the resource
      content_type: Content-Type of the resources.
    """
    self.url = url
    self.content_type = content_type

  def GetShortName(self):
    """Returns either the hostname of the resource, or the filename,
    or the end of the path. Tries to include the domain as much as possible.
    """
    parsed = urlparse.urlparse(self.url)
    path = parsed.path
    if path != '' and path != '/':
      last_path = parsed.path.split('/')[-1]
      if len(last_path) < 10:
        if len(path) < 10:
          # Whole path is short enough to show in full.
          return parsed.hostname + '/' + path
        else:
          # Short filename but long path: show the last 10 path chars.
          return parsed.hostname + '/..' + parsed.path[-10:]
      elif len(last_path) > 10:
        # Long filename: show only its first 5 characters.
        return parsed.hostname + '/..' + last_path[:5]
      else:
        # Filename of exactly 10 characters is shown in full.
        return parsed.hostname + '/..' + last_path
    else:
      return parsed.hostname

  def GetContentType(self):
    """Maps the raw MIME type onto one of a few coarse categories."""
    mime = self.content_type
    if 'magic-debug-content' in mime:
      # A silly hack to make the unittesting easier.
      return 'magic-debug-content'
    elif mime == 'text/html':
      return 'html'
    elif mime == 'text/css':
      return 'css'
    elif mime in ('application/x-javascript', 'text/javascript',
                  'application/javascript'):
      return 'script'
    elif mime == 'application/json':
      return 'json'
    elif mime == 'image/gif':
      return 'gif_image'
    elif mime.startswith('image/'):
      return 'image'
    else:
      return 'other'

  @classmethod
  def FromRequest(cls, request):
    """Creates a Resource from an instance of RequestData."""
    return Resource(request.url, request.GetContentType())

  def __Fields(self):
    # Value tuple used by both __eq__ and __hash__.
    return (self.url, self.content_type)

  def __eq__(self, o):
    # NOTE(review): assumes `o` is also a Resource; comparing against another
    # type raises AttributeError.  No __ne__ defined (Python 2 semantics).
    return self.__Fields() == o.__Fields()

  def __hash__(self):
    return hash(self.__Fields())
class RequestData(object):
  """Represents a request, as dumped by log_requests.py."""

  def __init__(self, status, headers, request_headers, timestamp, timing, url,
               served_from_cache, initiator):
    self.status = status
    self.headers = headers
    self.request_headers = request_headers
    self.timestamp = timestamp
    # `timing` may be None (e.g. cached responses -- confirm in the logger);
    # otherwise it is a dict matching the Timing namedtuple's fields.
    self.timing = Timing(**timing) if timing else None
    self.url = url
    self.served_from_cache = served_from_cache
    self.initiator = initiator

  def IsDataUrl(self):
    """True for inline data: URLs, which are not real network fetches."""
    return self.url.startswith('data:')

  def GetContentType(self):
    """Returns the Content-Type header value with any parameters
    (e.g. '; charset=utf-8') stripped off.  Raises KeyError when the
    header is absent -- callers filter on its presence first."""
    content_type = self.headers['Content-Type']
    if ';' in content_type:
      return content_type[:content_type.index(';')]
    else:
      return content_type

  @classmethod
  def FromDict(cls, r):
    """Creates a RequestData object from a dict."""
    return RequestData(r['status'], r['headers'], r['request_headers'],
                       r['timestamp'], r['timing'], r['url'],
                       r['served_from_cache'], r['initiator'])
def ParseJsonFile(filename):
  """Converts a JSON file to a sequence of RequestData."""
  with open(filename) as json_file:
    entries = json.load(json_file)
    return [RequestData.FromDict(entry) for entry in entries]
def FilterRequests(requests):
  """Filters a list of requests.

  Args:
    requests: [RequestData, ...]

  Returns:
    A list of requests that are not data URL, have a Content-Type, and are
    not served from the cache.
  """
  def _keep(request):
    return (not request.IsDataUrl()
            and 'Content-Type' in request.headers
            and not request.served_from_cache)
  return [request for request in requests if _keep(request)]
def ResourceToRequestMap(requests):
  """Returns a Resource -> Request map.

  A resource can be requested several times in a single page load. Keeps the
  first request in this case.

  Args:
    requests: [RequestData, ...]

  Returns:
    {Resource: RequestData, ...}
  """
  mapping = {}
  # Walk backwards so that the earliest request overwrites later ones.
  for request in reversed(requests):
    mapping[Resource.FromRequest(request)] = request
  return mapping
def GetResources(requests):
  """Returns an ordered list of resources from a list of requests.

  The same resource can be requested several time for a single page load. This
  keeps only the first request.

  Args:
    requests: [RequestData]

  Returns:
    [Resource] in first-seen order, without duplicates.
  """
  resources = []
  known_resources = set()
  for r in requests:
    resource = Resource.FromRequest(r)
    # Bug fix: this previously tested `r in known_resources` -- the request
    # instead of the resource -- so membership was always False and
    # duplicate resources were never filtered out.
    if resource in known_resources:
      continue
    known_resources.add(resource)
    resources.append(resource)
  return resources
def ParseCacheControl(headers):
  """Parses the "Cache-Control" header and returns a dict representing it.

  Args:
    headers: (dict) Response headers.

  Returns:
    {Directive: Value, ...} where valueless directives map to True.
  """
  # TODO(lizeb): Handle the "Expires" header as well.
  cache_control = headers.get('Cache-Control', None)
  if cache_control is None:
    return {}
  result = {}
  for directive in cache_control.split(','):
    parts = [piece.strip() for piece in directive.split('=')]
    result[parts[0]] = True if len(parts) == 1 else parts[1]
  return result
def MaxAge(request):
  """Returns the max-age of a resource, or -1."""
  directives = ParseCacheControl(request.headers)
  # Uncacheable (or no caching information at all) -> -1.
  if (not directives
      or u'no-store' in directives
      or u'no-cache' in directives):
    return -1
  return int(directives['max-age']) if 'max-age' in directives else -1
def SortedByCompletion(requests):
  """Returns the requests, sorted by completion time."""
  return sorted(requests, key=lambda request: request.timestamp)
| |
import pygame
import time
import math
from pygame.locals import *
class Colours(object):
    # Common RGB colour constants (0-255 per channel) for the widgets below.
    white = (255, 255, 255)
    light_gray = (128, 128, 128)
    med_gray = (64, 64, 64)
    gray = (32, 32, 32)
    black = (0, 0, 0)
    sepia = (120, 100, 82)
    red = (200, 17, 55)
    green = (44, 160, 90)
    blue = (0, 0, 255)
    electric_blue = (0, 191, 255)
class Pannel(object):
    """Top-level container that owns widgets and dispatches input and drawing."""

    def __init__(self, display):
        self.x, self.y = 0, 0
        self.widgets = []            # draw order; hit-testing runs in reverse
        self.display = display
        self.debounce = 0            # time (ms) of the last accepted button-down
        self.debounce_time = 5       # minimum ms between accepted presses
        self.offsetX, self.offsetY = 0, 0

    def addWidget(self, widgetClass, *args, **kw):
        """Instantiate `widgetClass`, wiring in this pannel and its display;
        returns the new widget."""
        kw['parent'] = self
        kw['display'] = self.display
        w = widgetClass(*args, **kw)
        self.widgets.append(w)
        return w

    def sendEvent(self, event):
        """Route a pygame mouse event to the top-most touchable widget.

        Only left-button (button 1) up/down events are handled; presses
        arriving within debounce_time ms of the previous one are ignored.
        """
        if event.type in [MOUSEBUTTONUP, MOUSEBUTTONDOWN] and event.button==1:
            now = time.time() * 1000
            if (now - self.debounce) < self.debounce_time:
                return False
            if event.type == MOUSEBUTTONUP:
                self.debounce = 0
            if event.type == MOUSEBUTTONDOWN:
                self.debounce = now
            x,y = event.pos
            # Reverse order so the last-drawn (top-most) widget wins.
            for w in reversed(self.widgets):
                if w.touch and w.inside(x, y):
                    w.touched()
                    break

    def draw(self):
        """Draw every widget once and flip the display."""
        for w in self.widgets:
            w.draw()
        self.display.flip()

    def update(self):
        """Run widget update()s; flip the display only if any asked for it."""
        fl = False
        for w in self.widgets:
            r = w.update()
            fl = fl or r
        if fl:
            self.display.flip()
class Widget(object):
    """Base class for drawable, optionally touchable, screen widgets."""
    # Subclasses set this True to receive touches from Pannel.sendEvent.
    touch = False

    def __init__(self, x, y, w=1, h=1, display=None, parent=None):
        # Position/size are relative to the parent pannel.
        self.x, self.y = x, y
        self.w, self.h = w, h
        self.display = None
        self.surf = None
        self.parent = parent
        self.display = display       # NOTE: overwrites the None set above
        self.surf = display.display
        self.offsetX = 0
        self.offsetY = 0

    def getXY(self):
        # Absolute position: widget coords plus parent position and offset.
        return (
            self.x + self.parent.x + self.parent.offsetX,
            self.y + self.parent.y + self.parent.offsetY,
        )

    def update(self):
        # Called on every display loop - must return True if re-render required
        return False

    def draw(self):
        # Draw the initial widget
        pass

    def inside(self, x, y):
        # Hit test; strict inequalities, so border pixels do not count.
        mx, my = self.getXY()
        inx = (x > mx) and (x < (mx + self.w))
        iny = (y > my) and (y < (my + self.h))
        if inx and iny:
            return True
        else:
            return False

    def touched(self):
        # Override to react to a touch; return value is ignored by Pannel.
        return False
class SevenSegment(Widget):
    """Multi-digit seven-segment numeric display."""
    # Per-digit segment on/off flags.  NOTE(review): ordering presumed from
    # the blit positions in lightSegments -- (top, top-right, bottom-right,
    # bottom, bottom-left, top-left, middle); confirm.
    charMap = {
        '0': (True, True, True, True, True, True, False),
        '1': (False, True, True, False, False, False, False),
        '2': (True, True, False, True, True, False, True),
        '3': (True, True, True, True, False, False, True),
        '4': (False, True, True, False, False, True, True),
        '5': (True, False, True, True, False, True, True),
        '6': (True, False, True, True, True, True, True),
        '7': (True, True, True, False, False, False, False),
        '8': (True, True, True, True, True, True, True),
        '9': (True, True, True, False, False, True, True)
    }
    def __init__(self, x, y, w, h, value=0, digits=2, msd=1, colour=Colours.red, digit_pad=5, **kw):
        # value: number to display; digits: how many digit cells to render;
        # msd: minimum number of integer digits (zero-padded up to this);
        # digit_pad: pixels of spacing between digit cells.
        Widget.__init__(self, x, y, w, h, **kw)
        self.digits = digits
        self.colour = colour
        self.lastV = self.value = value   # lastV tracks the previously drawn value
        self.digit_pad = digit_pad
        self.msd = msd
        self.constructSurface()
def constructSurface(self):
self.digit = pygame.Surface((self.w, self.h))
self.dw = int((self.w / self.digits) - self.digit_pad)
dh = (self.h / 2) - self.digit_pad
# Horizontal segment
self.h_dark = pygame.Surface((self.dw - 10, 10), pygame.SRCALPHA)
self.h_light = pygame.Surface((self.dw - 10, 10), pygame.SRCALPHA)
h_shape = [
(0, 5),
(5, 0),
(self.dw - 15, 0),
(self.dw - 10, 5),
(self.dw - 15, 10),
(5, 10)
]
pygame.draw.polygon(self.h_dark, Colours.gray, h_shape)
pygame.draw.polygon(self.h_light, self.colour, h_shape)
self.v_dark = pygame.Surface((10, dh), pygame.SRCALPHA)
self.v_light = pygame.Surface((10, dh), pygame.SRCALPHA)
v_shape = [
(5, 0),
(10, 5),
(10, dh - 15),
(5, dh - 10),
(0, dh - 15),
(0, 5),
]
pygame.draw.polygon(self.v_dark, Colours.gray, v_shape)
pygame.draw.polygon(self.v_light, self.colour, v_shape)
for i in range(self.digits):
x = (self.dw + self.digit_pad) * i
# Horizontal segments
self.digit.blit(self.h_dark, (x+5, 0))
self.digit.blit(self.h_dark, (x+5, (self.h/2) - 5))
self.digit.blit(self.h_dark, (x+5, self.h - 10))
# Vertical segments
self.digit.blit(self.v_dark, (x, 10))
self.digit.blit(self.v_dark, (x+self.dw-10, 10))
self.digit.blit(self.v_dark, (x, 5+self.h/2))
self.digit.blit(self.v_dark, (x+self.dw-10, 5+self.h/2))
def lightSegments(self):
panel = pygame.Surface((self.w, self.h), pygame.SRCALPHA)
dst = (self.h_dark, self.h_light, self.v_dark, self.v_light)
v = "%f" % self.value
nd = len(v.split('.')[0])
if nd < self.msd:
v = '0'*(self.msd - nd) + v
cn = 0
for c in v:
if cn >= self.digits:
break
if (c == '.') and (cn <= self.digits):
pygame.draw.circle(panel, self.colour, (x + self.dw + 2, self.h-5), 5)
continue
if c in self.charMap:
x = (self.dw + self.digit_pad) * cn
cn += 1
segments = [
(0, (x+5, 0)),
(2, (x+self.dw-10, 10)),
(2, (x+self.dw-10, 5 + self.h/2)),
(0, (x+5, self.h - 10)),
(2, (x, 5+self.h/2)),
(2, (x, 10)),
(0, (x+5, (self.h/2) - 5)),
]
for i, s in enumerate(self.charMap[c]):
if s:
args = (dst[segments[i][0] + 1], segments[i][1])
else:
args = (dst[segments[i][0]], segments[i][1])
panel.blit(*args)
return panel
def draw(self):
x, y = self.getXY()
if self.digit:
self.display.blit(self.digit, (x, y))
self.display.blit(self.lightSegments(), (x, y))
def update(self):
if self.value != self.lastV:
self.draw()
self.lastV = self.value
return True
class FancyGauge(Widget):
    """Circular gauge: an annular ring fills clockwise with value/maxScale.

    The centre shows the value (or a percentage when showPercentage is set)
    plus an optional units label rendered once into the static background.
    """
    def __init__(self, x, y, r, showPercentage = False, valueFormat = "%d",
                 colour=Colours.green, units = None, maxScale = 25,
                 touched=None, **kw):
        Widget.__init__(self, x, y, **kw)
        self.r = r
        # Bounding box is the circle's diameter plus a 1px margin per side.
        self.h = self.w = (2 * r) + 2
        self.value = 0
        self.lastV = self.value
        self.valueFormat = valueFormat
        self.showPercentage = showPercentage
        self.colour = colour
        self.units = units
        self.maxScale = maxScale
        self.callback = touched
        self.constructSurface()
    def constructSurface(self):
        """Pre-render the static background: grey ring and units label."""
        self.meter = pygame.Surface((self.h, self.w))
        pygame.draw.circle(self.meter, Colours.gray, (int(self.w/2), int(self.h/2)), self.r, int(self.r*0.25))
        self.valueFont = pygame.font.Font('carlito.ttf', int(self.r*0.5))
        if self.units:
            unitFont = pygame.font.Font('carlito.ttf', int(self.r*0.30))
            w, h = unitFont.size(self.units)
            units = unitFont.render(self.units, True, Colours.light_gray)
            self.meter.blit(units, ((self.w / 2) - (w/2), ((self.h/2) - (h/2)) + h))
    def arcSlice(self, center, rad1, rad2, angle):
        """Return polygon vertices for an annular slice of ``angle`` degrees.

        The slice starts at 12 o'clock (-90 deg) and sweeps clockwise between
        outer radius ``rad1`` and inner radius ``rad2``. Returns [] when the
        angle yields no points.
        """
        arc1 = []
        arc2 = []
        pi = math.pi
        for n in range(-90, angle-90):
            # Trig is expensive, do it once.
            cs = math.cos(n*pi/180)
            ss = math.sin(n*pi/180)
            arc1.append((center[0] + int(rad1 * cs), center[1] + int(rad1 * ss)))
            arc2.insert(0, (center[0] + int(rad2 * cs), center[1] + int(rad2 * ss)))
        if not arc1:
            return []
        # BUG FIX: the closing point must be appended as a single (x, y)
        # tuple; extend() spliced its two ints into the list and handed
        # pygame.draw.polygon an invalid vertex sequence.
        arc2.append(arc1[0])
        arc1.extend(arc2)
        return arc1
    def draw(self):
        """Composite background, value text and value arc, then blit."""
        submeter = pygame.Surface((self.h, self.w))
        submeter.blit(self.meter, (0,0))
        # Draw value text
        # FIX: force true division so integer value/maxScale pairs don't
        # truncate to 0 under Python 2 integer division.
        pv = float(self.value) / self.maxScale
        if pv > 1:
            pv = 1
        if self.showPercentage:
            vt = "%d%%" % int(pv*100)
        else:
            vt = self.valueFormat % self.value
        w, h = self.valueFont.size(vt)
        val = self.valueFont.render(vt, True, self.colour)
        submeter.blit(val, ((self.w / 2) - (w/2), (self.h/2) - (h/2) - 5))
        # Draw the value arc
        if (pv > 0):
            sm2 = pygame.Surface((self.h, self.w), pygame.SRCALPHA)
            arcSliceD = int(math.ceil(360 * pv))
            poly = self.arcSlice((int(self.w/2), int(self.h/2)), self.r, self.r*0.75, arcSliceD)
            if poly:
                pygame.draw.polygon(sm2, self.colour, poly)
            submeter.blit(sm2, (0, 0))
        x, y = self.getXY()
        self.display.blit(submeter, (x, y))
    def update(self):
        # Redraw only when the value changed and remains within scale.
        if (self.value != self.lastV) and (self.value <= self.maxScale):
            self.draw()
            self.lastV = self.value
            return True
    def touched(self):
        """Invoke the registered touch callback, if any."""
        if self.callback:
            self.callback()
class OldSchoolMeter(Widget):
    """Analogue needle meter drawn over a scanned meter-face bitmap."""
    def __init__(self, x, y, maxScale = 25, **kw):
        Widget.__init__(self, x, y, **kw)
        # Fixed size of the background image.
        self.h, self.w = 100, 146
        self.maxScale = maxScale
        self.value = 0
        self.lastV = self.value
        self.constructSurface()
    def constructSurface(self):
        """Pre-render the meter face with scale labels for maxScale."""
        self.meter = pygame.image.load('images/meter-bg.png').convert()
        font = pygame.font.Font('carlito.ttf', 12)
        # Label increment so six ticks cover the full scale.
        segments = math.ceil(self.maxScale / 6.0)
        r = 60.0
        # Render the six rotated scale labels around the dial.
        for tick in range(6):
            label = font.render(str(tick*segments), True, Colours.sepia)
            a = float(50.0 - (tick * 20.0))
            label = pygame.transform.rotate(label, a)
            aR = (a+90) * (math.pi / 180.0)
            self.meter.blit(label, (67 + int(math.cos(aR) * (r*1.05)), 92 - int(math.sin(aR) * r)))
    def update(self):
        """Redraw only when the value changed; report whether we did."""
        if self.value != self.lastV:
            self.draw()
            self.lastV = self.value
            return True
    def draw(self):
        """Blit the face, then draw the needle at the current value's angle."""
        canvas = pygame.Surface((self.w, self.h))
        canvas.blit(self.meter, (0, 0))
        outer = 70.0
        inner = 10.0
        # Needle sweeps from 140 deg (zero) down 107 deg across full scale.
        aR = (140 - ((self.value / self.maxScale) * 107)) * (math.pi / 180.0)
        pygame.draw.aaline(canvas, (43, 28, 18),
            (72 + int(math.cos(aR) * (outer*1.05)), 92 - int(math.sin(aR) * outer)),
            (72 + int(math.cos(aR) * (inner*1.05)), 92 - int(math.sin(aR) * inner)),
        )
        x, y = self.getXY()
        self.display.blit(canvas, (x, y))
class Button(Widget):
    """Rectangular outlined push button with a centred text label."""
    touch = True
    def __init__(self, text, x, y, w, h, callback=None, **kw):
        Widget.__init__(self, x, y, w, h, **kw)
        self.text = text
        self.callback = callback
    def draw(self):
        """Draw the outline and the centred label."""
        x, y = self.getXY()
        label = self.display.font.render(self.text, True, Colours.light_gray)
        tw, th = self.display.font.size(self.text)
        pygame.draw.rect(self.display.display, Colours.light_gray, (x, y, self.w, self.h), 1)
        label_pos = ((x + (self.w/2)) - (tw/2), (y + (self.h/2)) - (th/2))
        self.display.blit(label, label_pos)
    def touched(self):
        """Flash the border black for feedback, redraw, then fire the callback."""
        x, y = self.getXY()
        pygame.draw.rect(self.surf, Colours.black,
                         (x, y, self.w, self.h), 1)
        self.display.flip()
        self.draw()
        if self.callback:
            self.callback()
class UpButton(Widget):
    """Upward-pointing triangular button (e.g. an increment control)."""
    touch = True
    def __init__(self, x, y, w, h, callback=None, border=0, colour=Colours.light_gray, **kw):
        Widget.__init__(self, x, y, w, h, **kw)
        self.callback = callback
        self.colour = colour
        # Border line width in pixels; 0 draws no border rectangle.
        self.border = border
    def draw(self):
        """Draw the filled triangle and, when requested, a border rectangle."""
        x, y = self.getXY()
        pygame.draw.polygon(self.display.display, self.colour, [
            [x + (self.w/2), y+2],
            [x + self.w -2, y + self.h - 2],
            [x + 2, y + self.h - 2]
        ], 0)
        if self.border > 0:
            # FIX: removed stray debug `print repr(self.border)` left over from
            # development; it spammed stdout every draw and used Python-2-only
            # print-statement syntax.
            pygame.draw.rect(self.display.display, Colours.med_gray,
                             (x, y, self.w, self.h), self.border)
    def touched(self):
        """Fire the registered callback, if any."""
        if self.callback:
            self.callback()
class DownButton(UpButton):
    """Downward-pointing variant of UpButton (e.g. a decrement control)."""
    touch = True
    def draw(self):
        """Draw the inverted triangle and the optional border rectangle."""
        x, y = self.getXY()
        top_left = [x + 2, y + 2]
        top_right = [x + self.w - 2, y + 2]
        apex = [x + (self.w/2), y + self.h - 2]
        pygame.draw.polygon(self.display.display, self.colour,
                            [top_left, top_right, apex], 0)
        if self.border > 0:
            pygame.draw.rect(self.display.display, Colours.med_gray,
                             (x, y, self.w, self.h), self.border)
class ToggleButton(Widget):
    """Two-state button: text1/colour1 when on, text2/colour2 when off."""
    touch = True
    def __init__(self, text1, text2, x, y, w, h, colour1=Colours.green,
                 colour2=Colours.red, callback=None, **kw):
        Widget.__init__(self, x, y, w, h, **kw)
        self.text1 = text1
        self.text2 = text2
        self.callback = callback
        self.state = False
        self.colour1 = colour1
        self.colour2 = colour2
    def draw(self):
        """Render the filled button reflecting the current state."""
        x, y = self.getXY()
        if self.state:
            text, colour = self.text1, self.colour1
        else:
            text, colour = self.text2, self.colour2
        btText = self.display.font.render(text, True, Colours.white)
        tw, th = self.display.font.size(text)
        # Light outline, then state-coloured fill inset by one pixel.
        pygame.draw.rect(self.display.display, Colours.light_gray, (
            x, y, self.w, self.h), 1)
        pygame.draw.rect(self.display.display, colour, (
            x + 1, y + 1, self.w - 2, self.h - 2), 0)
        self.display.blit(btText, ((x + (self.w/2)) - (tw/2),
                                   (y + (self.h/2)) - (th/2) + 1))
    def setState(self, state):
        """Set the state explicitly; redraw and return True only on change."""
        if self.state == state:
            return False
        self.state = state
        self.draw()
        return True
    def touched(self):
        """Flip the state, notify the callback with the new state, redraw."""
        self.state = not self.state
        if self.callback:
            self.callback(self.state)
        self.draw()
class Frame(Widget):
    """Labelled container: a small title tab above an outlined body.

    Children added through addWidget are registered with the top-level
    display but re-parented here, so their coordinates become frame-relative.
    """
    def __init__(self, text, x, y, w, h, **kw):
        Widget.__init__(self, x, y, w, h, **kw)
        self.text = text
        self.btText = self.display.font.render(self.text, True, Colours.light_gray)
        self.tw, self.th = self.display.font.size(self.text)
        # Children are offset to sit below the title tab.
        self.offsetX = 1
        self.offsetY = 4 + self.th
    def draw(self):
        """Draw the title tab, the body outline, and the title text."""
        x, y = self.getXY()
        tab_w, tab_h = self.tw + 6, self.th + 4
        pygame.draw.rect(self.display.display, Colours.med_gray, (x, y, tab_w, tab_h), 1)
        pygame.draw.rect(self.display.display, Colours.med_gray, (x, y + tab_h, self.w, self.h - tab_h), 1)
        self.display.blit(self.btText, (x + 3, y + 3))
    def addWidget(self, widget, *a, **kw):
        """Register a child with the display, then adopt it as ours."""
        child = self.parent.addWidget(widget, *a, **kw)
        child.parent = self
        return child
| |
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The maths module provides higher-level interfaces to some of the operations
that can be performed with the fslmaths command-line program.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
import os
import numpy as np
from ..base import (TraitedSpec, File, traits, InputMultiPath, isdefined)
from .base import FSLCommand, FSLCommandInputSpec
class MathsInput(FSLCommandInputSpec):
    """Input spec shared by every fslmaths-based interface."""
    in_file = File(position=2, argstr="%s", exists=True, mandatory=True,
                   desc="image to operate on")
    out_file = File(genfile=True, position=-2, argstr="%s",
                    desc="image to write", hash_files=False)
    # Datatypes accepted by fslmaths' -dt/-odt flags.
    _dtypes = ["float", "char", "int", "short", "double", "input"]
    internal_datatype = traits.Enum(*_dtypes, position=1, argstr="-dt %s",
                                    desc=("datatype to use for calculations "
                                          "(default is float)"))
    output_datatype = traits.Enum(*_dtypes,
                                  position=-1, argstr="-odt %s",
                                  desc=("datatype to use for output (default "
                                        "uses input type)"))
    nan2zeros = traits.Bool(position=3, argstr='-nan',
                            desc='change NaNs to zeros before doing anything')
class MathsOutput(TraitedSpec):
    """Output spec shared by every fslmaths-based interface."""
    out_file = File(exists=True, desc="image written after calculations")
class MathsCommand(FSLCommand):
    """Base interface wrapping the ``fslmaths`` command-line tool."""
    _cmd = "fslmaths"
    input_spec = MathsInput
    output_spec = MathsOutput
    _suffix = "_maths"
    def _list_outputs(self):
        """Resolve out_file, generating a suffixed default name when unset."""
        outputs = self.output_spec().get()
        if isdefined(self.inputs.out_file):
            out_file = self.inputs.out_file
        else:
            out_file = self._gen_fname(self.inputs.in_file,
                                       suffix=self._suffix)
        outputs["out_file"] = os.path.abspath(out_file)
        return outputs
    def _gen_filename(self, name):
        """Only out_file is auto-generated; everything else returns None."""
        if name != "out_file":
            return None
        return self._list_outputs()["out_file"]
class ChangeDataTypeInput(MathsInput):
    """Inputs for ChangeDataType: the output datatype becomes mandatory."""
    _dtypes = ["float", "char", "int", "short", "double", "input"]
    output_datatype = traits.Enum(*_dtypes,
                                  position=-1, argstr="-odt %s",
                                  mandatory=True,
                                  desc="output data type")
class ChangeDataType(MathsCommand):
    """Use fslmaths to change the datatype of an image.
    """
    input_spec = ChangeDataTypeInput
    _suffix = "_chdt"
class ThresholdInputSpec(MathsInput):
    """Inputs for Threshold; flag assembly happens in Threshold._format_arg."""
    thresh = traits.Float(mandatory=True, position=4, argstr="%s",
                          desc="threshold value")
    direction = traits.Enum("below", "above", usedefault=True,
                            desc="zero-out either below or above thresh value")
    use_robust_range = traits.Bool(
        desc="interpret thresh as percentage (0-100) of robust range")
    use_nonzero_voxels = traits.Bool(
        desc="use nonzero voxels to calculate robust range",
        requires=["use_robust_range"])
class Threshold(MathsCommand):
    """Use fslmaths to apply a threshold to an image in a variety of ways.
    """
    input_spec = ThresholdInputSpec
    _suffix = "_thresh"
    def _format_arg(self, name, spec, value):
        """Assemble -thr/-uthr (with robust-range p/P variants) for thresh."""
        if name != "thresh":
            return super(Threshold, self)._format_arg(name, spec, value)
        _si = self.inputs
        # Direction picks plain (-thr) vs upper (-uthr) thresholding.
        arg = "-"
        if _si.direction == "above":
            arg += "u"
        arg += "thr"
        if isdefined(_si.use_robust_range) and _si.use_robust_range:
            # 'P' uses nonzero voxels for the robust range, 'p' all voxels.
            if isdefined(_si.use_nonzero_voxels) and _si.use_nonzero_voxels:
                arg += "P"
            else:
                arg += "p"
        return arg + (" %.10f" % value)
class StdImageInput(MathsInput):
    """Inputs for StdImage."""
    dimension = traits.Enum("T", "X", "Y", "Z", usedefault=True,
                            argstr="-%sstd", position=4,
                            desc="dimension to standard deviate across")
class StdImage(MathsCommand):
    """Use fslmaths to generate a standard deviation in an image across a given
    dimension.
    """
    input_spec = StdImageInput
    _suffix = "_std"
class MeanImageInput(MathsInput):
    """Inputs for MeanImage."""
    dimension = traits.Enum("T", "X", "Y", "Z", usedefault=True,
                            argstr="-%smean", position=4,
                            desc="dimension to mean across")
class MeanImage(MathsCommand):
    """Use fslmaths to generate a mean image across a given dimension.
    """
    input_spec = MeanImageInput
    _suffix = "_mean"
class MaxImageInput(MathsInput):
    """Inputs for MaxImage."""
    dimension = traits.Enum("T", "X", "Y", "Z", usedefault=True,
                            argstr="-%smax", position=4,
                            desc="dimension to max across")
class MaxImage(MathsCommand):
    """Use fslmaths to generate a max image across a given dimension.
    Examples
    --------
    >>> from nipype.interfaces.fsl.maths import MaxImage
    >>> maxer = MaxImage()
    >>> maxer.inputs.in_file = "functional.nii" # doctest: +SKIP
    >>> maxer.dimension = "T"
    >>> maxer.cmdline # doctest: +SKIP
    'fslmaths functional.nii -Tmax functional_max.nii'
    """
    input_spec = MaxImageInput
    _suffix = "_max"
class PercentileImageInput(MathsInput):
    """Inputs for PercentileImage."""
    dimension = traits.Enum("T", "X", "Y", "Z", usedefault=True,
                            argstr="-%sperc", position=4,
                            desc="dimension to percentile across")
    perc = traits.Range(low=0, high=100, usedefault=False,
                        argstr="%f", position=5,
                        desc=("nth percentile (0-100) of FULL RANGE "
                              "across dimension"))
class PercentileImage(MathsCommand):
    """Use fslmaths to generate a percentile image across a given dimension.
    Examples
    --------
    >>> from nipype.interfaces.fsl.maths import MaxImage
    >>> percer = PercentileImage()
    >>> percer.inputs.in_file = "functional.nii" # doctest: +SKIP
    >>> percer.dimension = "T"
    >>> percer.perc = 90
    >>> percer.cmdline # doctest: +SKIP
    'fslmaths functional.nii -Tperc 90 functional_perc.nii'
    """
    input_spec = PercentileImageInput
    _suffix = "_perc"
class MaxnImageInput(MathsInput):
    """Inputs for MaxnImage."""
    dimension = traits.Enum("T", "X", "Y", "Z", usedefault=True,
                            argstr="-%smaxn", position=4,
                            desc="dimension to index max across")
class MaxnImage(MathsCommand):
    """Use fslmaths to generate an image of index of max across
    a given dimension.
    """
    input_spec = MaxnImageInput
    _suffix = "_maxn"
class MinImageInput(MathsInput):
    """Inputs for MinImage."""
    dimension = traits.Enum("T", "X", "Y", "Z", usedefault=True,
                            argstr="-%smin", position=4,
                            desc="dimension to min across")
class MinImage(MathsCommand):
    """Use fslmaths to generate a minimum image across a given dimension.
    """
    input_spec = MinImageInput
    _suffix = "_min"
class MedianImageInput(MathsInput):
    """Inputs for MedianImage."""
    dimension = traits.Enum("T", "X", "Y", "Z", usedefault=True,
                            argstr="-%smedian", position=4,
                            desc="dimension to median across")
class MedianImage(MathsCommand):
    """Use fslmaths to generate a median image across a given dimension.
    """
    input_spec = MedianImageInput
    _suffix = "_median"
class AR1ImageInput(MathsInput):
    """Inputs for AR1Image."""
    dimension = traits.Enum("T", "X", "Y", "Z", usedefault=True,
                            argstr="-%sar1", position=4,
                            # FIX: the implicitly-joined string halves were
                            # missing a separating space ("coefficientacross").
                            desc=("dimension to find AR(1) coefficient "
                                  "across"))
class AR1Image(MathsCommand):
    """Use fslmaths to generate an AR1 coefficient image across a
    given dimension. (Should use -odt float and probably demean first)
    """
    # AR(1): lag-1 temporal autocorrelation coefficient.
    input_spec = AR1ImageInput
    _suffix = "_ar1"
class IsotropicSmoothInput(MathsInput):
    """Inputs for IsotropicSmooth; exactly one of fwhm/sigma must be set."""
    fwhm = traits.Float(mandatory=True, xor=["sigma"],
                        position=4, argstr="-s %.5f",
                        desc="fwhm of smoothing kernel [mm]")
    sigma = traits.Float(mandatory=True, xor=["fwhm"],
                         position=4, argstr="-s %.5f",
                         desc="sigma of smoothing kernel [mm]")
class IsotropicSmooth(MathsCommand):
    """Use fslmaths to spatially smooth an image with a gaussian kernel.
    """
    input_spec = IsotropicSmoothInput
    _suffix = "_smooth"
    def _format_arg(self, name, spec, value):
        """Convert a user-facing FWHM into the sigma fslmaths expects."""
        if name != "fwhm":
            return super(IsotropicSmooth, self)._format_arg(name, spec, value)
        # FWHM = sigma * sqrt(8 * ln 2) for a gaussian kernel.
        return spec.argstr % (float(value) / np.sqrt(8 * np.log(2)))
class ApplyMaskInput(MathsInput):
    """Inputs for ApplyMask."""
    mask_file = File(exists=True, mandatory=True, argstr="-mas %s", position=4,
                     desc="binary image defining mask space")
class ApplyMask(MathsCommand):
    """Use fslmaths to apply a binary mask to another image.
    """
    input_spec = ApplyMaskInput
    _suffix = "_masked"
class KernelInput(MathsInput):
    """Shared inputs describing a spatial kernel (shape plus size or file)."""
    kernel_shape = traits.Enum("3D", "2D", "box", "boxv", "gauss", "sphere",
                               "file",
                               argstr="-kernel %s", position=4,
                               desc="kernel shape to use")
    kernel_size = traits.Float(argstr="%.4f", position=5, xor=["kernel_file"],
                               desc=("kernel size - voxels for box/boxv, mm "
                                     "for sphere, mm sigma for gauss"))
    kernel_file = File(exists=True, argstr="%s", position=5,
                       xor=["kernel_size"],
                       desc="use external file for kernel")
class DilateInput(KernelInput):
    """Inputs for DilateImage."""
    operation = traits.Enum("mean", "modal", "max", argstr="-dil%s",
                            position=6, mandatory=True,
                            # FIX: typo "perfoem" in the user-facing description.
                            desc="filtering operation to perform in dilation")
class DilateImage(MathsCommand):
    """Use fslmaths to perform a spatial dilation of an image.
    """
    input_spec = DilateInput
    _suffix = "_dil"
    def _format_arg(self, name, spec, value):
        """Map the operation name onto fslmaths' -dilM/-dilD/-dilF flags."""
        if name != "operation":
            return super(DilateImage, self)._format_arg(name, spec, value)
        flag = {"mean": "M", "modal": "D", "max": "F"}[value]
        return spec.argstr % flag
class ErodeInput(KernelInput):
    """Inputs for ErodeImage."""
    minimum_filter = traits.Bool(argstr="%s", position=6, usedefault=True,
                                 default_value=False,
                                 desc=("if true, minimum filter rather than "
                                       "erosion by zeroing-out"))
class ErodeImage(MathsCommand):
    """Use fslmaths to perform a spatial erosion of an image.
    """
    input_spec = ErodeInput
    _suffix = "_ero"
    def _format_arg(self, name, spec, value):
        """minimum_filter=True selects -eroF; otherwise plain -ero."""
        if name != "minimum_filter":
            return super(ErodeImage, self)._format_arg(name, spec, value)
        return "-eroF" if value else "-ero"
class SpatialFilterInput(KernelInput):
    """Inputs for SpatialFilter."""
    operation = traits.Enum("mean", "median", "meanu", argstr="-f%s",
                            position=6, mandatory=True,
                            desc="operation to filter with")
class SpatialFilter(MathsCommand):
    """Use fslmaths to spatially filter an image.
    """
    input_spec = SpatialFilterInput
    _suffix = "_filt"
class UnaryMathsInput(MathsInput):
    """Inputs for UnaryMaths: one single-operand fslmaths operation."""
    operation = traits.Enum("exp", "log", "sin", "cos", "tan", "asin", "acos",
                            "atan", "sqr", "sqrt", "recip", "abs", "bin",
                            "binv", "fillh", "fillh26", "index", "edge", "nan",
                            "nanm", "rand", "randn", "range",
                            argstr="-%s", position=4, mandatory=True,
                            desc="operation to perform")
class UnaryMaths(MathsCommand):
    """Use fslmaths to perform a variety of mathematical operations on an image.
    """
    input_spec = UnaryMathsInput
    def _list_outputs(self):
        # Output suffix tracks the chosen operation, e.g. foo_sqrt.
        self._suffix = "_" + self.inputs.operation
        return super(UnaryMaths, self)._list_outputs()
class BinaryMathsInput(MathsInput):
    """Inputs for BinaryMaths: the operand is either a file or a constant."""
    operation = traits.Enum("add", "sub", "mul", "div", "rem", "max", "min",
                            mandatory=True, argstr="-%s", position=4,
                            desc="operation to perform")
    operand_file = File(exists=True, argstr="%s", mandatory=True,
                        position=5, xor=["operand_value"],
                        desc="second image to perform operation with")
    operand_value = traits.Float(argstr="%.8f", mandatory=True,
                                 position=5, xor=["operand_file"],
                                 desc="value to perform operation with")
class BinaryMaths(MathsCommand):
    """Use fslmaths to perform mathematical operations using a second image or
    a numeric value.
    """
    input_spec = BinaryMathsInput
class MultiImageMathsInput(MathsInput):
    """Inputs for MultiImageMaths."""
    op_string = traits.String(position=4, argstr="%s", mandatory=True,
                              desc=("python formatted string of operations "
                                    "to perform"))
    operand_files = InputMultiPath(File(exists=True), mandatory=True,
                                   desc=("list of file names to plug into op "
                                         "string"))
class MultiImageMaths(MathsCommand):
    """Use fslmaths to perform a sequence of mathematical operations.
    Examples
    --------
    >>> from nipype.interfaces.fsl import MultiImageMaths
    >>> maths = MultiImageMaths()
    >>> maths.inputs.in_file = "functional.nii"
    >>> maths.inputs.op_string = "-add %s -mul -1 -div %s"
    >>> maths.inputs.operand_files = ["functional2.nii", "functional3.nii"]
    >>> maths.inputs.out_file = "functional4.nii"
    >>> maths.cmdline # doctest: +ALLOW_UNICODE
    'fslmaths functional.nii -add functional2.nii -mul -1 -div functional3.nii functional4.nii'
    """
    input_spec = MultiImageMathsInput
    def _format_arg(self, name, spec, value):
        """Substitute each %s placeholder with the matching operand file."""
        if name != "op_string":
            return super(MultiImageMaths, self)._format_arg(name, spec, value)
        return value % tuple(self.inputs.operand_files)
class TemporalFilterInput(MathsInput):
    """Inputs for TemporalFilter; a sigma of -1 disables that filter pass."""
    lowpass_sigma = traits.Float(-1, argstr="%.6f", position=5,
                                 usedefault=True,
                                 desc="lowpass filter sigma (in volumes)")
    highpass_sigma = traits.Float(-1, argstr="-bptf %.6f", position=4,
                                  usedefault=True,
                                  desc="highpass filter sigma (in volumes)")
class TemporalFilter(MathsCommand):
    """Use fslmaths to apply a low, high, or bandpass temporal filter to a
    timeseries.
    """
    input_spec = TemporalFilterInput
    _suffix = "_filt"
| |
#!/usr/bin/env python2.6
# coding: utf-8
import conf
import logging
import traceback
import logging.handlers
import time
from stat import ST_DEV, ST_INO
import os.path
import os
import sys
# Log line format: timestamp, pid-tid, source file, line, level, message.
# DEFAULT_FORMAT = "%(levelcolor)s[%(asctime)s,%(process)d,%(filename)s,%(lineno)d,%(levelname)s]%(endcolor)s %(message)s"
DEFAULT_FORMAT = "[%(asctime)s,%(process)d-%(thread)d,%(filename)s,%(lineno)d,%(levelname)s] %(message)s"
#DEFAULT_FORMAT = "[%(asctime)s] %(levelname)5s #%(process)5d %(message)s"
DEFAULT_DATETIME_FORMAT = '%m%d-%H:%M:%S'
# Streams that already have a std handler attached (see add_std_handler).
stdHandlerSet = set()
# Derive the application name from argv[0]: basename, minus any .py suffix.
APPNAME = sys.argv[0]
APPNAME = APPNAME.split( '/' )[ -1 ]
if APPNAME.endswith( '.py' ):
    APPNAME = APPNAME[:-3]
if APPNAME == '-c':
    # Running via `python -c ...`: there is no script name to use.
    APPNAME = '_instantCommand'
    LOG_FILENAME = '_instantCommand'
else:
    if os.name != 'nt' :
        #LOG_FILENAME = os.path.join( conf.PATH['log_dir'], APPNAME+'.out' )
        LOG_FILENAME = os.path.join( '/tmp', APPNAME+'.out' )
    else :
        # No sensible default log path on Windows.
        LOG_FILENAME = ''
# Level-check helpers: guard expensive argument construction in logging calls.
def iflog( lvl ):
    """True when the module logger would emit records at ``lvl``."""
    return logger.getEffectiveLevel() <= lvl
def ifdebug(): return iflog( logging.DEBUG )
def ifinfo(): return iflog( logging.INFO )
def ifwarn(): return iflog( logging.WARNING )
def iferror(): return iflog( logging.ERROR )
def ifcritical(): return iflog( logging.CRITICAL )
# Custom level between INFO (20) and WARNING (30), used by MyLogger.notified.
logging.NOTIFIED = 25
logging.addLevelName( logging.NOTIFIED, 'NOTIFIED' )
class MyFormatter( logging.Formatter ):
    """Formatter exposing %(levelcolor)s / %(endcolor)s ANSI escape fields."""
    # ANSI SGR foreground codes 30..37, keyed by colour name.
    _names = [ 'grey', 'red', 'green', 'yellow',
               'blue', 'magenta', 'cyan', 'white' ]
    COLORS = dict( zip( _names, range(30, 30 + len(_names)) ) )
    # Colour per level name; unknown levels fall back to SGR 0 (reset).
    _colordict = {
        'DEBUG' : COLORS['blue'],
        'INFO' : COLORS['green'],
        'WARNING' : COLORS['yellow'],
        'ERROR' : COLORS['red'],
        'CRITICAL' : COLORS['magenta'],
    }
    def format( self, record ) :
        """Attach colour attributes to the record, then format normally."""
        code = self._colordict.get( record.levelname, 0 )
        record.levelcolor = '\033[%dm' % (code,)
        record.endcolor = '\033[0m'
        return logging.Formatter.format( self, record )
class ctimeFormatter( logging.Formatter ):
    """Formatter whose asctime field is the integer Unix epoch time."""
    def formatTime( self, record, datefmt=None ):
        # datefmt is ignored by design: timestamps are plain epoch seconds.
        return '%d' % int( time.time() )
class MyLogger( logging.getLoggerClass() ):
    """Logger subclass adding .notified() for the custom NOTIFIED level."""
    def notified( self, msg, *args, **kwargs ):
        # logging.NOTIFIED (25) is registered at module import time above.
        return self.log( logging.NOTIFIED, msg, *args, **kwargs )
# Install MyLogger as the default logger class (POSIX only; on Windows this
# module keeps the stock logger class and leaves `logger` as None below).
if os.name != 'nt' :
    logging.setLoggerClass( MyLogger )
def reset_defaults( appname = None ):
    """Recompute APPNAME and LOG_FILENAME, optionally from an explicit name.

    Unlike the import-time default (which logs under /tmp), this points
    LOG_FILENAME at conf.PATH['log_dir'].
    """
    global APPNAME
    global LOG_FILENAME
    # Idiom fix: compare against None with `is not None` rather than `!=`.
    APPNAME = appname if appname is not None else sys.argv[0]
    APPNAME = APPNAME.split( '/' )[ -1 ]
    if APPNAME.endswith( '.py' ):
        APPNAME = APPNAME[:-3]
    LOG_FILENAME = os.path.join( conf.PATH['log_dir'], APPNAME+'.out' )
def getdefaultlogger( appname = None ):
    """Return the named logger, defaulting to the shared 'genlogger'."""
    return logging.getLogger( appname or 'genlogger' )
class S3LogHandler( logging.handlers.WatchedFileHandler ):
    """
    WatchedFileHandler variant that tolerates the log file disappearing.

    Fix the bug that stat checking after existence checking of log file
    raises an OSError: stat() is attempted directly and any OSError (file
    missing, replaced, etc.) simply forces the stream to be reopened.
    """
    def emit(self, record):
        try:
            stat = os.stat(self.baseFilename)
            changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
        except OSError:
            # FIX: was `except OSError, e` - Python-2-only syntax; the bound
            # exception was never used. This form runs on Python 2 and 3.
            stat = None
            changed = 1
        if changed and self.stream is not None:
            # Underlying inode changed or vanished: flush, reopen, re-stat.
            self.stream.flush()
            self.stream.close()
            self.stream = self._open()
            if stat is None:
                stat = os.stat(self.baseFilename)
            self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
        logging.FileHandler.emit(self, record)
def createlogger ( appname = None, level = logging.DEBUG,
                   format = None, formatter = logging.Formatter ):
    """Create (or reconfigure) a logger writing to the app's .out file.

    Any handlers previously attached to the logger are discarded, so
    calling this repeatedly is idempotent.
    """
    if appname is None :
        filename = LOG_FILENAME
    else :
        filename = os.path.join( conf.PATH['log_dir'], appname+'.out' )
    target = logging.getLogger( appname or 'genlogger' )
    target.setLevel( level )
    # S3LogHandler (a WatchedFileHandler) automatically reopens <filename>
    # if the log file is rotated or moved out from under us.
    handler = S3LogHandler( filename )
    handler.setFormatter( formatter( format or DEFAULT_FORMAT ) )
    # Replace (not append to) any existing handlers.
    target.handlers = []
    target.addHandler( handler )
    return target
def add_std_handler( logger, stream = None, format = None, datefmt = None ):
    ''' Attach a StreamHandler (default sys.stdout) to ``logger``.

    Each stream gets at most one handler process-wide, tracked in
    stdHandlerSet; repeat calls for the same stream are no-ops.
    '''
    target = stream or sys.stdout
    if target not in stdHandlerSet:
        stdHandlerSet.add( target )
        handler = logging.StreamHandler( target )
        handler.setFormatter( logging.Formatter( format or DEFAULT_FORMAT,
                                                 datefmt ) )
        logger.addHandler( handler )
    return logger
def setloggerformat( format, datefmt = None ):
    """Apply a new format to every handler of the shared 'genlogger'."""
    target = logging.getLogger('genlogger')
    fmt = logging.Formatter( format, datefmt )
    for h in target.handlers :
        h.setFormatter( fmt )
    return target
# Module-level default logger. On Windows no log path is configured, so the
# module logger stays None there (callers must create their own).
if os.name != 'nt' :
    logger = createlogger()
else :
    logger = None
def _trace( log_fn, e ):
    # Emit the active traceback followed by the triggering exception.
    log_fn( traceback.format_exc() )
    log_fn( repr( e ) )
def trace_warn( e ):
    """Log the active exception and its traceback at WARNING level."""
    _trace( logger.warn, e )
def trace_error( e ):
    """Log the active exception and its traceback at ERROR level."""
    _trace( logger.error, e )
def stack_list( offset=0 ):
    """Return the current call stack (outermost first), excluding this
    helper's own frame plus ``offset`` additional innermost frames.

    Entries are (filename, line-nr, function, statement) records.
    """
    frames = traceback.extract_stack()
    return frames[ : -(offset + 1) ]
def format_stack( stacks ):
    """Render stack entries as 'file:line statement' joined by ' --- '."""
    rendered = ( "{0}:{1} {3}".format( *entry ) for entry in stacks )
    return ' --- '.join( rendered )
def stack_str( offset=0 ):
    """Return the formatted call stack, excluding this helper itself."""
    # +1 skips stack_str's own frame.
    return format_stack( stack_list( offset + 1 ) )
def deprecate( lgr=None, mes='' ):
    """Log a deprecation warning with the caller's call stack appended."""
    target = lgr or logger
    prefix = (mes + ' ') if mes != '' else ''
    target.warn( prefix + "Deprecated: " + stack_str( offset=2 ) )
def setdefaultlogger():
    """Reinitialise the module-level logger (e.g. after fork/daemonise)."""
    global logger
    #logger = getdefaultlogger()
    logger = createlogger()
    logger.debug( ' genlog Reopened' )
if __name__ == '__main__' :
    # Smoke test: mirror log output to stdout and exercise the handlers.
    add_std_handler( logger )
    logger.debug( '123' )
    import glob
    # Log some messages
    for i in range(20):
        logger.debug('i = %d' % i)
    def foo ():
        logger.debug('print in %(funcName)s')
    createlogger('wahaha')
    for i in range(20):
        logger.debug('i = %d' % i)
    # See what files are created
    logfiles = glob.glob('%s*' % LOG_FILENAME)
    for filename in logfiles:
        # FIX: use the function-call print form so this also runs on
        # Python 3 (was the Python-2-only statement `print filename`).
        print(filename)
| |
#STANDARD LIB
import datetime
import decimal
import warnings
#LIBRARIES
from django.conf import settings
from django.db.backends import (
BaseDatabaseOperations,
BaseDatabaseClient,
BaseDatabaseIntrospection,
BaseDatabaseWrapper,
BaseDatabaseFeatures,
BaseDatabaseValidation
)
try:
from django.db.backends.schema import BaseDatabaseSchemaEditor
except ImportError:
#Django < 1.7 doesn't have BaseDatabaseSchemaEditor
class BaseDatabaseSchemaEditor(object):
pass
from django.db.backends.creation import BaseDatabaseCreation
from django.utils import timezone
from google.appengine.api.datastore_types import Blob, Text
from google.appengine.ext.db import metadata
from google.appengine.datastore import datastore_stub_util
from google.appengine.api.datastore import Key
from google.appengine.api import datastore
#DJANGAE
from djangae.db.utils import (
decimal_to_string,
make_timezone_naive,
get_datastore_key,
)
from djangae.db import caching
from djangae.indexing import load_special_indexes
from .commands import (
SelectCommand,
InsertCommand,
FlushCommand,
UpdateCommand,
DeleteCommand,
coerce_unicode,
get_field_from_column
)
from djangae.db.backends.appengine import dbapi as Database
class Connection(object):
    """Dummy connection object satisfying Django's DB-API expectations."""
    def __init__(self, wrapper, params):
        # Share the wrapper's creation/ops helpers with cursor consumers.
        self.creation = wrapper.creation
        self.ops = wrapper.ops
        self.params = params
        self.queries = []
    def commit(self):
        """No-op: there is no SQL transaction to commit here."""
        pass
    def rollback(self):
        """No-op: nothing to roll back."""
        pass
    def close(self):
        """No-op: no underlying socket or file to close."""
        pass
class Cursor(object):
    """ Dummy cursor class """
    def __init__(self, connection):
        self.connection = connection
        self.start_cursor = None
        # Datastore keys produced by inserts / harvested from key queries.
        self.returned_ids = []
        self.rowcount = -1
        self.last_select_command = None
        self.last_delete_command = None
    def execute(self, sql, *params):
        # ``sql`` must be one of the backend's command objects, never SQL text.
        if isinstance(sql, SelectCommand):
            # Also catches subclasses of SelectCommand (e.g Update)
            self.last_select_command = sql
            self.rowcount = self.last_select_command.execute() or -1
        elif isinstance(sql, FlushCommand):
            sql.execute()
        elif isinstance(sql, UpdateCommand):
            self.rowcount = sql.execute()
        elif isinstance(sql, DeleteCommand):
            self.rowcount = sql.execute()
        elif isinstance(sql, InsertCommand):
            self.connection.queries.append(sql)
            self.returned_ids = sql.execute()
        else:
            raise Database.CouldBeSupportedError("Can't execute traditional SQL: '%s' (although perhaps we could make GQL work)", sql)
    def next(self):
        # Python 2 iterator protocol; delegates to fetchone().
        row = self.fetchone()
        if row is None:
            raise StopIteration
        return row
    def fetchone(self, delete_flag=False):
        # Returns the next result row (a list), an aggregate as a 1-tuple,
        # or None when exhausted. NOTE: `long` makes this Python-2-only.
        try:
            if isinstance(self.last_select_command.results, (int, long)):
                # Handle aggregate (e.g. count)
                return (self.last_select_command.results, )
            else:
                entity = self.last_select_command.next_result()
        except StopIteration: #FIXME: does this ever get raised? Where from?
            entity = None
        if entity is None:
            return None
        ## FIXME: Move this to SelectCommand.next_result()
        result = []
        # If there is extra_select prepend values to the results list
        for col, query in self.last_select_command.extra_select.items():
            result.append(entity.get(col))
        for col in self.last_select_command.queried_fields:
            if col == "__key__":
                # Keys-only queries yield Key objects directly; otherwise pull
                # the key off the entity. Remember it for lastrowid.
                key = entity if isinstance(entity, Key) else entity.key()
                self.returned_ids.append(key)
                result.append(key.id_or_name())
            else:
                # Convert the raw datastore value via the Django field type.
                field = get_field_from_column(self.last_select_command.model, col)
                value = self.connection.ops.convert_values(entity.get(col), field)
                result.append(value)
        return result
    def fetchmany(self, size, delete_flag=False):
        # Return up to ``size`` rows; fewer (or []) once results run out.
        if not self.last_select_command.results:
            return []
        result = []
        i = 0
        while i < size:
            entity = self.fetchone(delete_flag)
            if entity is None:
                break
            result.append(entity)
            i += 1
        return result
    @property
    def lastrowid(self):
        # Key of the most recent insert (its numeric id or string name).
        return self.returned_ids[-1].id_or_name()
    def __iter__(self):
        return self
    def close(self):
        pass
# 2**63: the datastore stores all integers as signed 64-bit values.
MAXINT = 9223372036854775808
class DatabaseOperations(BaseDatabaseOperations):
    """Value conversion and SQL-surface operations for the datastore backend.

    The main responsibilities are converting between Django field values and
    the representations the App Engine datastore stores (value_to_db_* /
    value_from_db_*), and preparing lookup values for queries.
    """
    compiler_module = "djangae.db.backends.appengine.compiler"

    # Datastore will store all integers as 64bit long values
    integer_field_ranges = {
        'SmallIntegerField': (-MAXINT, MAXINT-1),
        'IntegerField': (-MAXINT, MAXINT-1),
        'BigIntegerField': (-MAXINT, MAXINT-1),
        'PositiveSmallIntegerField': (0, MAXINT-1),
        'PositiveIntegerField': (0, MAXINT-1),
    }

    def quote_name(self, name):
        # Datastore kind/property names need no quoting.
        return name

    def convert_values(self, value, field):
        """ Called when returning values from the datastore"""
        value = super(DatabaseOperations, self).convert_values(value, field)
        db_type = field.db_type(self.connection)
        if db_type == 'string' and isinstance(value, str):
            value = value.decode("utf-8")
        elif db_type == "datetime":
            value = self.connection.ops.value_from_db_datetime(value)
        elif db_type == "date":
            value = self.connection.ops.value_from_db_date(value)
        elif db_type == "time":
            value = self.connection.ops.value_from_db_time(value)
        elif db_type == "decimal":
            value = self.connection.ops.value_from_db_decimal(value)
        elif db_type == 'list':
            if not value:
                value = []  # Convert None back to an empty list
        elif db_type == 'set':
            if not value:
                value = set()
            else:
                value = set(value)
        return value

    def sql_flush(self, style, tables, seqs, allow_cascade=False):
        # One FlushCommand per kind; the cursor executes these instead of SQL.
        return [FlushCommand(table) for table in tables]

    def prep_lookup_key(self, model, value, field):
        """Convert a primary-key lookup value into a datastore Key."""
        if isinstance(value, basestring):
            # Datastore key names are limited to 500 characters.
            value = value[:500]
            left = value[500:]
            if left:
                warnings.warn("Truncating primary key that is over 500 characters. "
                              "THIS IS AN ERROR IN YOUR PROGRAM.",
                              RuntimeWarning)
            # This is a bit of a hack. Basically when you query an integer PK with a
            # string containing an int. SQL seems to return the row regardless of type, and as far as
            # I can tell, Django at no point tries to cast the value to an integer. So, in the
            # case where the internal type is an AutoField, we try to cast the string value
            # I would love a more generic solution... patches welcome!
            # It would be nice to see the SQL output of the lookup_int_as_str test is on SQL, if
            # the string is converted to an int, I'd love to know where!
            if field.get_internal_type() == 'AutoField':
                try:
                    value = int(value)
                except (TypeError, ValueError):
                    pass

            value = get_datastore_key(model, value)
        else:
            value = get_datastore_key(model, value)
        return value

    def prep_lookup_decimal(self, model, value, field):
        return self.value_to_db_decimal(value, field.max_digits, field.decimal_places)

    def prep_lookup_date(self, model, value, field):
        if isinstance(value, datetime.datetime):
            return value
        return self.value_to_db_date(value)

    def prep_lookup_time(self, model, value, field):
        if isinstance(value, datetime.datetime):
            return value
        return self.value_to_db_time(value)

    def prep_lookup_value(self, model, value, field, column=None):
        """Prepare `value` for use in a datastore query filter."""
        if field.primary_key and (not column or column == model._meta.pk.column):
            return self.prep_lookup_key(model, value, field)

        db_type = field.db_type(self.connection)
        if db_type == 'decimal':
            return self.prep_lookup_decimal(model, value, field)
        elif db_type == 'date':
            return self.prep_lookup_date(model, value, field)
        elif db_type == 'time':
            return self.prep_lookup_time(model, value, field)
        elif db_type in ('list', 'set'):
            if hasattr(value, "__len__") and not value:
                value = None  # Convert empty lists to None
            elif hasattr(value, "__iter__"):
                # Convert sets to lists
                value = list(value)
        return value

    def value_for_db(self, value, field):
        """Convert a python value to what will actually be stored."""
        if value is None:
            return None

        db_type = field.db_type(self.connection)
        if db_type == 'string' or db_type == 'text':
            value = coerce_unicode(value)
            if db_type == 'text':
                value = Text(value)
        elif db_type == 'bytes':
            # Store BlobField, DictField and EmbeddedModelField values as Blobs.
            value = Blob(value)
        elif db_type == 'decimal':
            value = self.value_to_db_decimal(value, field.max_digits, field.decimal_places)
        elif db_type in ('list', 'set'):
            if hasattr(value, "__len__") and not value:
                value = None  # Convert empty lists to None
            elif hasattr(value, "__iter__"):
                # Convert sets to lists
                value = list(value)
        return value

    def last_insert_id(self, cursor, db_table, column):
        return cursor.lastrowid

    def fetch_returned_insert_id(self, cursor):
        return cursor.lastrowid

    def value_to_db_datetime(self, value):
        value = make_timezone_naive(value)
        return value

    def value_to_db_date(self, value):
        if value is not None:
            # Dates are stored as midnight datetimes.
            value = datetime.datetime.combine(value, datetime.time())
        return value

    def value_to_db_time(self, value):
        if value is not None:
            value = make_timezone_naive(value)
            # Times are stored as datetimes on the epoch date.
            value = datetime.datetime.combine(datetime.datetime.fromtimestamp(0), value)
        return value

    def value_to_db_decimal(self, value, max_digits, decimal_places):
        if isinstance(value, decimal.Decimal):
            return decimal_to_string(value, max_digits, decimal_places)
        return value

    # Unlike value_to_db, these are not overridden or standard Django, it's just nice to have symmetry
    def value_from_db_datetime(self, value):
        if isinstance(value, (int, long)):
            # App Engine Query's don't return datetime fields (unlike Get) I HAVE NO IDEA WHY
            value = datetime.datetime.fromtimestamp(float(value) / 1000000.0)

        if value is not None and settings.USE_TZ and timezone.is_naive(value):
            value = value.replace(tzinfo=timezone.utc)
        return value

    def value_from_db_date(self, value):
        if isinstance(value, (int, long)):
            # App Engine Query's don't return datetime fields (unlike Get) I HAVE NO IDEA WHY
            value = datetime.datetime.fromtimestamp(float(value) / 1000000.0)

        if value:
            value = value.date()
        return value

    def value_from_db_time(self, value):
        if isinstance(value, (int, long)):
            # App Engine Query's don't return datetime fields (unlike Get) I HAVE NO IDEA WHY
            value = datetime.datetime.fromtimestamp(float(value) / 1000000.0).time()

        if value is not None and settings.USE_TZ and timezone.is_naive(value):
            value = value.replace(tzinfo=timezone.utc)

        # BUG FIX: only datetimes still need truncating to a time here. The
        # previous code called value.time() unconditionally, which raised
        # AttributeError whenever the integer branch above had already
        # produced a datetime.time, and its `if value:` guard silently
        # skipped midnight (a falsy time value in older Pythons).
        if isinstance(value, datetime.datetime):
            value = value.time()
        return value

    def value_from_db_decimal(self, value):
        if value:
            value = decimal.Decimal(value)
        return value
class DatabaseClient(BaseDatabaseClient):
    # The datastore has no interactive shell client; the base no-ops suffice.
    pass
class DatabaseCreation(BaseDatabaseCreation):
    """Test-database lifecycle management backed by the App Engine testbed.

    The datastore is schemaless, so all sql_* hooks return empty DDL; test
    setup/teardown activates and deactivates an in-memory testbed instead.
    """
    # Mapping of Django field internal types to datastore storage types.
    data_types = {
        'AutoField': 'key',
        'RelatedAutoField': 'key',
        'ForeignKey': 'key',
        'OneToOneField': 'key',
        'ManyToManyField': 'key',
        'BigIntegerField': 'long',
        'BooleanField': 'bool',
        'CharField': 'string',
        'CommaSeparatedIntegerField': 'string',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'decimal',
        'EmailField': 'string',
        'FileField': 'string',
        'FilePathField': 'string',
        'FloatField': 'float',
        'ImageField': 'string',
        'IntegerField': 'integer',
        'IPAddressField': 'string',
        'NullBooleanField': 'bool',
        'PositiveIntegerField': 'integer',
        'PositiveSmallIntegerField': 'integer',
        'SlugField': 'string',
        'SmallIntegerField': 'integer',
        'TimeField': 'time',
        'URLField': 'string',
        'TextField': 'text',
        'XMLField': 'text',
    }

    def __init__(self, *args, **kwargs):
        # The testbed is created lazily in _create_test_db().
        self.testbed = None
        super(DatabaseCreation, self).__init__(*args, **kwargs)

    def sql_create_model(self, model, *args, **kwargs):
        # Schemaless: no DDL is required to "create" a kind.
        return [], {}

    def sql_for_pending_references(self, model, *args, **kwargs):
        return []

    def sql_indexes_for_model(self, model, *args, **kwargs):
        return []

    def _create_test_db(self, verbosity, autoclobber):
        """Activate an in-memory datastore + memcache testbed for tests."""
        from google.appengine.ext import testbed  # Imported lazily to prevent warnings on GAE

        assert not self.testbed

        # We allow users to disable scattered IDs in tests. This primarily for running Django tests that
        # assume implicit ordering (yeah, annoying)
        use_scattered = not getattr(settings, "DJANGAE_SEQUENTIAL_IDS_IN_TESTS", False)

        kwargs = {
            "use_sqlite": True,
            "auto_id_policy": testbed.AUTO_ID_POLICY_SCATTERED if use_scattered else testbed.AUTO_ID_POLICY_SEQUENTIAL,
            "consistency_policy": datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
        }

        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub(**kwargs)
        self.testbed.init_memcache_stub()
        caching.clear_context_cache()

    def _destroy_test_db(self, name, verbosity):
        """Deactivate and discard the testbed created by _create_test_db()."""
        if self.testbed:
            caching.clear_context_cache()
            self.testbed.deactivate()
            self.testbed = None
class DatabaseIntrospection(BaseDatabaseIntrospection):
    """Minimal introspection: datastore 'tables' are entity kinds."""
    @datastore.NonTransactional
    def get_table_list(self, cursor):
        # Metadata queries are run outside any active transaction.
        return metadata.get_kinds()
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
    """Schema operations are no-ops: the datastore is schemaless."""
    def column_sql(self, model, field):
        # No column DDL exists; return empty SQL and no params.
        return "", {}
    def create_model(self, model):
        """ Don't do anything when creating tables """
        pass
    def alter_unique_together(self, *args, **kwargs):
        # Uniqueness is not enforced via schema changes on the datastore.
        pass
class DatabaseFeatures(BaseDatabaseFeatures):
    # Feature flags telling Django what this backend supports.
    empty_fetchmany_value = []
    supports_transactions = False  # FIXME: Make this True!
    can_return_id_from_insert = True
    supports_select_related = False
    autocommits_when_autocommit_is_off = True
    uses_savepoints = False
    allows_auto_pk_0 = False
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database wrapper for the App Engine datastore backend."""
    # Only these comparison operators can be expressed as datastore filters.
    operators = {
        'exact': '= %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s'
    }
    Database = Database

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)
        self.autocommit = True

    def is_usable(self):
        return True

    def get_connection_params(self):
        # No host/port/credentials: the datastore is ambient on App Engine.
        return {}

    def get_new_connection(self, params):
        conn = Connection(self, params)
        load_special_indexes()  # make sure special indexes are loaded
        return conn

    def init_connection_state(self):
        pass

    def _start_transaction_under_autocommit(self):
        pass

    def _set_autocommit(self, enabled):
        self.autocommit = enabled

    def create_cursor(self):
        # Lazily (re)establish the connection the first time a cursor is used.
        if not self.connection:
            self.connection = self.get_new_connection(self.settings_dict)
        return Cursor(self.connection)

    def schema_editor(self, *args, **kwargs):
        return DatabaseSchemaEditor(self, *args, **kwargs)

    def _cursor(self):
        # For < Django 1.6 compatibility
        return self.create_cursor()
| |
from __future__ import print_function
from typing import cast, Any, Dict, Iterable, List, Mapping, Optional, Sequence, Tuple, Text
from confirmation.models import Confirmation
from django.conf import settings
from django.template import loader
from django.utils.timezone import now as timezone_now
from zerver.decorator import statsd_increment
from zerver.lib.send_email import send_future_email, display_email, \
send_email_from_dict
from zerver.lib.queue import queue_json_publish
from zerver.models import (
Recipient,
ScheduledJob,
UserMessage,
Stream,
get_display_recipient,
UserProfile,
get_user_profile_by_email,
get_user_profile_by_id,
receives_offline_notifications,
get_context_for_message,
Message,
Realm,
)
import datetime
import re
import subprocess
import ujson
from six.moves import urllib
from collections import defaultdict
def unsubscribe_token(user_profile):
    # type: (UserProfile) -> Text
    """Return the token portion of the user's unsubscribe confirmation link."""
    # Leverage the Django confirmations framework to generate and track unique
    # unsubscription tokens.
    return Confirmation.objects.get_link_for_object(user_profile).split("/")[-1]
def one_click_unsubscribe_link(user_profile, endpoint):
    # type: (UserProfile, Text) -> Text
    """
    Generate a unique link that a logged-out user can visit to unsubscribe from
    Zulip e-mails without having to first log in.
    """
    base = user_profile.realm.uri.rstrip("/")
    path = "accounts/unsubscribe/%s/%s" % (endpoint, unsubscribe_token(user_profile))
    return "%s/%s" % (base, path)
def hash_util_encode(string):
    # type: (Text) -> Text
    """Mirror the frontend's hash_util.encodeHashComponent.

    Percent-encode everything (including "/", which quote() would
    otherwise leave alone), then apply the frontend's substitutions:
    "." becomes "%2E" and "%" becomes ".".
    """
    encoded = urllib.parse.quote(string.encode("utf-8"), safe=b"")
    return encoded.replace(".", "%2E").replace("%", ".")
def pm_narrow_url(realm, participants):
    # type: (Realm, List[Text]) -> Text
    """Return the #narrow URL for a PM conversation with `participants`.

    Note: sorts `participants` in place, as the original did.
    """
    participants.sort()
    encoded = hash_util_encode(",".join(participants))
    return u"%s/#narrow/pm-with/%s" % (realm.uri, encoded)
def stream_narrow_url(realm, stream):
    # type: (Realm, Text) -> Text
    """Return the #narrow URL for the given stream."""
    return u"%s/#narrow/stream/%s" % (realm.uri, hash_util_encode(stream))
def topic_narrow_url(realm, stream, topic):
    # type: (Realm, Text, Text) -> Text
    """Return the #narrow URL for the given stream/topic pair."""
    return u"%s/#narrow/stream/%s/topic/%s" % (
        realm.uri, hash_util_encode(stream), hash_util_encode(topic))
def build_message_list(user_profile, messages):
    # type: (UserProfile, List[Message]) -> List[Dict[str, Any]]
    """
    Builds the message list object for the missed message email template.
    The messages are collapsed into per-recipient and per-sender blocks, like
    our web interface
    """
    messages_to_render = []  # type: List[Dict[str, Any]]

    def sender_string(message):
        # type: (Message) -> Text
        # Stream/huddle messages show an explicit sender name; for 1:1 PMs
        # the header already identifies the sender, so return ''.
        if message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE):
            return message.sender.full_name
        else:
            return ''

    def relative_to_full_url(content):
        # type: (Text) -> Text
        # URLs for uploaded content are of the form
        # "/user_uploads/abc.png". Make them full paths.
        #
        # There's a small chance of colliding with non-Zulip URLs containing
        # "/user_uploads/", but we don't have much information about the
        # structure of the URL to leverage.
        content = re.sub(
            r"/user_uploads/(\S*)",
            user_profile.realm.uri + r"/user_uploads/\1", content)

        # Our proxying user-uploaded images seems to break inline images in HTML
        # emails, so scrub the image but leave the link.
        content = re.sub(
            r"<img src=(\S+)/user_uploads/(\S+)>", "", content)

        # URLs for emoji are of the form
        # "static/generated/emoji/images/emoji/snowflake.png".
        content = re.sub(
            r"/static/generated/emoji/images/emoji/",
            user_profile.realm.uri + r"/static/generated/emoji/images/emoji/",
            content)

        return content

    def fix_plaintext_image_urls(content):
        # type: (Text) -> Text
        # Replace image URLs in plaintext content of the form
        #     [image name](image url)
        # with a simple hyperlink.
        return re.sub(r"\[(\S*)\]\((\S*)\)", r"\2", content)

    def fix_emoji_sizes(html):
        # type: (Text) -> Text
        # Force a fixed emoji height for email clients instead of relying on CSS.
        return html.replace(' class="emoji"', ' height="20px"')

    def build_message_payload(message):
        # type: (Message) -> Dict[str, Text]
        # Produce both a plaintext and an HTML rendering of one message.
        plain = message.content
        plain = fix_plaintext_image_urls(plain)
        plain = relative_to_full_url(plain)

        assert message.rendered_content is not None
        html = message.rendered_content
        html = relative_to_full_url(html)
        html = fix_emoji_sizes(html)

        return {'plain': plain, 'html': html}

    def build_sender_payload(message):
        # type: (Message) -> Dict[str, Any]
        # Start a sender block containing this message's payload.
        sender = sender_string(message)
        return {'sender': sender,
                'content': [build_message_payload(message)]}

    def message_header(user_profile, message):
        # type: (UserProfile, Message) -> Dict[str, Any]
        # Build the per-conversation header (plain + HTML) used to decide
        # whether consecutive messages collapse into the same block.
        disp_recipient = get_display_recipient(message.recipient)
        if message.recipient.type == Recipient.PERSONAL:
            header = u"You and %s" % (message.sender.full_name,)
            html_link = pm_narrow_url(user_profile.realm, [message.sender.email])
            header_html = u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
        elif message.recipient.type == Recipient.HUDDLE:
            assert not isinstance(disp_recipient, Text)
            other_recipients = [r['full_name'] for r in disp_recipient
                                if r['email'] != user_profile.email]
            header = u"You and %s" % (", ".join(other_recipients),)
            html_link = pm_narrow_url(user_profile.realm, [r["email"] for r in disp_recipient
                                                           if r["email"] != user_profile.email])
            header_html = u"<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
        else:
            assert isinstance(disp_recipient, Text)
            header = u"%s > %s" % (disp_recipient, message.topic_name())
            stream_link = stream_narrow_url(user_profile.realm, disp_recipient)
            topic_link = topic_narrow_url(user_profile.realm, disp_recipient, message.subject)
            header_html = u"<a href='%s'>%s</a> > <a href='%s'>%s</a>" % (
                stream_link, disp_recipient, topic_link, message.subject)
        return {"plain": header,
                "html": header_html,
                "stream_message": message.recipient.type_name() == "stream"}

    # # Collapse message list to
    # [
    #    {
    #       "header": {
    #                      "plain":"header",
    #                      "html":"htmlheader"
    #                  }
    #       "senders":[
    #          {
    #             "sender":"sender_name",
    #             "content":[
    #                {
    #                   "plain":"content",
    #                   "html":"htmlcontent"
    #                }
    #                {
    #                   "plain":"content",
    #                   "html":"htmlcontent"
    #                }
    #             ]
    #          }
    #       ]
    #    },
    # ]

    messages.sort(key=lambda message: message.pub_date)

    for message in messages:
        header = message_header(user_profile, message)

        # If we want to collapse into the previous recipient block
        if len(messages_to_render) > 0 and messages_to_render[-1]['header'] == header:
            sender = sender_string(message)
            sender_block = messages_to_render[-1]['senders']

            # Same message sender, collapse again
            if sender_block[-1]['sender'] == sender:
                sender_block[-1]['content'].append(build_message_payload(message))
            else:
                # Start a new sender block
                sender_block.append(build_sender_payload(message))
        else:
            # New recipient and sender block
            recipient_block = {'header': header,
                               'senders': [build_sender_payload(message)]}

            messages_to_render.append(recipient_block)

    return messages_to_render
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events_reply_in_zulip(user_profile, missed_messages, message_count):
    # type: (UserProfile, List[Message], int) -> None
    """
    Send a reminder email to a user if she's missed some PMs by being offline.

    The email will have its reply to address set to a limited used email
    address that will send a zulip message to the correct recipient. This
    allows the user to respond to missed PMs, huddles, and @-mentions directly
    from the email.

    `user_profile` is the user to send the reminder to
    `missed_messages` is a list of Message objects to remind about they should
                      all have the same recipient and subject
    """
    from zerver.context_processors import common_context
    # Disabled missedmessage emails internally
    if not user_profile.enable_offline_email_notifications:
        return

    recipients = set((msg.recipient_id, msg.subject) for msg in missed_messages)
    if len(recipients) != 1:
        raise ValueError(
            'All missed_messages must have the same recipient and subject %r' %
            recipients
        )

    unsubscribe_link = one_click_unsubscribe_link(user_profile, "missed_messages")
    context = common_context(user_profile)
    context.update({
        'name': user_profile.full_name,
        'messages': build_message_list(user_profile, missed_messages),
        'message_count': message_count,
        'mention': missed_messages[0].recipient.type == Recipient.STREAM,
        'unsubscribe_link': unsubscribe_link,
    })

    # If this setting (email mirroring integration) is enabled, only then
    # can users reply to email to send message to Zulip. Thus, one must
    # ensure to display warning in the template.
    if settings.EMAIL_GATEWAY_PATTERN:
        context.update({
            'reply_warning': False,
            'reply_to_zulip': True,
        })
    else:
        context.update({
            'reply_warning': True,
            'reply_to_zulip': False,
        })

    from zerver.lib.email_mirror import create_missed_message_address
    address = create_missed_message_address(user_profile, missed_messages[0])

    senders = list(set(m.sender for m in missed_messages))
    if (missed_messages[0].recipient.type == Recipient.HUDDLE):
        display_recipient = get_display_recipient(missed_messages[0].recipient)
        # Make sure that this is a list of strings, not a string.
        assert not isinstance(display_recipient, Text)
        other_recipients = [r['full_name'] for r in display_recipient
                            if r['id'] != user_profile.id]
        context.update({'group_pm': True})
        # Human-readable group-PM participant list, abbreviated beyond 3 names.
        if len(other_recipients) == 2:
            huddle_display_name = u"%s" % (" and ".join(other_recipients))
            context.update({'huddle_display_name': huddle_display_name})
        elif len(other_recipients) == 3:
            huddle_display_name = u"%s, %s, and %s" % (other_recipients[0], other_recipients[1], other_recipients[2])
            context.update({'huddle_display_name': huddle_display_name})
        else:
            huddle_display_name = u"%s, and %s others" % (', '.join(other_recipients[:2]), len(other_recipients) - 2)
            context.update({'huddle_display_name': huddle_display_name})
    elif (missed_messages[0].recipient.type == Recipient.PERSONAL):
        context.update({'private_message': True})
    else:
        # Keep only the senders who actually mentioned the user
        #
        # TODO: When we add wildcard mentions that send emails, add
        # them to the filter here.
        senders = list(set(m.sender for m in missed_messages if
                           UserMessage.objects.filter(message=m, user_profile=user_profile,
                                                      flags=UserMessage.flags.mentioned).exists()))
        context.update({'at_mention': True})

    context.update({
        'sender_str': ", ".join(sender.full_name for sender in senders),
        'realm_str': user_profile.realm.name,
    })

    from_email = None
    if len(senders) == 1 and settings.SEND_MISSED_MESSAGE_EMAILS_AS_USER:
        # If this setting is enabled, you can reply to the Zulip
        # missed message emails directly back to the original sender.
        # However, one must ensure the Zulip server is in the SPF
        # record for the domain, or there will be spam/deliverability
        # problems.
        sender = senders[0]
        from_email = '"%s" <%s>' % (sender.full_name, sender.email)
        context.update({
            'reply_warning': False,
            'reply_to_zulip': False,
        })

    email_dict = {
        'template_prefix': 'zerver/emails/missed_message',
        'to_email': display_email(user_profile),
        'from_email': from_email,
        'reply_to_email': address,
        'context': context}
    queue_json_publish("missedmessage_email_senders", email_dict, send_email_from_dict)

    user_profile.last_reminder = timezone_now()
    user_profile.save(update_fields=['last_reminder'])
def handle_missedmessage_emails(user_profile_id, missed_email_events):
    # type: (int, Iterable[Dict[str, Any]]) -> None
    """Group a user's still-unread missed messages by conversation and send
    one reminder email per (recipient, topic) pair."""
    message_ids = [event.get('message_id') for event in missed_email_events]
    user_profile = get_user_profile_by_id(user_profile_id)
    if not receives_offline_notifications(user_profile):
        return

    # Only messages the user still hasn't read qualify for a reminder.
    messages = Message.objects.filter(usermessage__user_profile_id=user_profile,
                                      id__in=message_ids,
                                      usermessage__flags=~UserMessage.flags.read)

    # Cancel missed-message emails for deleted messages
    messages = [um for um in messages if um.content != "(deleted)"]

    if not messages:
        return

    messages_by_recipient_subject = defaultdict(list)  # type: Dict[Tuple[int, Text], List[Message]]
    for msg in messages:
        messages_by_recipient_subject[(msg.recipient_id, msg.topic_name())].append(msg)

    # Counts are computed before context messages are mixed in below.
    message_count_by_recipient_subject = {
        recipient_subject: len(msgs)
        for recipient_subject, msgs in messages_by_recipient_subject.items()
    }

    for msg_list in messages_by_recipient_subject.values():
        msg = min(msg_list, key=lambda msg: msg.pub_date)
        if msg.recipient.type == Recipient.STREAM:
            # Include surrounding stream messages for context in the email.
            msg_list.extend(get_context_for_message(msg))

    # Send an email per recipient subject pair
    for recipient_subject, msg_list in messages_by_recipient_subject.items():
        # De-duplicate: context messages may overlap the missed ones.
        unique_messages = {m.id: m for m in msg_list}
        do_send_missedmessage_events_reply_in_zulip(
            user_profile,
            list(unique_messages.values()),
            message_count_by_recipient_subject[recipient_subject],
        )
def clear_followup_emails_queue(email):
    # type: (Text) -> None
    """
    Clear out queued emails that would otherwise be sent to a specific email address.
    """
    ScheduledJob.objects.filter(
        type=ScheduledJob.EMAIL,
        filter_string__iexact=email,
    ).delete()
def log_digest_event(msg):
    # type: (Text) -> None
    """Append `msg` to the digest log file configured in settings."""
    import logging
    # NOTE(review): basicConfig only takes effect the first time it is
    # called in a process; subsequent calls are no-ops.
    logging.basicConfig(filename=settings.DIGEST_LOG_PATH, level=logging.INFO)
    logging.info(msg)
def enqueue_welcome_emails(email, name):
    # type: (Text, Text) -> None
    """Schedule the day-1 and day-2 welcome followup emails for a new user."""
    from zerver.context_processors import common_context
    if settings.WELCOME_EMAIL_SENDER is not None:
        # line break to avoid triggering lint rule
        from_email = '%(name)s <%(email)s>' % \
            settings.WELCOME_EMAIL_SENDER
    else:
        from_email = settings.ZULIP_ADMINISTRATOR

    user_profile = get_user_profile_by_email(email)
    unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
    context = common_context(user_profile)
    context.update({
        'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
        'unsubscribe_link': unsubscribe_link
    })

    send_future_email(
        "zerver/emails/followup_day1", '%s <%s>' % (name, email),
        from_email=from_email, context=context, delay=datetime.timedelta(hours=1))
    send_future_email(
        "zerver/emails/followup_day2", '%s <%s>' % (name, email),
        from_email=from_email, context=context, delay=datetime.timedelta(days=1))
def convert_html_to_markdown(html):
    # type: (Text) -> Text
    """Convert an HTML fragment to markdown by piping it through an external
    converter tool.

    On Linux, the tool installs as html2markdown, and there's a command called
    html2text that does something totally different. On OSX, the tool installs
    as html2text.

    Raises OSError if no converter binary is available.
    """
    commands = ["html2markdown", "html2text"]

    for command in commands:
        try:
            # A body width of 0 means do not try to wrap the text for us.
            p = subprocess.Popen(
                [command, "--body-width=0"], stdout=subprocess.PIPE,
                stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
            break
        except OSError:
            continue
    else:
        # BUG FIX: previously `p` was simply left unbound when neither
        # command existed, producing a confusing UnboundLocalError below
        # instead of a clear failure.
        raise OSError("Could not find html2markdown or html2text on this system")

    markdown = p.communicate(input=html.encode('utf-8'))[0].decode('utf-8').strip()
    # We want images to get linked and inline previewed, but html2text will turn
    # them into links of the form ``, which is
    # ugly. Run a regex over the resulting description, turning links of the
    # form `` into
    # `[image.png](http://foo.com/image.png)`.
    return re.sub(u"!\\[\\]\\((\\S*)/(\\S*)\\?(\\S*)\\)",
                  u"[\\2](\\1/\\2)", markdown)
| |
##############################################################################
################## mlabwrap: transparently wraps matlab(tm) ##################
##############################################################################
##
## o author: Alexander Schmolck <a.schmolck@gmx.net>
## o created: 2002-05-29 21:51:59+00:40
## o version: see `__version__`
## o keywords: matlab wrapper
## o license: MIT
## o FIXME:
## - it seems proxies can somehow still 'disappear', maybe in connection
## with exceptions in the matlab workspace?
## - add test that defunct proxy-values are culled from matlab workspace
##    (for some reason ipython seems to keep them alive somewhere, even after
## a zaphist, should find out what causes that!)
## - add tests for exception handling!
## - the proxy getitem/setitem only works quite properly for 1D arrays
## (matlab's moronic syntax also means that 'foo(bar)(watz)' is not the
## same as 'tmp = foo(bar); tmp(watz)' -- indeed chances are the former
## will fail (not, apparently however with 'foo{bar}{watz}' (blech)). This
##    would make it quite hard to do the proxy thing 'properly' for nested
## proxies, so things may break down for complicated cases but can be
## easily fixed manually e.g.: ``mlab._set('tmp', foo(bar));
## mlab._get('tmp',remove=True)[watz]``
## - Guess there should be some in principle avoidable problems with
## assignments to sub-proxies (in addition to the more fundamental,
## unavoidable problem that ``proxy[index].part = foo`` can't work as
## expected if ``proxy[index]`` is a marshallable value that doesn't need
## to be proxied itself; see below for workaround).
## o XXX:
## - better support of string 'arrays'
## - multi-dimensional arrays are unsupported
## - treatment of lists, tuples and arrays with non-numerical values (these
## should presumably be wrapped into wrapper classes MlabCell etc.)
## - should test classes and further improve struct support?
## - should we transform 1D vectors into row vectors when handing them to
## matlab?
##  - what should be flattened? Should there be a scalarization option?
## - ``autosync_dirs`` is a bit of a hack (and maybe``handle_out``, too)...
## - is ``global mlab`` in unpickling of proxies OK?
## - hasattr fun for proxies (__deepcopy__ etc.)
## - check pickling
## o TODO:
## - delattr
## - better error reporting: test for number of input args etc.
## - add cloning of proxies.
## - pickling for nested proxies (and session management for pickling)
## - more tests
## o !!!:
## - matlab complex arrays are intelligently of type 'double'
## - ``class('func')`` but ``class(var)``
"""
mlabwrap
========
This module implements a powerful and simple to use wrapper that makes using
matlab(tm) from python almost completely transparent. To use simply do:
>>> from mlabwrap import mlab
and then just use whatever matlab command you like as follows:
>>> mlab.plot(range(10), 'ro:')
You can do more than just plotting:
>>> mlab.sort([3,1,2])
array([[ 1., 2., 3.]])
N.B.: The result here is a 1x3 matrix (and not a flat length 3 array) of type
double (and not int), as matlab built around matrices of type double (see
``MlabWrap._flatten_row_vecs``).
Matlab(tm), unlike python, has multiple value returns. To emulate calls like
``[a,b] = sort([3,2,1])`` just do:
>>> mlab.sort([3,1,2], nout=2)
(array([[ 1., 2., 3.]]), array([[ 2., 3., 1.]]))
For names that are reserved in python (like print) do:
>>> mlab.print_()
You can look at the documentation of a matlab function just by using help,
as usual:
>>> help(mlab.sort)
In almost all cases that should be enough -- if you need to do trickier
things, then get raw with ``mlab._do``, or build your child class that
handles what you want.
Fine points and limitations
---------------------------
- Only 2D matrices are directly supported as return values of matlab
functions (arbitrary matlab classes are supported via proxy objects --
in most cases this shouldn't make much of a difference (as these proxy
objects can be even pickled) -- still this functionality is yet
experimental).
One potential pitfall with structs (which are currently proxied) is that
setting indices of subarrays ``struct.part[index] = value`` might seem
to have no effect (since ``part`` can be directly represented as a
python array which will be modified without an effect on the proxy
``struct``'s contents); in that case::
some_array[index] = value; struct.part = some_array``
will have the desired effect.
- Matlab doesn't know scalars, or 1D arrays. Consequently all functions
that one might expect to return a scalar or 1D array will return a 1x1
array instead. Also, because matlab(tm) is built around the 'double'
matrix type (which also includes complex matrices), single floats and
integer types will be cast to double. Note that row and column vectors
can be autoconverted automatically to 1D arrays if that is desired (see
``_flatten_row_vecs``).
- for matlab(tm) function names like ``print`` that are reserved words in
python, so you have to add a trailing underscore (e.g. ``mlab.print_``).
- sometimes you will have to specify the number of return arguments of a
function, e.g. ``a,b,c = mlab.foo(nout=3)``. MlabWrap will normally try to
figure out for you whether the function you call returns 0 or more values
(in which case by default only the first value is returned!). For builtins
this might fail (since unfortunately there seems to be no foolproof way to
find out in matlab), but you can always lend a helping hand::
mlab.foo = mlab._make_mlab_command('foo', nout=3, doc=mlab.help('foo'))
Now ``mlab.foo()`` will by default always return 3 values, but you can still
get only one by doing ``mlab.foo(nout=1)``
- by default the working directory of matlab(tm) is kept in synch with that of
python to avoid unpleasant surprises. In case this behavior does instead
cause you unpleasant surprises, you can turn it off with::
mlab._autosync_dirs = False
- you can customize how matlab is called by setting the environment variable
``MLABRAW_CMD_STR`` (e.g. to add useful options like '-nojvm'). For the
rather convoluted semantics see
<http://www.mathworks.com/access/helpdesk/help/techdoc/apiref/engopen.html>.
- if you don't want to use numpy arrays, but something else that's fine
too::
>>> from numpy.core.defmatrix import matrix
>>> mlab._array_cast = matrix
>>> mlab.sqrt([[4.], [1.], [0.]])
matrix([[ 2.],
[ 1.],
[ 0.]])
Credits
-------
This is really a wrapper around a wrapper (mlabraw) which in turn is a
modified and bugfixed version of Andrew Sterian's pymat
(http://claymore.engineer.gvsu.edu/~steriana/Python/pymat.html), so thanks go
to him for releasing his package as open source.
See the docu of ``MlabWrap`` and ``MatlabObjectProxy`` for more information.
"""
__docformat__ = "restructuredtext en"
__version__ = '1.1'
__author__ = "Alexander Schmolck <a.schmolck@gmx.net>"
import warnings
from pickle import PickleError
import operator
import os, sys, re
import weakref
import atexit
try:
import numpy
ndarray = numpy.ndarray
except ImportError:
import Numeric
ndarray = Numeric.ArrayType
from tempfile import gettempdir
import mlabraw
from awmstools import update, gensym, slurp, spitOut, isString, escape, strToTempfile, __saveVarsHelper
#XXX: nested access
def _flush_write_stdout(s):
"""Writes `s` to stdout and flushes. Default value for ``handle_out``."""
sys.stdout.write(s); sys.stdout.flush()
# XXX I changed this to no longer use weakrefs because it didn't seem 100%
# reliable on second thought; need to check if we need to do something to
# speed up proxy reclamation on the matlab side.
class CurlyIndexer(object):
    """Mimic matlab-style ``foo{bar}`` (curly-brace) indexing in python.

    Instances simply forward all item access to the owning proxy object,
    passing ``'{}'`` as the parens style so that the proxy generates curly
    rather than round-bracket indexing on the matlab side.
    """

    # parens style forwarded to the proxy for every access
    _PARENS = '{}'

    def __init__(self, proxy):
        self.proxy = proxy

    def __getitem__(self, index):
        return self.proxy.__getitem__(index, self._PARENS)

    def __setitem__(self, index, value):
        self.proxy.__setitem__(index, value, self._PARENS)
class MlabObjectProxy(object):
    """A proxy class for matlab objects that can't be converted to python
    types.

    WARNING: There are impedance-mismatch issues between python and matlab
    that make designing such a class difficult (e.g. dimensionality, indexing
    and ``length`` work fundamentally different in matlab than in python), so
    although this class currently tries to transparently support some stuff
    (notably (1D) indexing, slicing and attribute access), other operations
    (e.g. math operators and in particular __len__ and __iter__) are not yet
    supported. Don't depend on the indexing semantics not to change.

    Note:
        Assigning to parts of proxy objects (e.g. ``proxy[index].part =
        [[1,2,3]]``) should *largely* work as expected, the only exception
        would be if ``proxy.foo[index] = 3`` where ``proxy.foo[index]`` is some
        type that can be converted to python (i.e. an array or string, (or
        cell, if cell conversion has been enabled)), because then ``proxy.foo``
        returns a new python object. For these cases it's necessary to do::

            some_array[index] = 3; proxy.foo = some_array
    """

    def __init__(self, mlabwrap, name, parent=None):
        # Assignments go through ``self.__dict__`` directly because this
        # class overrides ``__setattr__`` to forward writes to matlab.
        self.__dict__['_mlabwrap'] = mlabwrap
        # ``_name`` is the name of the proxy's representation in matlab.
        self.__dict__['_name'] = name
        # ``_parent`` is used to fake matlab's ``obj{foo}`` style indexing.
        self.__dict__['_parent'] = parent

    def __getstate__(self):
        """Experimental pickling support: serialize by having matlab
        ``save`` the proxied value to a temp .mat file and returning its
        raw contents."""
        if self.__dict__['_parent']:
            raise PickleError(
                "Only root instances of %s can currently be pickled." %
                type(self).__name__)
        tmp_filename = os.path.join(
            gettempdir(), "mlab_pickle_%s.mat" % self._mlabwrap._session)
        try:
            # BUGFIX: use the session this proxy belongs to, not the global
            # ``mlab`` singleton (they can differ for non-default sessions).
            self._mlabwrap.save(tmp_filename, self._name)
            mlab_contents = slurp(tmp_filename, binary=1)
        finally:
            if os.path.exists(tmp_filename):
                os.remove(tmp_filename)
        return {'mlab_contents': mlab_contents,
                'name': self._name}

    def __setstate__(self, state):
        """Experimental unpickling support (inverse of ``__getstate__``)."""
        # The global ``mlab`` session is unavoidable here: at unpickle time
        # there is no mlabwrap reference available yet.
        global mlab  # XXX this should be dealt with correctly
        old_name = state['name']
        mlab_name = "UNPICKLED%s__" % gensym('')
        tmp_filename = None
        try:
            tmp_filename = strToTempfile(
                state['mlab_contents'], suffix='.mat', binary=1)
            # Load the dumped variable into a temp struct, then pull the
            # field out under a fresh name before proxying it.
            mlabraw.eval(mlab._session,
                         "TMP_UNPICKLE_STRUCT__ = load('%s', '%s');" % (
                             tmp_filename, old_name))
            mlabraw.eval(mlab._session,
                         "%s = TMP_UNPICKLE_STRUCT__.%s;" % (mlab_name, old_name))
            mlabraw.eval(mlab._session, "clear TMP_UNPICKLE_STRUCT__;")
            # XXX re-initialize *this* instance as the proxy for the value.
            mlab._make_proxy(mlab_name,
                             constructor=lambda *args: self.__init__(*args) or self)
            mlabraw.eval(mlab._session, 'clear %s;' % mlab_name)
        finally:
            if tmp_filename and os.path.exists(tmp_filename):
                os.remove(tmp_filename)
        # FIXME clear'ing in case of error

    def __repr__(self):
        output = []
        # BUGFIX: route through this proxy's own session, not the global
        # ``mlab`` instance.
        self._mlabwrap._do('disp(%s)' % self._name, nout=0,
                           handle_out=output.append)
        rep = "".join(output)
        klass = self._mlabwrap._do("class(%s)" % self._name)
        return "<%s of matlab-class: %r; internal name: %r; has parent: %s>\n%s" % (
            type(self).__name__, klass,
            self._name, ['yes', 'no'][self._parent is None],
            rep)

    def __del__(self):
        # Only root proxies own a matlab variable; children reference a part
        # of their parent's value.
        if self._parent is None:
            mlabraw.eval(self._mlabwrap._session, 'clear %s;' % self._name)

    def _get_part(self, to_get):
        """Fetch matlab expression `to_get`, converting to a python value if
        mlabraw can, else wrapping it in a new child proxy."""
        if self._mlabwrap._var_type(to_get) in self._mlabwrap._mlabraw_can_convert:
            #!!! need assignment to TMP_VAL__ because `mlabraw.get` only works
            # with 'atomic' values like ``foo`` and not e.g. ``foo.bar``.
            mlabraw.eval(self._mlabwrap._session, "TMP_VAL__=%s" % to_get)
            return self._mlabwrap._get('TMP_VAL__', remove=True)
        return type(self)(self._mlabwrap, to_get, self)

    def _set_part(self, to_set, value):
        """Assign python `value` (or another proxy) to matlab lvalue `to_set`."""
        # FIXME s.a.
        if isinstance(value, MlabObjectProxy):
            mlabraw.eval(self._mlabwrap._session,
                         "%s = %s;" % (to_set, value._name))
        else:
            self._mlabwrap._set("TMP_VAL__", value)
            mlabraw.eval(self._mlabwrap._session, "%s = TMP_VAL__;" % to_set)
            mlabraw.eval(self._mlabwrap._session, 'clear TMP_VAL__;')

    def __getattr__(self, attr):
        if attr == "_":
            # ``proxy._[...]`` gives curly-brace indexing; cache the helper.
            return self.__dict__.setdefault('_', CurlyIndexer(self))
        else:
            return self._get_part("%s.%s" % (self._name, attr))

    def __setattr__(self, attr, value):
        self._set_part("%s.%s" % (self._name, attr), value)

    # FIXME still have to think properly about how to best translate Matlab
    # semantics here...
    def __nonzero__(self):
        raise TypeError("%s does not yet implement truth testing" %
                        type(self).__name__)

    def __len__(self):
        raise TypeError("%s does not yet implement __len__" %
                        type(self).__name__)

    def __iter__(self):
        raise TypeError("%s does not yet implement iteration" %
                        type(self).__name__)

    @staticmethod
    def _matlab_str_repr(s):
        """Return a matlab expression that evaluates to the string `s`."""
        if '\n' not in s:
            return "'%s'" % s.replace("'", "''")
        else:
            # Matlab's string literals suck. They can't represent all
            # strings, so we need to use sprintf
            return "sprintf('%s')" % escape(s).replace("'", "''").replace("%", "%%")

    # FIXME: those two only work ok for 1D indexing
    def _convert_index(self, index):
        """Translate a python index (int, string or step-1 slice) into the
        equivalent 1-based matlab index string."""
        if isinstance(index, int):
            return str(index + 1)  # -> matlab 1-based indexing
        elif isString(index):
            return self._matlab_str_repr(index)
        elif isinstance(index, slice):
            if index == slice(None, None, None):
                return ":"
            elif index.step not in (None, 1):
                raise ValueError("Illegal index for a proxy %r" % index)
            else:
                start = (index.start or 0) + 1
                # ``start == 0`` only for python index -1, i.e. matlab 'end'.
                if start == 0: start_s = 'end'
                elif start < 0: start_s = 'end%d' % start
                else: start_s = '%d' % start
                if index.stop is None: stop_s = 'end'
                elif index.stop < 0: stop_s = 'end%d' % index.stop
                else: stop_s = '%d' % index.stop
                return '%s:%s' % (start_s, stop_s)
        else:
            raise TypeError("Unsupported index type: %r." % type(index))

    def __getitem__(self, index, parens='()'):
        """WARNING: Semi-finished, semantics might change because it's not yet
        clear how to best bridge the matlab/python impedence match.

        HACK: Matlab decadently allows overloading *2* different indexing
        parens, ``()`` and ``{}``, hence the ``parens`` option."""
        index = self._convert_index(index)
        return self._get_part("".join([self._name, parens[0], index, parens[1]]))

    def __setitem__(self, index, value, parens='()'):
        """WARNING: see ``__getitem__``."""
        index = self._convert_index(index)
        return self._set_part("".join([self._name, parens[0], index, parens[1]]),
                              value)
class MlabConversionError(Exception):
    """Raised when a mlab type can't be converted to a python primitive.

    Callers (e.g. ``MlabWrap._get``) catch this and fall back on creating a
    ``MlabObjectProxy`` instead."""
    pass
class MlabWrap(object):
    """This class does most of the wrapping work. It manages a single matlab
    session (you can in principle have multiple open sessions if you want,
    but I can see little use for this, so this feature is largely untested)
    and automatically translates all attribute requests (that don't start
    with '_') to the appropriate matlab function calls. The details of this
    handling can be controlled with a number of instance variables,
    documented below."""

    __all__ = []  # XXX a hack, so that this class can fake a module; don't mutate

    def __init__(self):
        """Create a new matlab(tm) wrapper object.
        """
        # If set, numpy array results are passed through this callable
        # before being returned (e.g. ``numpy.matrix``).
        self._array_cast = None
        # Keep matlab's working directory in sync with python's.
        self._autosync_dirs = True
        # Automatically return 1xn matrices as flat numeric arrays.
        self._flatten_row_vecs = False
        # Automatically return nx1 matrices as flat numeric arrays.
        self._flatten_col_vecs = False
        # Remove the function args from matlab workspace after each function
        # call. Otherwise they are left to be (partly) overwritten by the
        # next function call. This saves a function call in matlab but means
        # that the memory used up by the arguments will remain unreclaimed
        # till overwritten.
        self._clear_call_args = True
        self._session = mlabraw.open(os.getenv("MLABRAW_CMD_STR", ""))
        # Bind the handle as a default arg so the lambda doesn't keep
        # ``self`` alive.
        atexit.register(lambda handle=self._session: mlabraw.close(handle))
        # ``mlab._proxies.values()`` lists the matlab objects currently
        # proxied; weak values so proxies can be reclaimed.
        self._proxies = weakref.WeakValueDictionary()
        self._proxy_count = 0
        # The matlab(tm) types that mlabraw will automatically convert for us.
        self._mlabraw_can_convert = ('double', 'char')
        # The matlab(tm) types we can handle ourselves with a bit of effort.
        # To turn on autoconversion for e.g. cell arrays do:
        # ``mlab._dont_proxy["cell"] = True``.
        self._dont_proxy = {'cell': False}

    def __del__(self):
        # NOTE(review): the session is also closed by the atexit handler
        # registered in __init__, so close may run twice — confirm mlabraw
        # tolerates closing an already-closed session.
        mlabraw.close(self._session)

    def _format_struct(self, varname):
        """Return a short matlab-style description of struct `varname`."""
        res = []
        fieldnames = self._do("fieldnames(%s)" % varname)
        size = numpy.ravel(self._do("size(%s)" % varname))
        return "%dx%d struct array with fields:\n%s" % (
            size[0], size[1], "\n    ".join([""] + fieldnames))

    def _var_type(self, varname):
        """Return the matlab class of `varname` (with '-sparse' appended for
        sparse values)."""
        mlabraw.eval(self._session,
                     "TMP_CLS__ = class(%(x)s); if issparse(%(x)s),"
                     "TMP_CLS__ = [TMP_CLS__,'-sparse']; end;" % dict(x=varname))
        res_type = mlabraw.get(self._session, "TMP_CLS__")
        mlabraw.eval(self._session, "clear TMP_CLS__;")  # unlikely to need try/finally to ensure clear
        return res_type

    def _make_proxy(self, varname, parent=None, constructor=MlabObjectProxy):
        """Creates a proxy for a variable.

        XXX create and cache nested proxies also here.
        """
        # FIXME why not just use gensym here?
        proxy_val_name = "PROXY_VAL%d__" % self._proxy_count
        self._proxy_count += 1
        # Copy the value so the proxy owns an independent matlab variable.
        mlabraw.eval(self._session, "%s = %s;" % (proxy_val_name, varname))
        res = constructor(self, proxy_val_name, parent)
        self._proxies[proxy_val_name] = res
        return res

    def _get_cell(self, varname):
        """Convert a 1D matlab cell array `varname` to a python list.

        Raises `MlabConversionError` for anything but empty or rank-1 cells.
        """
        # XXX can currently only handle ``{}`` and 1D cells
        mlabraw.eval(self._session,
                     "TMP_SIZE_INFO__ = \
                     [all(size(%(vn)s) == 0), \
                      min(size(%(vn)s)) == 1 & ndims(%(vn)s) == 2, \
                      max(size(%(vn)s))];" % {'vn': varname})
        is_empty, is_rank1, cell_len = map(
            int, self._get("TMP_SIZE_INFO__", remove=True).flat)
        if is_empty:
            return []
        elif is_rank1:
            cell_bits = (["TMP%i%s__" % (i, gensym('_'))
                          for i in range(cell_len)])
            mlabraw.eval(self._session, '[%s] = deal(%s{:});' %
                         (",".join(cell_bits), varname))
            # !!! this recursive call means we have to take care with
            # overwriting temps!!!
            return self._get_values(cell_bits)
        else:
            raise MlabConversionError("Not a 1D cell array")

    def _manually_convert(self, varname, vartype):
        """Dispatch hand-rolled conversions for types mlabraw can't handle.

        Returns None for types with no manual conversion."""
        if vartype == 'cell':
            return self._get_cell(varname)

    def _get_values(self, varnames):
        """Fetch each variable in `varnames` and clear them all from the
        matlab workspace afterwards."""
        if not varnames: raise ValueError("No varnames")  # to prevent clear('')
        res = []
        for varname in varnames:
            res.append(self._get(varname))
        mlabraw.eval(self._session, "clear('%s');" % "','".join(varnames))  # FIXME wrap try/finally?
        return res

    def _do(self, cmd, *args, **kwargs):
        """Semi-raw execution of a matlab command.

        Smartly handle calls to matlab, figure out what to do with `args`,
        and when to use function call syntax and not.

        If no `args` are specified, the ``cmd`` not ``result = cmd()`` form is
        used in Matlab -- this also makes literal Matlab commands legal
        (eg. cmd=``get(gca, 'Children')``).

        If ``nout=0`` is specified, the Matlab command is executed as
        procedure, otherwise it is executed as function (default), nout
        specifying how many values should be returned (default 1).

        **Beware that if you don't specify ``nout=0`` for a `cmd` that
        never returns a value, an error will be raised** (because assigning a
        variable to a call that doesn't return a value is illegal in matlab).

        ``cast`` specifies which typecast should be applied to the result
        (e.g. `int`), it defaults to none.

        XXX: should we add ``parens`` parameter?
        """
        handle_out = kwargs.get('handle_out', _flush_write_stdout)
        # HACK
        if self._autosync_dirs:
            mlabraw.eval(self._session,
                         "cd('%s');" % os.getcwd().replace("'", "''"))
        nout = kwargs.get('nout', 1)
        # XXX what to do with matlab screen output
        argnames = []
        tempargs = []
        try:
            for count, arg in enumerate(args):
                if isinstance(arg, MlabObjectProxy):
                    # Proxies already live in matlab space; pass by name.
                    argnames.append(arg._name)
                else:
                    nextName = 'arg%d__' % count
                    argnames.append(nextName)
                    tempargs.append(nextName)
                    mlabraw.put(self._session, argnames[-1], arg)
            if args:
                cmd = "%s(%s)%s" % (cmd, ", ".join(argnames),
                                    ('', ';')[kwargs.get('show', 0)])
            # got three cases for nout:
            # 0 -> None, 1 -> val, >1 -> [val1, val2, ...]
            if nout == 0:
                handle_out(mlabraw.eval(self._session, cmd))
                return
            # deal with matlab-style multiple value return
            resSL = ["RES%d__" % i for i in range(nout)]
            handle_out(mlabraw.eval(self._session,
                                    '[%s]=%s;' % (", ".join(resSL), cmd)))
            res = self._get_values(resSL)
            if nout == 1: res = res[0]
            else:         res = tuple(res)
            # BUGFIX: ``dict.has_key`` is deprecated/removed; use ``in``.
            if 'cast' in kwargs:
                if nout == 0: raise TypeError("Can't cast: 0 nout")
                return kwargs['cast'](res)
            else:
                return res
        finally:
            if len(tempargs) and self._clear_call_args:
                mlabraw.eval(self._session,
                             "clear('%s');" % "','".join(tempargs))

    # this is really raw, no conversion of [[]] -> [], whatever
    def _get(self, name, remove=False):
        r"""Directly access a variable in matlab space.

        This should normally not be used by user code."""
        # FIXME should this really be needed in normal operation?
        if name in self._proxies: return self._proxies[name]
        varname = name
        vartype = self._var_type(varname)
        if vartype in self._mlabraw_can_convert:
            var = mlabraw.get(self._session, varname)
            if isinstance(var, ndarray):
                # Optionally flatten 1xn / nx1 results and apply the cast.
                if self._flatten_row_vecs and numpy.shape(var)[0] == 1:
                    var.shape = var.shape[1:2]
                elif self._flatten_col_vecs and numpy.shape(var)[1] == 1:
                    var.shape = var.shape[0:1]
                if self._array_cast:
                    var = self._array_cast(var)
        else:
            var = None
            if self._dont_proxy.get(vartype):
                # manual conversions may fail (e.g. for multidimensional
                # cell arrays), in that case just fall back on proxying.
                try:
                    var = self._manually_convert(varname, vartype)
                except MlabConversionError: pass
            if var is None:
                # we can't convert this to a python object, so we just
                # create a proxy, and don't delete the real matlab
                # reference until the proxy is garbage collected
                var = self._make_proxy(varname)
        if remove:
            mlabraw.eval(self._session, "clear('%s');" % varname)
        return var

    def _set(self, name, value):
        r"""Directly set a variable `name` in matlab space to `value`.

        This should normally not be used in user code."""
        if isinstance(value, MlabObjectProxy):
            mlabraw.eval(self._session, "%s = %s;" % (name, value._name))
        else:
            mlabraw.put(self._session, name, value)

    def _make_mlab_command(self, name, nout, doc=None):
        """Build a python callable that invokes matlab function `name` with
        `nout` output arguments by default."""
        def mlab_command(*args, **kwargs):
            return self._do(name, *args, **update({'nout': nout}, kwargs))
        # BUGFIX: ``doc`` defaults to None and ``"\n" + None`` raised a
        # TypeError; substitute the empty string.
        mlab_command.__doc__ = "\n" + (doc or "")
        return mlab_command

    # XXX this method needs some refactoring, but only after it is clear how
    # things should be done (e.g. what should be extracted from docstrings
    # and how)
    def __getattr__(self, attr):
        """Magically creates a wrapper to a matlab function, procedure or
        object on-the-fly."""
        if re.search(r'\W', attr):  # work around ipython <= 0.7.3 bug
            raise ValueError("Attributes don't look like this: %r" % attr)
        if attr.startswith('__'): raise AttributeError(attr)
        assert not attr.startswith('_')  # XXX
        # print_ -> print
        if attr[-1] == "_": name = attr[:-1]
        else:               name = attr
        try:
            nout = self._do("nargout('%s')" % name)
        except mlabraw.error:
            typ = numpy.ravel(self._do("exist('%s')" % name))[0]
            if typ == 0:  # doesn't exist
                raise AttributeError("No such matlab object: %s" % name)
            else:
                warnings.warn(
                    "Couldn't ascertain number of output args "
                    "for '%s', assuming 1." % name)
                nout = 1
        doc = self._do("help('%s')" % name)
        # play it safe only return 1st if nout >= 1
        # XXX are all ``nout>1``s also useable as ``nout==1``s?
        nout = nout and 1
        mlab_command = self._make_mlab_command(name, nout, doc)
        #!!! attr, *not* name, because we might have python keyword name!
        setattr(self, attr, mlab_command)
        return mlab_command
# Module-level singleton session: importing this module starts matlab(tm).
mlab = MlabWrap()
# Re-export mlabraw's error type under a friendlier name so client code can
# catch matlab errors without importing mlabraw itself.
MlabError = mlabraw.error
def saveVarsInMat(filename, varNamesStr, outOf=None, **opts):
    """Hacky convenience function to dump a couple of python variables in a
    .mat file. See `awmstools.saveVars`.
    """
    from mlabwrap import mlab
    # Normalize the arguments (resolves the variable names and the target
    # filename, forcing a '.mat' suffix).
    filename, varnames, outOf = __saveVarsHelper(
        filename, varNamesStr, outOf, '.mat', **opts)
    try:
        # Push each python value into matlab space, then save them all.
        for name in varnames:
            mlab._set(name, outOf[name])
        mlab._do("save('%s','%s')" % (filename, "', '".join(varnames)),
                 nout=0)
    finally:
        # Always reclaim the temporaries from the matlab workspace.
        assert varnames
        mlab._do("clear('%s')" % "', '".join(varnames), nout=0)
__all__ = ['mlab', 'saveVarsInMat', 'MlabWrap', 'MlabError']
# Uncomment the following line to make the `mlab` object a library so that
# e.g. ``from mlabwrap.mlab import plot`` will work
## if not sys.modules.get('mlabwrap.mlab'): sys.modules['mlabwrap.mlab'] = mlab
| |
from __future__ import unicode_literals
import datetime
from decimal import Decimal
import unittest
from django import test
from django import forms
from django.core import validators
from django.core import checks
from django.core.exceptions import ValidationError
from django.db import connection, transaction, models, IntegrityError
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
CommaSeparatedIntegerField, DateField, DateTimeField, DecimalField,
EmailField, FilePathField, FloatField, IntegerField, IPAddressField,
GenericIPAddressField, NOT_PROVIDED, NullBooleanField, PositiveIntegerField,
PositiveSmallIntegerField, SlugField, SmallIntegerField, TextField,
TimeField, URLField)
from django.db.models.fields.files import FileField, ImageField
from django.utils import six
from django.utils.functional import lazy
from .models import (
Foo, Bar, Whiz, BigD, BigS, BigIntegerModel, Post, NullBooleanModel,
BooleanModel, PrimaryKeyCharModel, DataModel, Document, RenamedField,
DateTimeModel, VerboseNameField, FksToBooleans, FkToChar, FloatModel,
SmallIntegerModel, IntegerModel, PositiveSmallIntegerModel, PositiveIntegerModel,
WhizIter, WhizIterEmpty)
class BasicFieldTests(test.TestCase):
    """Smoke tests for assorted ``models.Field`` behaviors: formfield
    generation, repr, explicit field names, verbose names and validation of
    pathological values."""

    def test_show_hidden_initial(self):
        """
        Regression test for #12913. Make sure fields with choices respect
        show_hidden_initial as a kwarg to models.Field.formfield()
        """
        choices = [(0, 0), (1, 1)]
        model_field = models.Field(choices=choices)
        form_field = model_field.formfield(show_hidden_initial=True)
        self.assertTrue(form_field.show_hidden_initial)
        form_field = model_field.formfield(show_hidden_initial=False)
        self.assertFalse(form_field.show_hidden_initial)

    def test_nullbooleanfield_blank(self):
        """
        Regression test for #13071: NullBooleanField should not throw
        a validation error when given a value of None.
        """
        nullboolean = NullBooleanModel(nbfield=None)
        try:
            nullboolean.full_clean()
        except ValidationError as e:
            self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)

    def test_field_repr(self):
        """
        Regression test for #5931: __repr__ of a field also displays its name
        """
        f = Foo._meta.get_field('a')
        self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>')
        # An unbound field has no name to display.
        f = models.fields.CharField()
        self.assertEqual(repr(f), '<django.db.models.fields.CharField>')

    def test_field_name(self):
        """
        Regression test for #14695: explicitly defined field name overwritten
        by model's attribute name.
        """
        instance = RenamedField()
        # The display helper must be generated for the explicit field name,
        # not for the model attribute name.
        self.assertTrue(hasattr(instance, 'get_fieldname_display'))
        self.assertFalse(hasattr(instance, 'get_modelname_display'))

    def test_field_verbose_name(self):
        # VerboseNameField declares field1..field22, each with an explicit
        # verbose_name; the pk's verbose name is set too.
        m = VerboseNameField
        for i in range(1, 23):
            self.assertEqual(m._meta.get_field('field%d' % i).verbose_name,
                             'verbose field%d' % i)
        self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk')

    def test_float_validates_object(self):
        """Assigning a non-numeric object to a FloatField must raise
        TypeError at save time, both before and after the first save."""
        instance = FloatModel(size=2.5)
        # Try setting float field to unsaved object
        instance.size = instance
        with transaction.atomic():
            with self.assertRaises(TypeError):
                instance.save()
        # Set value to valid and save
        instance.size = 2.5
        instance.save()
        self.assertTrue(instance.id)
        # Set field to object on saved instance
        instance.size = instance
        with transaction.atomic():
            with self.assertRaises(TypeError):
                instance.save()
        # Try setting field to object on retrieved object
        obj = FloatModel.objects.get(pk=instance.id)
        obj.size = obj
        with self.assertRaises(TypeError):
            obj.save()

    def test_choices_form_class(self):
        """Can supply a custom choices form class. Regression for #20999."""
        choices = [('a', 'a')]
        field = models.CharField(choices=choices)
        klass = forms.TypedMultipleChoiceField
        self.assertIsInstance(field.formfield(choices_form_class=klass), klass)

    def test_field_str(self):
        # str() of a bound field gives its dotted "app.Model.field" path.
        from django.utils.encoding import force_str
        f = Foo._meta.get_field('a')
        self.assertEqual(force_str(f), "model_fields.Foo.a")
class DecimalFieldTests(test.TestCase):
    """Tests for DecimalField conversion, formatting, lookups and
    round-tripping through the database without float corruption."""

    def test_to_python(self):
        f = models.DecimalField(max_digits=4, decimal_places=2)
        # ints and numeric strings convert; garbage raises ValidationError.
        self.assertEqual(f.to_python(3), Decimal("3"))
        self.assertEqual(f.to_python("3.14"), Decimal("3.14"))
        self.assertRaises(ValidationError, f.to_python, "abc")

    def test_default(self):
        f = models.DecimalField(default=Decimal("0.00"))
        self.assertEqual(f.get_default(), Decimal("0.00"))

    def test_format(self):
        # _format pads/truncates to decimal_places; None passes through.
        f = models.DecimalField(max_digits=5, decimal_places=1)
        self.assertEqual(f._format(f.to_python(2)), '2.0')
        self.assertEqual(f._format(f.to_python('2.6')), '2.6')
        self.assertEqual(f._format(None), None)

    def test_get_db_prep_lookup(self):
        f = models.DecimalField(max_digits=5, decimal_places=1)
        self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])

    def test_filter_with_strings(self):
        """
        We should be able to filter decimal fields using strings (#8023)
        """
        Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
        self.assertEqual(list(Foo.objects.filter(d='1.23')), [])

    def test_save_without_float_conversion(self):
        """
        Ensure decimals don't go through a corrupting float conversion during
        save (#5079).
        """
        bd = BigD(d="12.9")
        bd.save()
        bd = BigD.objects.get(pk=bd.pk)
        self.assertEqual(bd.d, Decimal("12.9"))

    def test_lookup_really_big_value(self):
        """
        Ensure that really big values can be used in a filter statement, even
        with older Python versions.
        """
        # This should not crash. That counts as a win for our purposes.
        Foo.objects.filter(d__gte=100000000000)
class ForeignKeyTests(test.TestCase):
    """Tests for ForeignKey defaults, empty-string keys, system checks and
    related_name handling."""

    def test_callable_default(self):
        """Test the use of a lazy callable for ForeignKey.default"""
        a = Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
        b = Bar.objects.create(b="bcd")
        # Bar.a's default callable must have resolved to the Foo above.
        self.assertEqual(b.a, a)

    @test.skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_empty_string_fk(self):
        """
        Test that foreign key values to empty strings don't get converted
        to None (#19299)
        """
        char_model_empty = PrimaryKeyCharModel.objects.create(string='')
        fk_model_empty = FkToChar.objects.create(out=char_model_empty)
        fk_model_empty = FkToChar.objects.select_related('out').get(id=fk_model_empty.pk)
        self.assertEqual(fk_model_empty.out, char_model_empty)

    def test_warning_when_unique_true_on_fk(self):
        # ForeignKey(unique=True) should trigger system check W342 suggesting
        # OneToOneField instead.
        class FKUniqueTrue(models.Model):
            fk_field = models.ForeignKey(Foo, unique=True)

        model = FKUniqueTrue()
        expected_warnings = [
            checks.Warning(
                'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
                hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
                obj=FKUniqueTrue.fk_field.field,
                id='fields.W342',
            )
        ]
        warnings = model.check()
        self.assertEqual(warnings, expected_warnings)

    def test_related_name_converted_to_text(self):
        # related_name must come back as text (unicode on py2), not bytes.
        rel_name = Bar._meta.get_field_by_name('a')[0].rel.related_name
        self.assertIsInstance(rel_name, six.text_type)
class DateTimeFieldTests(unittest.TestCase):
    """Microsecond handling in DateTimeField/TimeField parsing and storage."""

    def test_datetimefield_to_python_usecs(self):
        """DateTimeField.to_python should support usecs"""
        field = models.DateTimeField()
        cases = [
            ('2001-01-02 03:04:05.000006',
             datetime.datetime(2001, 1, 2, 3, 4, 5, 6)),
            ('2001-01-02 03:04:05.999999',
             datetime.datetime(2001, 1, 2, 3, 4, 5, 999999)),
        ]
        for raw, expected in cases:
            self.assertEqual(field.to_python(raw), expected)

    def test_timefield_to_python_usecs(self):
        """TimeField.to_python should support usecs"""
        field = models.TimeField()
        cases = [
            ('01:02:03.000004', datetime.time(1, 2, 3, 4)),
            ('01:02:03.999999', datetime.time(1, 2, 3, 999999)),
        ]
        for raw, expected in cases:
            self.assertEqual(field.to_python(raw), expected)

    @test.skipUnlessDBFeature("supports_microsecond_precision")
    def test_datetimes_save_completely(self):
        """Microseconds must survive a round-trip through the database."""
        the_date = datetime.date(2014, 3, 12)
        the_datetime = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
        the_time = datetime.time(21, 22, 23, 240000)
        DateTimeModel.objects.create(d=the_date, dt=the_datetime, t=the_time)
        fetched = DateTimeModel.objects.first()
        self.assertTrue(fetched)
        self.assertEqual(fetched.d, the_date)
        self.assertEqual(fetched.dt, the_datetime)
        self.assertEqual(fetched.t, the_time)
class BooleanFieldTests(unittest.TestCase):
    """Tests for BooleanField/NullBooleanField: lookup value normalization,
    to_python, formfield choices, and that database round-trips return real
    ``bool`` instances (not 0/1).

    NOTE(review): the CharField/TextField max_length tests below are not
    boolean-related but live here in the original; kept in place to preserve
    existing test IDs.
    """

    def _test_get_db_prep_lookup(self, f):
        # Truthy inputs (True, '1', 1) normalize to [True], falsy inputs
        # (False, '0', 0) to [False]; None stays [None].
        self.assertEqual(f.get_db_prep_lookup('exact', True, connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', '1', connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', 1, connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', False, connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', '0', connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', 0, connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])

    def _test_to_python(self, f):
        # to_python must return actual bools, not the original ints.
        self.assertIs(f.to_python(1), True)
        self.assertIs(f.to_python(0), False)

    def test_booleanfield_get_db_prep_lookup(self):
        self._test_get_db_prep_lookup(models.BooleanField())

    def test_nullbooleanfield_get_db_prep_lookup(self):
        self._test_get_db_prep_lookup(models.NullBooleanField())

    def test_booleanfield_to_python(self):
        self._test_to_python(models.BooleanField())

    def test_nullbooleanfield_to_python(self):
        self._test_to_python(models.NullBooleanField())

    def test_charfield_textfield_max_length_passed_to_formfield(self):
        """
        Test that CharField and TextField pass their max_length attributes to
        form fields created using their .formfield() method (#22206).
        """
        cf1 = models.CharField()
        cf2 = models.CharField(max_length=1234)
        self.assertIsNone(cf1.formfield().max_length)
        self.assertEqual(1234, cf2.formfield().max_length)
        tf1 = models.TextField()
        tf2 = models.TextField(max_length=2345)
        self.assertIsNone(tf1.formfield().max_length)
        self.assertEqual(2345, tf2.formfield().max_length)

    def test_booleanfield_choices_blank(self):
        """
        Test that BooleanField with choices and defaults doesn't generate a
        formfield with the blank option (#9640, #10549).
        """
        choices = [(1, 'Si'), (2, 'No')]
        f = models.BooleanField(choices=choices, default=1, null=False)
        self.assertEqual(f.formfield().choices, choices)

    def test_return_type(self):
        # Values read back from the DB must be real bools for both field
        # types.
        b = BooleanModel()
        b.bfield = True
        b.save()
        b2 = BooleanModel.objects.get(pk=b.pk)
        self.assertIsInstance(b2.bfield, bool)
        self.assertEqual(b2.bfield, True)

        b3 = BooleanModel()
        b3.bfield = False
        b3.save()
        b4 = BooleanModel.objects.get(pk=b3.pk)
        self.assertIsInstance(b4.bfield, bool)
        self.assertEqual(b4.bfield, False)

        b = NullBooleanModel()
        b.nbfield = True
        b.save()
        b2 = NullBooleanModel.objects.get(pk=b.pk)
        self.assertIsInstance(b2.nbfield, bool)
        self.assertEqual(b2.nbfield, True)

        b3 = NullBooleanModel()
        b3.nbfield = False
        b3.save()
        b4 = NullBooleanModel.objects.get(pk=b3.pk)
        self.assertIsInstance(b4.nbfield, bool)
        self.assertEqual(b4.nbfield, False)

        # http://code.djangoproject.com/ticket/13293
        # Verify that when an extra clause exists, the boolean
        # conversions are applied with an offset
        b5 = BooleanModel.objects.all().extra(
            select={'string_col': 'string'})[0]
        self.assertNotIsInstance(b5.pk, bool)

    def test_select_related(self):
        """
        Test type of boolean fields when retrieved via select_related() (MySQL,
        #15040)
        """
        bmt = BooleanModel.objects.create(bfield=True)
        bmf = BooleanModel.objects.create(bfield=False)
        nbmt = NullBooleanModel.objects.create(nbfield=True)
        nbmf = NullBooleanModel.objects.create(nbfield=False)
        m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt)
        m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf)

        # Test select_related('fk_field_name')
        ma = FksToBooleans.objects.select_related('bf').get(pk=m1.id)
        # verify types -- should't be 0/1
        self.assertIsInstance(ma.bf.bfield, bool)
        self.assertIsInstance(ma.nbf.nbfield, bool)
        # verify values
        self.assertEqual(ma.bf.bfield, True)
        self.assertEqual(ma.nbf.nbfield, True)

        # Test select_related()
        mb = FksToBooleans.objects.select_related().get(pk=m1.id)
        mc = FksToBooleans.objects.select_related().get(pk=m2.id)
        # verify types -- shouldn't be 0/1
        self.assertIsInstance(mb.bf.bfield, bool)
        self.assertIsInstance(mb.nbf.nbfield, bool)
        self.assertIsInstance(mc.bf.bfield, bool)
        self.assertIsInstance(mc.nbf.nbfield, bool)
        # verify values
        self.assertEqual(mb.bf.bfield, True)
        self.assertEqual(mb.nbf.nbfield, True)
        self.assertEqual(mc.bf.bfield, False)
        self.assertEqual(mc.nbf.nbfield, False)

    def test_null_default(self):
        """
        Check that a BooleanField defaults to None -- which isn't
        a valid value (#15124).
        """
        # Patch the boolean field's default value. We give it a default
        # value when defining the model to satisfy the check tests
        # #20895.
        boolean_field = BooleanModel._meta.get_field('bfield')
        self.assertTrue(boolean_field.has_default())
        old_default = boolean_field.default
        try:
            boolean_field.default = NOT_PROVIDED
            # check patch was successful
            self.assertFalse(boolean_field.has_default())
            b = BooleanModel()
            self.assertIsNone(b.bfield)
            with self.assertRaises(IntegrityError):
                b.save()
        finally:
            boolean_field.default = old_default

        nb = NullBooleanModel()
        self.assertIsNone(nb.nbfield)
        nb.save()  # no error
class ChoicesTests(test.TestCase):
    """Choice handling: get_FIELD_display and get_choices with various
    choice sources (nested groups, iterators, empty iterators)."""

    def test_choices_and_field_display(self):
        """
        Check that get_choices and get_flatchoices interact with
        get_FIELD_display to return the expected values (#7913).
        """
        cases = [
            (1, 'First'),   # A nested value
            (0, 'Other'),   # A top level value
            (9, 9),         # Invalid value
            (None, None),   # Blank value
            ('', ''),       # Empty value
        ]
        for value, expected in cases:
            self.assertEqual(Whiz(c=value).get_c_display(), expected)

    def test_iterator_choices(self):
        """
        Check that get_choices works with Iterators (#23112).
        """
        # nested, invalid, blank and empty values all round-trip unchanged
        for value in (1, 9, None, ''):
            self.assertEqual(WhizIter(c=value).c, value)

    def test_empty_iterator_choices(self):
        """
        Check that get_choices works with empty iterators (#23112).
        """
        # nested, invalid, blank and empty values all round-trip unchanged
        for value in ("a", "b", None, ''):
            self.assertEqual(WhizIterEmpty(c=value).c, value)

    def test_charfield_get_choices_with_blank_iterator(self):
        """
        Check that get_choices works with an empty Iterator
        """
        field = models.CharField(choices=(choice for choice in []))
        self.assertEqual(field.get_choices(include_blank=True),
                         [('', '---------')])
class SlugFieldTests(test.TestCase):
    """Regression tests for SlugField."""

    def test_slugfield_max_length(self):
        """
        Make sure SlugField honors max_length (#9706)
        """
        long_slug = 'slug' * 50
        saved = BigS.objects.create(s=long_slug)
        reloaded = BigS.objects.get(pk=saved.pk)
        self.assertEqual(reloaded.s, long_slug)
class ValidationTest(test.TestCase):
    """Validation behavior (``Field.clean``) for the basic model field types."""

    def test_charfield_raises_error_on_empty_string(self):
        f = models.CharField()
        self.assertRaises(ValidationError, f.clean, "", None)

    def test_charfield_cleans_empty_string_when_blank_true(self):
        f = models.CharField(blank=True)
        self.assertEqual('', f.clean('', None))

    def test_integerfield_cleans_valid_string(self):
        f = models.IntegerField()
        self.assertEqual(2, f.clean('2', None))

    # Renamed from test_integerfield_raises_error_on_invalid_intput (typo).
    def test_integerfield_raises_error_on_invalid_input(self):
        f = models.IntegerField()
        self.assertRaises(ValidationError, f.clean, "a", None)

    def test_charfield_with_choices_cleans_valid_choice(self):
        f = models.CharField(max_length=1,
                             choices=[('a', 'A'), ('b', 'B')])
        self.assertEqual('a', f.clean('a', None))

    def test_charfield_with_choices_raises_error_on_invalid_choice(self):
        f = models.CharField(choices=[('a', 'A'), ('b', 'B')])
        self.assertRaises(ValidationError, f.clean, "not a", None)

    def test_charfield_get_choices_with_blank_defined(self):
        # A user-defined '' choice suppresses the automatic blank choice.
        f = models.CharField(choices=[('', '<><>'), ('a', 'A')])
        self.assertEqual(f.get_choices(True), [('', '<><>'), ('a', 'A')])

    def test_charfield_get_choices_doesnt_evaluate_lazy_strings(self):
        # Regression test for #23098
        # Will raise ZeroDivisionError if lazy is evaluated
        lazy_func = lazy(lambda x: 0 / 0, int)
        f = models.CharField(choices=[(lazy_func('group'), (('a', 'A'), ('b', 'B')))])
        self.assertEqual(f.get_choices(True)[0], ('', '---------'))

    def test_choices_validation_supports_named_groups(self):
        f = models.IntegerField(
            choices=(('group', ((10, 'A'), (20, 'B'))), (30, 'C')))
        self.assertEqual(10, f.clean(10, None))

    def test_nullable_integerfield_raises_error_with_blank_false(self):
        f = models.IntegerField(null=True, blank=False)
        self.assertRaises(ValidationError, f.clean, None, None)

    def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
        f = models.IntegerField(null=True, blank=True)
        self.assertEqual(None, f.clean(None, None))

    def test_integerfield_raises_error_on_empty_input(self):
        f = models.IntegerField(null=False)
        self.assertRaises(ValidationError, f.clean, None, None)
        self.assertRaises(ValidationError, f.clean, '', None)

    def test_integerfield_validates_zero_against_choices(self):
        # '0' is truthy as a string but not among the declared choices.
        f = models.IntegerField(choices=((1, 1),))
        self.assertRaises(ValidationError, f.clean, '0', None)

    def test_charfield_raises_error_on_empty_input(self):
        f = models.CharField(null=False)
        self.assertRaises(ValidationError, f.clean, None, None)

    def test_datefield_cleans_date(self):
        f = models.DateField()
        self.assertEqual(datetime.date(2008, 10, 10), f.clean('2008-10-10', None))

    def test_boolean_field_doesnt_accept_empty_input(self):
        f = models.BooleanField()
        self.assertRaises(ValidationError, f.clean, None, None)
class IntegerFieldTests(test.TestCase):
    """Range/type tests for IntegerField; subclasses swap in other widths."""
    # Overridden by subclasses to test the other integer field variants.
    model = IntegerModel
    documented_range = (-2147483648, 2147483647)
    def test_documented_range(self):
        """
        Ensure that values within the documented safe range pass validation,
        can be saved and retrieved without corruption.
        """
        min_value, max_value = self.documented_range
        instance = self.model(value=min_value)
        instance.full_clean()
        instance.save()
        qs = self.model.objects.filter(value__lte=min_value)
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].value, min_value)
        instance = self.model(value=max_value)
        instance.full_clean()
        instance.save()
        qs = self.model.objects.filter(value__gte=max_value)
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].value, max_value)
    def test_backend_range_validation(self):
        """
        Ensure that backend specific range are enforced at the model
        validation level. ref #12030.
        """
        field = self.model._meta.get_field('value')
        internal_type = field.get_internal_type()
        # The backend may report None for either bound (no limit).
        min_value, max_value = connection.ops.integer_field_range(internal_type)
        if min_value is not None:
            # One below the minimum must fail with the validator's message...
            instance = self.model(value=min_value - 1)
            expected_message = validators.MinValueValidator.message % {
                'limit_value': min_value
            }
            with self.assertRaisesMessage(ValidationError, expected_message):
                instance.full_clean()
            # ...while the boundary value itself must validate cleanly.
            instance.value = min_value
            instance.full_clean()
        if max_value is not None:
            instance = self.model(value=max_value + 1)
            expected_message = validators.MaxValueValidator.message % {
                'limit_value': max_value
            }
            with self.assertRaisesMessage(ValidationError, expected_message):
                instance.full_clean()
            instance.value = max_value
            instance.full_clean()
    def test_types(self):
        # The Python int type must survive assignment, save, and reload.
        instance = self.model(value=0)
        self.assertIsInstance(instance.value, six.integer_types)
        instance.save()
        self.assertIsInstance(instance.value, six.integer_types)
        instance = self.model.objects.get()
        self.assertIsInstance(instance.value, six.integer_types)
    def test_coercing(self):
        # String input is coerced to int both on create and on lookup.
        self.model.objects.create(value='10')
        instance = self.model.objects.get(value='10')
        self.assertEqual(instance.value, 10)
class SmallIntegerFieldTests(IntegerFieldTests):
    # Reuses IntegerFieldTests against the 16-bit SmallIntegerField.
    model = SmallIntegerModel
    documented_range = (-32768, 32767)
class BigIntegerFieldTests(IntegerFieldTests):
    # Reuses IntegerFieldTests against the 64-bit BigIntegerField.
    model = BigIntegerModel
    documented_range = (-9223372036854775808, 9223372036854775807)
class PositiveSmallIntegerFieldTests(IntegerFieldTests):
    # Reuses IntegerFieldTests against the unsigned 16-bit variant.
    model = PositiveSmallIntegerModel
    documented_range = (0, 32767)
class PositiveIntegerFieldTests(IntegerFieldTests):
    # Reuses IntegerFieldTests against the unsigned 32-bit variant.
    model = PositiveIntegerModel
    documented_range = (0, 2147483647)
class TypeCoercionTests(test.TestCase):
    """
    Test that database lookups can accept the wrong types and convert
    them with no error: especially on Postgres 8.3+ which does not do
    automatic casting at the DB level. See #10015.
    """

    def test_lookup_integer_in_charfield(self):
        # An int lookup value against a CharField must not error out.
        matches = Post.objects.filter(title=9)
        self.assertEqual(0, matches.count())

    def test_lookup_integer_in_textfield(self):
        # An int lookup value against a TextField must not error out.
        matches = Post.objects.filter(body=24)
        self.assertEqual(0, matches.count())
class FileFieldTests(unittest.TestCase):
    """Tests for FileField.save_form_data and delete-on-unset semantics."""

    def _document_with_file(self):
        # Build an unsaved Document whose FileField holds a known name.
        doc = Document(myfile='something.txt')
        self.assertEqual(doc.myfile, 'something.txt')
        return doc

    def test_clearable(self):
        """
        Test that FileField.save_form_data will clear its instance attribute
        value if passed False.
        """
        doc = self._document_with_file()
        doc._meta.get_field('myfile').save_form_data(doc, False)
        self.assertEqual(doc.myfile, '')

    def test_unchanged(self):
        """
        Test that FileField.save_form_data considers None to mean "no change"
        rather than "clear".
        """
        doc = self._document_with_file()
        doc._meta.get_field('myfile').save_form_data(doc, None)
        self.assertEqual(doc.myfile, 'something.txt')

    def test_changed(self):
        """
        Test that FileField.save_form_data, if passed a truthy value, updates
        its instance attribute.
        """
        doc = self._document_with_file()
        doc._meta.get_field('myfile').save_form_data(doc, 'else.txt')
        self.assertEqual(doc.myfile, 'else.txt')

    def test_delete_when_file_unset(self):
        """
        Calling delete on an unset FileField should not call the file deletion
        process, but fail silently (#20660).
        """
        doc = Document()
        try:
            doc.myfile.delete()
        except OSError:
            self.fail("Deleting an unset FileField should not raise OSError.")
class BinaryFieldTests(test.TestCase):
    """Round-trip and validation tests for BinaryField."""
    binary_data = b'\x00\x46\xFE'

    @test.skipUnlessDBFeature('supports_binary_field')
    def test_set_and_retrieve(self):
        # Both raw bytes and a memoryview wrapper must round-trip intact.
        for bdata in (self.binary_data, six.memoryview(self.binary_data)):
            stored = DataModel(data=bdata)
            stored.save()
            fetched = DataModel.objects.get(pk=stored.pk)
            self.assertEqual(bytes(fetched.data), bytes(bdata))
            # Resave (=update)
            fetched.save()
            refetched = DataModel.objects.get(pk=fetched.pk)
            self.assertEqual(bytes(refetched.data), bytes(bdata))
            # Test default value
            self.assertEqual(bytes(refetched.short_data), b'\x08')

    def test_max_length(self):
        # 12 bytes exceeds short_data's max_length and must fail validation.
        overlong = DataModel(short_data=self.binary_data * 4)
        self.assertRaises(ValidationError, overlong.full_clean)
class GenericIPAddressFieldTests(test.TestCase):
    """Tests for GenericIPAddressField's formfield generation."""

    def test_genericipaddressfield_formfield_protocol(self):
        """
        Test that GenericIPAddressField with a specified protocol does not
        generate a formfield with no specified protocol. See #20740.
        """
        # Each protocol's formfield must reject the other family's address.
        for protocol, wrong_family_addr in (('IPv4', '::1'),
                                            ('IPv6', '127.0.0.1')):
            model_field = models.GenericIPAddressField(protocol=protocol)
            form_field = model_field.formfield()
            self.assertRaises(ValidationError, form_field.clean,
                              wrong_family_addr)
class PromiseTest(test.TestCase):
    """
    Verify that each field type's get_prep_value() evaluates lazy (promise)
    arguments and returns a value of the field's native Python type.
    """
    def test_AutoField(self):
        lazy_func = lazy(lambda: 1, int)
        self.assertIsInstance(
            AutoField(primary_key=True).get_prep_value(lazy_func()),
            int)
    @unittest.skipIf(six.PY3, "Python 3 has no `long` type.")
    def test_BigIntegerField(self):
        lazy_func = lazy(lambda: long(9999999999999999999), long)
        self.assertIsInstance(
            BigIntegerField().get_prep_value(lazy_func()),
            long)
    def test_BinaryField(self):
        lazy_func = lazy(lambda: b'', bytes)
        self.assertIsInstance(
            BinaryField().get_prep_value(lazy_func()),
            bytes)
    def test_BooleanField(self):
        lazy_func = lazy(lambda: True, bool)
        self.assertIsInstance(
            BooleanField().get_prep_value(lazy_func()),
            bool)
    def test_CharField(self):
        # Both lazy text and lazy non-text input must come out as text.
        lazy_func = lazy(lambda: '', six.text_type)
        self.assertIsInstance(
            CharField().get_prep_value(lazy_func()),
            six.text_type)
        lazy_func = lazy(lambda: 0, int)
        self.assertIsInstance(
            CharField().get_prep_value(lazy_func()),
            six.text_type)
    def test_CommaSeparatedIntegerField(self):
        lazy_func = lazy(lambda: '1,2', six.text_type)
        self.assertIsInstance(
            CommaSeparatedIntegerField().get_prep_value(lazy_func()),
            six.text_type)
        lazy_func = lazy(lambda: 0, int)
        self.assertIsInstance(
            CommaSeparatedIntegerField().get_prep_value(lazy_func()),
            six.text_type)
    def test_DateField(self):
        lazy_func = lazy(lambda: datetime.date.today(), datetime.date)
        self.assertIsInstance(
            DateField().get_prep_value(lazy_func()),
            datetime.date)
    def test_DateTimeField(self):
        lazy_func = lazy(lambda: datetime.datetime.now(), datetime.datetime)
        self.assertIsInstance(
            DateTimeField().get_prep_value(lazy_func()),
            datetime.datetime)
    def test_DecimalField(self):
        lazy_func = lazy(lambda: Decimal('1.2'), Decimal)
        self.assertIsInstance(
            DecimalField().get_prep_value(lazy_func()),
            Decimal)
    def test_EmailField(self):
        lazy_func = lazy(lambda: 'mailbox@domain.com', six.text_type)
        self.assertIsInstance(
            EmailField().get_prep_value(lazy_func()),
            six.text_type)
    def test_FileField(self):
        lazy_func = lazy(lambda: 'filename.ext', six.text_type)
        self.assertIsInstance(
            FileField().get_prep_value(lazy_func()),
            six.text_type)
        lazy_func = lazy(lambda: 0, int)
        self.assertIsInstance(
            FileField().get_prep_value(lazy_func()),
            six.text_type)
    def test_FilePathField(self):
        lazy_func = lazy(lambda: 'tests.py', six.text_type)
        self.assertIsInstance(
            FilePathField().get_prep_value(lazy_func()),
            six.text_type)
        lazy_func = lazy(lambda: 0, int)
        self.assertIsInstance(
            FilePathField().get_prep_value(lazy_func()),
            six.text_type)
    def test_FloatField(self):
        lazy_func = lazy(lambda: 1.2, float)
        self.assertIsInstance(
            FloatField().get_prep_value(lazy_func()),
            float)
    def test_ImageField(self):
        lazy_func = lazy(lambda: 'filename.ext', six.text_type)
        self.assertIsInstance(
            ImageField().get_prep_value(lazy_func()),
            six.text_type)
    def test_IntegerField(self):
        lazy_func = lazy(lambda: 1, int)
        self.assertIsInstance(
            IntegerField().get_prep_value(lazy_func()),
            int)
    def test_IPAddressField(self):
        lazy_func = lazy(lambda: '127.0.0.1', six.text_type)
        self.assertIsInstance(
            IPAddressField().get_prep_value(lazy_func()),
            six.text_type)
        lazy_func = lazy(lambda: 0, int)
        self.assertIsInstance(
            IPAddressField().get_prep_value(lazy_func()),
            six.text_type)
    def test_IPAddressField_deprecated(self):
        # Using IPAddressField on a model must emit the W900 system check.
        class IPAddressModel(models.Model):
            ip = IPAddressField()
        model = IPAddressModel()
        self.assertEqual(
            model.check(),
            [checks.Warning(
                'IPAddressField has been deprecated. Support for it '
                '(except in historical migrations) will be removed in Django 1.9.',
                hint='Use GenericIPAddressField instead.',
                obj=IPAddressModel._meta.get_field('ip'),
                id='fields.W900',
            )],
        )
    def test_GenericIPAddressField(self):
        lazy_func = lazy(lambda: '127.0.0.1', six.text_type)
        self.assertIsInstance(
            GenericIPAddressField().get_prep_value(lazy_func()),
            six.text_type)
        lazy_func = lazy(lambda: 0, int)
        self.assertIsInstance(
            GenericIPAddressField().get_prep_value(lazy_func()),
            six.text_type)
    def test_NullBooleanField(self):
        lazy_func = lazy(lambda: True, bool)
        self.assertIsInstance(
            NullBooleanField().get_prep_value(lazy_func()),
            bool)
    def test_PositiveIntegerField(self):
        lazy_func = lazy(lambda: 1, int)
        self.assertIsInstance(
            PositiveIntegerField().get_prep_value(lazy_func()),
            int)
    def test_PositiveSmallIntegerField(self):
        lazy_func = lazy(lambda: 1, int)
        self.assertIsInstance(
            PositiveSmallIntegerField().get_prep_value(lazy_func()),
            int)
    def test_SlugField(self):
        lazy_func = lazy(lambda: 'slug', six.text_type)
        self.assertIsInstance(
            SlugField().get_prep_value(lazy_func()),
            six.text_type)
        lazy_func = lazy(lambda: 0, int)
        self.assertIsInstance(
            SlugField().get_prep_value(lazy_func()),
            six.text_type)
    def test_SmallIntegerField(self):
        lazy_func = lazy(lambda: 1, int)
        self.assertIsInstance(
            SmallIntegerField().get_prep_value(lazy_func()),
            int)
    def test_TextField(self):
        lazy_func = lazy(lambda: 'Abc', six.text_type)
        self.assertIsInstance(
            TextField().get_prep_value(lazy_func()),
            six.text_type)
        lazy_func = lazy(lambda: 0, int)
        self.assertIsInstance(
            TextField().get_prep_value(lazy_func()),
            six.text_type)
    def test_TimeField(self):
        lazy_func = lazy(lambda: datetime.datetime.now().time(), datetime.time)
        self.assertIsInstance(
            TimeField().get_prep_value(lazy_func()),
            datetime.time)
    def test_URLField(self):
        lazy_func = lazy(lambda: 'http://domain.com', six.text_type)
        self.assertIsInstance(
            URLField().get_prep_value(lazy_func()),
            six.text_type)
class CustomFieldTests(unittest.TestCase):
    """Regression tests for custom model field subclasses."""

    def test_14786(self):
        """
        Regression test for #14786 -- Test that field values are not prepared
        twice in get_db_prep_lookup().
        """
        class CountingField(models.TextField):
            # TextField that counts how many times get_prep_value() runs.
            def __init__(self, *args, **kwargs):
                self.prep_value_count = 0
                super(CountingField, self).__init__(*args, **kwargs)

            def get_prep_value(self, value):
                self.prep_value_count += 1
                return super(CountingField, self).get_prep_value(value)

        counting_field = CountingField()
        counting_field.get_db_prep_lookup(
            'exact', 'TEST', connection=connection, prepared=False
        )
        # Exactly one preparation pass, not two.
        self.assertEqual(counting_field.prep_value_count, 1)
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import json
from nova import context
from nova import exception
from nova import flags
from nova.scheduler import filters
from nova import test
from nova.tests.scheduler import fakes
from nova import utils
class TestFilter(filters.BaseHostFilter):
    """Minimal concrete BaseHostFilter used to exercise get_filter_classes()."""
    pass
class TestBogusFilter(object):
    """Class that doesn't inherit from BaseHostFilter.

    Used to verify that get_filter_classes() rejects non-filter classes.
    """
    pass
class HostFiltersTestCase(test.TestCase):
"""Test case for host filters."""
def setUp(self):
super(HostFiltersTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.json_query = json.dumps(
['and', ['>=', '$free_ram_mb', 1024],
['>=', '$free_disk_mb', 200 * 1024]])
# This has a side effect of testing 'get_filter_classes'
# when specifying a method (in this case, our standard filters)
classes = filters.get_filter_classes(
['nova.scheduler.filters.standard_filters'])
self.class_map = {}
for cls in classes:
self.class_map[cls.__name__] = cls
def test_get_filter_classes(self):
classes = filters.get_filter_classes(
['nova.tests.scheduler.test_host_filters.TestFilter'])
self.assertEqual(len(classes), 1)
self.assertEqual(classes[0].__name__, 'TestFilter')
# Test a specific class along with our standard filters
classes = filters.get_filter_classes(
['nova.tests.scheduler.test_host_filters.TestFilter',
'nova.scheduler.filters.standard_filters'])
self.assertEqual(len(classes), 1 + len(self.class_map))
def test_get_filter_classes_raises_on_invalid_classes(self):
self.assertRaises(ImportError,
filters.get_filter_classes,
['nova.tests.scheduler.test_host_filters.NoExist'])
self.assertRaises(exception.ClassNotFound,
filters.get_filter_classes,
['nova.tests.scheduler.test_host_filters.TestBogusFilter'])
def test_all_host_filter(self):
filt_cls = self.class_map['AllHostsFilter']()
host = fakes.FakeHostState('host1', 'compute', {})
self.assertTrue(filt_cls.host_passes(host, {}))
def _stub_service_is_up(self, ret_value):
def fake_service_is_up(service):
return ret_value
self.stubs.Set(utils, 'service_is_up', fake_service_is_up)
def test_affinity_different_filter_passes(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'compute', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [instance_uuid], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_different_filter_fails(self):
filt_cls = self.class_map['DifferentHostFilter']()
host = fakes.FakeHostState('host1', 'compute', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'different_host': [instance_uuid], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_passes(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'compute', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host1'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [instance_uuid], }}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_same_filter_fails(self):
filt_cls = self.class_map['SameHostFilter']()
host = fakes.FakeHostState('host1', 'compute', {})
instance = fakes.FakeInstance(context=self.context,
params={'host': 'host2'})
instance_uuid = instance.uuid
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'same_host': [instance_uuid], }}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_passes(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
host = fakes.FakeHostState('host1', 'compute', {})
affinity_ip = flags.FLAGS.my_ip.split('.')[0:3]
affinity_ip.append('100')
affinity_ip = str.join('.', affinity_ip)
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'cidr': '/24',
'build_near_host_ip': affinity_ip}}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_affinity_simple_cidr_filter_fails(self):
filt_cls = self.class_map['SimpleCIDRAffinityFilter']()
host = fakes.FakeHostState('host1', 'compute', {})
affinity_ip = flags.FLAGS.my_ip.split('.')
affinity_ip[-1] = '100' if affinity_ip[-1] != '100' else '101'
affinity_ip = str.join('.', affinity_ip)
filter_properties = {'context': self.context.elevated(),
'scheduler_hints': {
'cidr': '/32',
'build_near_host_ip': affinity_ip}}
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_type_filter(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['TypeAffinityFilter']()
filter_properties = {'context': self.context,
'instance_type': {'id': 1}}
filter2_properties = {'context': self.context,
'instance_type': {'id': 2}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('fake_host', 'compute',
{'capabilities': capabilities,
'service': service})
#True since empty
self.assertTrue(filt_cls.host_passes(host, filter_properties))
fakes.FakeInstance(context=self.context,
params={'host': 'fake_host', 'instance_type_id': 1})
#True since same type
self.assertTrue(filt_cls.host_passes(host, filter_properties))
#False since different type
self.assertFalse(filt_cls.host_passes(host, filter2_properties))
#False since node not homogeneous
fakes.FakeInstance(context=self.context,
params={'host': 'fake_host', 'instance_type_id': 2})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_ram_filter_fails_on_memory(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['RamFilter']()
self.flags(ram_allocation_ratio=1.0)
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1023, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_on_service_disabled(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': True}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_on_service_down(self):
self._stub_service_is_up(False)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': True}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_on_volume(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {'instance_type': {'memory_mb': 1024}}
capabilities = {'enabled': False}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'volume',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_on_no_instance_type(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
filter_properties = {}
capabilities = {'enabled': False}
service = {'disabled': False}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt1': 1, 'opt2': 2}
capabilities = {'enabled': True, 'opt1': 1, 'opt2': 2}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_extra_specs(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt1': 1, 'opt2': 3}
capabilities = {'enabled': True, 'opt1': 1, 'opt2': 2}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs_with_op_eq(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt1': '= 123'}
capabilities = {'enabled': True, 'opt1': '123'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs_with_op_eq2(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt1': '= 123'}
capabilities = {'enabled': True, 'opt1': '124'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_extra_specs_with_op_eq(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt2': '= 234'}
capabilities = {'enabled': True, 'opt2': '34'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs_with_op_seq(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt1': 's== 123'}
capabilities = {'enabled': True, 'opt1': '123'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_extra_specs_with_op_seq(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt2': 's== 234'}
capabilities = {'enabled': True, 'opt2': '2345'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs_with_op_sneq(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt1': 's!= 123'}
capabilities = {'enabled': True, 'opt1': '11'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_extra_specs_with_op_sneq(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt2': 's!= 234'}
capabilities = {'enabled': True, 'opt2': '234'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs_with_op_sgle(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt1': 's<= 123', 'opt2': 's>= 43'}
capabilities = {'enabled': True, 'opt1': '11', 'opt2': '543'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_extra_specs_with_op_sge(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt2': 's>= 234'}
capabilities = {'enabled': True, 'opt2': '1000'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_extra_specs_with_op_sle(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt2': 's<= 1000'}
capabilities = {'enabled': True, 'opt2': '234'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs_with_op_sgl(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt1': 's< 123', 'opt2': 's> 43'}
capabilities = {'enabled': True, 'opt1': '11', 'opt2': '543'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_extra_specs_with_op_sl(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt2': 's< 12'}
capabilities = {'enabled': True, 'opt2': '2'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_extra_specs_with_op_sg(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt2': 's> 2'}
capabilities = {'enabled': True, 'opt2': '12'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs_with_op_in(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt1': '<in> 11'}
capabilities = {'enabled': True, 'opt1': '12311321'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_fails_extra_specs_with_op_in(self):
self._stub_service_is_up(True)
filt_cls = self.class_map['ComputeFilter']()
extra_specs = {'opt1': '<in> 11'}
capabilities = {'enabled': True, 'opt1': '12310321'}
service = {'disabled': False}
filter_properties = {'instance_type': {'memory_mb': 1024,
'extra_specs': extra_specs}}
host = fakes.FakeHostState('host1', 'compute',
{'free_ram_mb': 1024, 'capabilities': capabilities,
'service': service})
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_compute_filter_passes_extra_specs_with_op_or(self):
    """'<or>' passes when the capability matches one alternative."""
    self._stub_service_is_up(True)
    compute_filter = self.class_map['ComputeFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'extra_specs': {'opt1': '<or> 11 <or> 12'}}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': {'enabled': True,
                                                 'opt1': '12'},
                                'service': {'disabled': False}})
    self.assertTrue(compute_filter.host_passes(host, props))

def test_compute_filter_fails_extra_specs_with_op_or(self):
    """'<or>' fails when the capability matches no alternative."""
    self._stub_service_is_up(True)
    compute_filter = self.class_map['ComputeFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'extra_specs': {'opt1': '<or> 11 <or> 12'}}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': {'enabled': True,
                                                 'opt1': '13'},
                                'service': {'disabled': False}})
    self.assertFalse(compute_filter.host_passes(host, props))
def test_compute_filter_passes_extra_specs_with_op_le(self):
    """'<=' specs pass when the capabilities are within the bounds."""
    self._stub_service_is_up(True)
    compute_filter = self.class_map['ComputeFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'extra_specs': {'opt1': '<= 10',
                                               'opt2': '<=+ 20'}}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': {'enabled': True,
                                                 'opt1': 2, 'opt2': 2},
                                'service': {'disabled': False}})
    self.assertTrue(compute_filter.host_passes(host, props))

def test_compute_filter_fails_extra_specs_with_op_le(self):
    """'<=' specs fail when a capability exceeds its bound."""
    self._stub_service_is_up(True)
    compute_filter = self.class_map['ComputeFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'extra_specs': {'opt1': '<= 2',
                                               'opt2': '<=- 2'}}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': {'enabled': True,
                                                 'opt1': 1, 'opt2': 3},
                                'service': {'disabled': False}})
    self.assertFalse(compute_filter.host_passes(host, props))

def test_compute_filter_passes_extra_specs_with_op_ge(self):
    """'>=' specs pass when the capabilities meet the lower bounds."""
    self._stub_service_is_up(True)
    compute_filter = self.class_map['ComputeFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'extra_specs': {'opt1': '>= 1',
                                               'opt2': '>=+ 2'}}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': {'enabled': True,
                                                 'opt1': 2, 'opt2': 2},
                                'service': {'disabled': False}})
    self.assertTrue(compute_filter.host_passes(host, props))

def test_compute_filter_fails_extra_specs_with_op_ge(self):
    """'>=' specs fail when a capability falls below its bound."""
    self._stub_service_is_up(True)
    compute_filter = self.class_map['ComputeFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'extra_specs': {'opt1': '>= 2',
                                               'opt2': '>=- 2'}}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'capabilities': {'enabled': True,
                                                 'opt1': 1, 'opt2': 2},
                                'service': {'disabled': False}})
    self.assertFalse(compute_filter.host_passes(host, props))
def test_isolated_hosts_fails_isolated_on_non_isolated(self):
    """An isolated image may not land on a non-isolated host."""
    self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
    isolation_filter = self.class_map['IsolatedHostsFilter']()
    props = {'request_spec':
             {'instance_properties': {'image_ref': 'isolated'}}}
    host = fakes.FakeHostState('non-isolated', 'compute', {})
    self.assertFalse(isolation_filter.host_passes(host, props))

def test_isolated_hosts_fails_non_isolated_on_isolated(self):
    """A non-isolated image may not land on an isolated host."""
    self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
    isolation_filter = self.class_map['IsolatedHostsFilter']()
    props = {'request_spec':
             {'instance_properties': {'image_ref': 'non-isolated'}}}
    host = fakes.FakeHostState('isolated', 'compute', {})
    self.assertFalse(isolation_filter.host_passes(host, props))

def test_isolated_hosts_passes_isolated_on_isolated(self):
    """An isolated image on an isolated host is allowed."""
    self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
    isolation_filter = self.class_map['IsolatedHostsFilter']()
    props = {'request_spec':
             {'instance_properties': {'image_ref': 'isolated'}}}
    host = fakes.FakeHostState('isolated', 'compute', {})
    self.assertTrue(isolation_filter.host_passes(host, props))

def test_isolated_hosts_passes_non_isolated_on_non_isolated(self):
    """A non-isolated image on a non-isolated host is allowed."""
    self.flags(isolated_images=['isolated'], isolated_hosts=['isolated'])
    isolation_filter = self.class_map['IsolatedHostsFilter']()
    props = {'request_spec':
             {'instance_properties': {'image_ref': 'non-isolated'}}}
    host = fakes.FakeHostState('non-isolated', 'compute', {})
    self.assertTrue(isolation_filter.host_passes(host, props))
def test_json_filter_passes(self):
    """A host meeting the RAM and disk bounds of self.json_query passes."""
    json_filter = self.class_map['JsonFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'root_gb': 200,
                               'ephemeral_gb': 0},
             'scheduler_hints': {'query': self.json_query}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': 200 * 1024,
                                'capabilities': {'enabled': True}})
    self.assertTrue(json_filter.host_passes(host, props))

def test_json_filter_passes_with_no_query(self):
    """Without a query hint the filter accepts any host."""
    json_filter = self.class_map['JsonFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'root_gb': 200,
                               'ephemeral_gb': 0}}
    # Deliberately exhausted host: still passes, no query to apply.
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 0,
                                'free_disk_mb': 0,
                                'capabilities': {'enabled': True}})
    self.assertTrue(json_filter.host_passes(host, props))

def test_json_filter_fails_on_memory(self):
    """One MB short of the query's RAM bound fails the host."""
    json_filter = self.class_map['JsonFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'root_gb': 200,
                               'ephemeral_gb': 0},
             'scheduler_hints': {'query': self.json_query}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1023,
                                'free_disk_mb': 200 * 1024,
                                'capabilities': {'enabled': True}})
    self.assertFalse(json_filter.host_passes(host, props))

def test_json_filter_fails_on_disk(self):
    """One MB short of the query's disk bound fails the host."""
    json_filter = self.class_map['JsonFilter']()
    props = {'instance_type': {'memory_mb': 1024,
                               'root_gb': 200,
                               'ephemeral_gb': 0},
             'scheduler_hints': {'query': self.json_query}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': (200 * 1024) - 1,
                                'capabilities': {'enabled': True}})
    self.assertFalse(json_filter.host_passes(host, props))
def test_json_filter_fails_on_caps_disabled(self):
    """A query requiring $capabilities.enabled fails when it is False."""
    json_filter = self.class_map['JsonFilter']()
    query = json.dumps(
        ['and', ['>=', '$free_ram_mb', 1024],
                ['>=', '$free_disk_mb', 200 * 1024],
                '$capabilities.enabled'])
    props = {'instance_type': {'memory_mb': 1024,
                               'root_gb': 200,
                               'ephemeral_gb': 0},
             'scheduler_hints': {'query': query}}
    # RAM and disk satisfy the query; only the disabled caps should fail it.
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': 200 * 1024,
                                'capabilities': {'enabled': False}})
    self.assertFalse(json_filter.host_passes(host, props))
def test_json_filter_fails_on_service_disabled(self):
    """A ['not', '$service.disabled'] query rejects a disabled service.

    Bug fix: the original built the ``service`` dict but never attached it
    to the host's attributes, so the query's ``$service.disabled`` clause
    never saw the disabled service this test is meant to exercise (sibling
    tests such as test_json_filter_happy_day do pass ``'service'``).
    """
    filt_cls = self.class_map['JsonFilter']()
    json_query = json.dumps(
        ['and', ['>=', '$free_ram_mb', 1024],
         ['>=', '$free_disk_mb', 200 * 1024],
         ['not', '$service.disabled']])
    filter_properties = {'instance_type': {'memory_mb': 1024,
                                           'local_gb': 200},
                         'scheduler_hints': {'query': json_query}}
    capabilities = {'enabled': True}
    service = {'disabled': True}
    # RAM and disk satisfy the query; the disabled service must fail it.
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 1024,
                                'free_disk_mb': 200 * 1024,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_happy_day(self):
    """Test json filter more thoroughly"""
    filt_cls = self.class_map['JsonFilter']()
    # Query: caps enabled, opt1 == 'match', and free ram/disk either both
    # strictly below (30, 300) or both strictly above -- never mixed/equal.
    raw = ['and',
           '$capabilities.enabled',
           ['=', '$capabilities.opt1', 'match'],
           ['or',
            ['and',
             ['<', '$free_ram_mb', 30],
             ['<', '$free_disk_mb', 300]],
            ['and',
             ['>', '$free_ram_mb', 30],
             ['>', '$free_disk_mb', 300]]]]
    filter_properties = {'scheduler_hints': {'query': json.dumps(raw)}}
    # Passes: ram and disk both below the thresholds.
    capabilities = {'enabled': True, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 10,
                                'free_disk_mb': 200,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
    # Passes: ram and disk both above the thresholds.
    capabilities = {'enabled': True, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 40,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertTrue(filt_cls.host_passes(host, filter_properties))
    # Fails due to caps disabled.
    # NOTE(review): topic 'instance_type' below looks like a copy/paste slip
    # (every sibling case uses 'compute'); presumably harmless to this
    # assertion -- confirm.
    capabilities = {'enabled': False, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'instance_type',
                               {'free_ram_mb': 40,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
    # Fails due to being exact memory/disk we don't want
    capabilities = {'enabled': True, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 30,
                                'free_disk_mb': 300,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
    # Fails due to memory lower but disk higher
    capabilities = {'enabled': True, 'opt1': 'match'}
    service = {'disabled': False}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 20,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
    # Fails due to capabilities 'opt1' not equal
    # NOTE(review): service is {'enabled': True} here while sibling cases
    # use {'disabled': False}; presumably unintended but irrelevant to this
    # assertion, which fails on opt1 alone -- confirm.
    capabilities = {'enabled': True, 'opt1': 'no-match'}
    service = {'enabled': True}
    host = fakes.FakeHostState('host1', 'compute',
                               {'free_ram_mb': 20,
                                'free_disk_mb': 400,
                                'capabilities': capabilities,
                                'service': service})
    self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_basic_operators(self):
    """Exercise each primitive JsonFilter operator against one host."""
    json_filter = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': {'enabled': True}})

    def query_result(raw_query):
        # Wrap a raw query into filter properties and run the filter.
        props = {'scheduler_hints': {'query': json.dumps(raw_query)}}
        return json_filter.host_passes(host, props)

    # (operator, arguments, expected_result)
    cases = [
        ('=', [1, 1], True),
        ('=', [1, 2], False),
        ('<', [1, 2], True),
        ('<', [1, 1], False),
        ('<', [2, 1], False),
        ('>', [2, 1], True),
        ('>', [2, 2], False),
        ('>', [2, 3], False),
        ('<=', [1, 2], True),
        ('<=', [1, 1], True),
        ('<=', [2, 1], False),
        ('>=', [2, 1], True),
        ('>=', [2, 2], True),
        ('>=', [2, 3], False),
        ('in', [1, 1], True),
        ('in', [1, 1, 2, 3], True),
        ('in', [4, 1, 2, 3], False),
        ('not', [True], False),
        ('not', [False], True),
        ('or', [True, False], True),
        ('or', [False, False], False),
        ('and', [True, True], True),
        ('and', [False, False], False),
        ('and', [True, False], False),
        # Nested ((True or False) and (2 > 1)) == Passes
        ('and', [['or', True, False], ['>', 2, 1]], True),
    ]
    for operator, arguments, expected in cases:
        self.assertEqual(expected, query_result([operator] + arguments))
    # 'not' maps over its arguments, giving [False, True, False, True];
    # the filter passes if any result is True.
    self.assertTrue(query_result(['not', True, False, True, False]))
    # Here every argument maps to False, so nothing is True and it fails.
    self.assertFalse(query_result(['not', True, True, True]))
def test_json_filter_unknown_operator_raises(self):
    """An operator the filter does not know must raise KeyError."""
    json_filter = self.class_map['JsonFilter']()
    props = {'scheduler_hints': {'query': json.dumps(['!=', 1, 2])}}
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': {'enabled': True}})
    self.assertRaises(KeyError,
                      json_filter.host_passes, host, props)

def test_json_filter_empty_filters_pass(self):
    """Empty list/dict queries place no constraints on the host."""
    json_filter = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': {'enabled': True}})
    for empty_query in ([], {}):
        props = {'scheduler_hints': {'query': json.dumps(empty_query)}}
        self.assertTrue(json_filter.host_passes(host, props))

def test_json_filter_invalid_num_arguments_fails(self):
    """Operators applied to too few arguments fail rather than raise."""
    json_filter = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': {'enabled': True}})
    nested = ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
    for raw in (nested, ['>', 1]):
        props = {'scheduler_hints': {'query': json.dumps(raw)}}
        self.assertFalse(json_filter.host_passes(host, props))

def test_json_filter_unknown_variable_ignored(self):
    """References to undefined $variables are skipped, not errors."""
    json_filter = self.class_map['JsonFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'capabilities': {'enabled': True}})
    for raw in (['=', '$........', 1, 1], ['=', '$foo', 2, 2]):
        props = {'scheduler_hints': {'query': json.dumps(raw)}}
        self.assertTrue(json_filter.host_passes(host, props))
def test_core_filter_passes(self):
    """With ratio 2, 4 physical vcpus allow 8; 7 used plus 1 still fits."""
    core_filter = self.class_map['CoreFilter']()
    props = {'instance_type': {'vcpus': 1}}
    self.flags(cpu_allocation_ratio=2)
    host = fakes.FakeHostState('host1', 'compute',
                               {'vcpus_total': 4, 'vcpus_used': 7})
    self.assertTrue(core_filter.host_passes(host, props))

def test_core_filter_fails_safe(self):
    """A host reporting no vcpu data is allowed through (fail-safe)."""
    core_filter = self.class_map['CoreFilter']()
    props = {'instance_type': {'vcpus': 1}}
    host = fakes.FakeHostState('host1', 'compute', {})
    self.assertTrue(core_filter.host_passes(host, props))

def test_core_filter_fails(self):
    """All 8 overcommitted vcpus are used, so one more does not fit."""
    core_filter = self.class_map['CoreFilter']()
    props = {'instance_type': {'vcpus': 1}}
    self.flags(cpu_allocation_ratio=2)
    host = fakes.FakeHostState('host1', 'compute',
                               {'vcpus_total': 4, 'vcpus_used': 8})
    self.assertFalse(core_filter.host_passes(host, props))
@staticmethod
def _make_zone_request(zone, is_admin=False):
    """Build a minimal filter-properties dict requesting *zone*."""
    ctxt = context.RequestContext('fake', 'fake', is_admin=is_admin)
    request_spec = {'instance_properties': {'availability_zone': zone}}
    return {'context': ctxt, 'request_spec': request_spec}
def test_availability_zone_filter_same(self):
    """A host inside the requested availability zone passes."""
    zone_filter = self.class_map['AvailabilityZoneFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'service': {'availability_zone': 'nova'}})
    request = self._make_zone_request('nova')
    self.assertTrue(zone_filter.host_passes(host, request))

def test_availability_zone_filter_different(self):
    """A host outside the requested availability zone is rejected."""
    zone_filter = self.class_map['AvailabilityZoneFilter']()
    host = fakes.FakeHostState('host1', 'compute',
                               {'service': {'availability_zone': 'nova'}})
    request = self._make_zone_request('bad')
    self.assertFalse(zone_filter.host_passes(host, request))
| |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/tensorboard/plugins/projector/projector_config.proto
import sys
# Identity on Python 2; on Python 3 re-encode as latin1 so the serialized
# descriptor literal below stays byte-exact.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File descriptor built from the serialized .proto below; it defines the
# SpriteMetadata, EmbeddingInfo and ProjectorConfig messages.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='tensorflow/tensorboard/plugins/projector/projector_config.proto',
  package='tensorflow',
  syntax='proto3',
  serialized_pb=_b('\n?tensorflow/tensorboard/plugins/projector/projector_config.proto\x12\ntensorflow\">\n\x0eSpriteMetadata\x12\x12\n\nimage_path\x18\x01 \x01(\t\x12\x18\n\x10single_image_dim\x18\x02 \x03(\r\"\xaa\x01\n\rEmbeddingInfo\x12\x13\n\x0btensor_name\x18\x01 \x01(\t\x12\x15\n\rmetadata_path\x18\x02 \x01(\t\x12\x16\n\x0e\x62ookmarks_path\x18\x03 \x01(\t\x12\x14\n\x0ctensor_shape\x18\x04 \x03(\r\x12*\n\x06sprite\x18\x05 \x01(\x0b\x32\x1a.tensorflow.SpriteMetadata\x12\x13\n\x0btensor_path\x18\x06 \x01(\t\"}\n\x0fProjectorConfig\x12\x1d\n\x15model_checkpoint_path\x18\x01 \x01(\t\x12-\n\nembeddings\x18\x02 \x03(\x0b\x32\x19.tensorflow.EmbeddingInfo\x12\x1c\n\x14model_checkpoint_dir\x18\x03 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Descriptor for tensorflow.SpriteMetadata.
_SPRITEMETADATA = _descriptor.Descriptor(
  name='SpriteMetadata',
  full_name='tensorflow.SpriteMetadata',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='image_path', full_name='tensorflow.SpriteMetadata.image_path', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='single_image_dim', full_name='tensorflow.SpriteMetadata.single_image_dim', index=1,
      number=2, type=13, cpp_type=3, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=79,
  serialized_end=141,
)
# Descriptor for tensorflow.EmbeddingInfo.
_EMBEDDINGINFO = _descriptor.Descriptor(
  name='EmbeddingInfo',
  full_name='tensorflow.EmbeddingInfo',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='tensor_name', full_name='tensorflow.EmbeddingInfo.tensor_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='metadata_path', full_name='tensorflow.EmbeddingInfo.metadata_path', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='bookmarks_path', full_name='tensorflow.EmbeddingInfo.bookmarks_path', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tensor_shape', full_name='tensorflow.EmbeddingInfo.tensor_shape', index=3,
      number=4, type=13, cpp_type=3, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='sprite', full_name='tensorflow.EmbeddingInfo.sprite', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tensor_path', full_name='tensorflow.EmbeddingInfo.tensor_path', index=5,
      number=6, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=144,
  serialized_end=314,
)
# Descriptor for tensorflow.ProjectorConfig.
_PROJECTORCONFIG = _descriptor.Descriptor(
  name='ProjectorConfig',
  full_name='tensorflow.ProjectorConfig',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='model_checkpoint_path', full_name='tensorflow.ProjectorConfig.model_checkpoint_path', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='embeddings', full_name='tensorflow.ProjectorConfig.embeddings', index=1,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='model_checkpoint_dir', full_name='tensorflow.ProjectorConfig.model_checkpoint_dir', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=316,
  serialized_end=441,
)
# Link cross-message field types and register the descriptors.
_EMBEDDINGINFO.fields_by_name['sprite'].message_type = _SPRITEMETADATA
_PROJECTORCONFIG.fields_by_name['embeddings'].message_type = _EMBEDDINGINFO
DESCRIPTOR.message_types_by_name['SpriteMetadata'] = _SPRITEMETADATA
DESCRIPTOR.message_types_by_name['EmbeddingInfo'] = _EMBEDDINGINFO
DESCRIPTOR.message_types_by_name['ProjectorConfig'] = _PROJECTORCONFIG
# Concrete message classes generated from the descriptors above.
SpriteMetadata = _reflection.GeneratedProtocolMessageType('SpriteMetadata', (_message.Message,), dict(
  DESCRIPTOR = _SPRITEMETADATA,
  __module__ = 'tensorflow.tensorboard.plugins.projector.projector_config_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.SpriteMetadata)
  ))
_sym_db.RegisterMessage(SpriteMetadata)
EmbeddingInfo = _reflection.GeneratedProtocolMessageType('EmbeddingInfo', (_message.Message,), dict(
  DESCRIPTOR = _EMBEDDINGINFO,
  __module__ = 'tensorflow.tensorboard.plugins.projector.projector_config_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.EmbeddingInfo)
  ))
_sym_db.RegisterMessage(EmbeddingInfo)
ProjectorConfig = _reflection.GeneratedProtocolMessageType('ProjectorConfig', (_message.Message,), dict(
  DESCRIPTOR = _PROJECTORCONFIG,
  __module__ = 'tensorflow.tensorboard.plugins.projector.projector_config_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.ProjectorConfig)
  ))
_sym_db.RegisterMessage(ProjectorConfig)
# @@protoc_insertion_point(module_scope)
| |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import getpass
import os
from absl import app
from future import builtins
import mock
import MySQLdb
from MySQLdb import connections
from MySQLdb.constants import CR as mysql_conn_errors
from grr_response_core import config as grr_config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_proto import objects_pb2
from grr_response_server import data_store
from grr_response_server import signed_binary_utils
from grr_response_server.bin import config_updater_util
from grr.test_lib import test_lib
class ConfigUpdaterLibTest(test_lib.GRRBaseTest):
  """Tests for the interactive admin helpers in config_updater_util."""

  def setUp(self):
    super(ConfigUpdaterLibTest, self).setUp()
    # Route builtins.input through a mock so each test can script the
    # interactive prompts via side_effect.
    input_patcher = mock.patch.object(builtins, "input")
    self.input_mock = input_patcher.start()
    self.addCleanup(input_patcher.stop)

  @mock.patch.object(MySQLdb, "connect")
  @mock.patch.object(getpass, "getpass")
  def testConfigureMySQLDatastore(self, getpass_mock, connect_mock):
    # Mock user-inputs for MySQL prompts.
    self.input_mock.side_effect = [
        "",  # MySQL hostname (the default is localhost).
        "1234",  # MySQL port
        "grr-test-db",  # GRR db name.
        "grr-test-user",  # GRR db user.
        "n",  # No SSL.
    ]
    getpass_mock.return_value = "grr-test-password"  # DB password for GRR.
    connect_mock.return_value = mock.Mock(spec=connections.Connection)
    config = grr_config.CONFIG.CopyConfig()
    config_updater_util.ConfigureMySQLDatastore(config)
    connect_mock.assert_called_once_with(
        host="localhost",
        port=1234,
        db="grr-test-db",
        user="grr-test-user",
        passwd="grr-test-password",
        charset="utf8")
    self.assertEqual(config.writeback_data["Mysql.host"], "localhost")
    self.assertEqual(config.writeback_data["Mysql.port"], 1234)
    self.assertEqual(config.writeback_data["Mysql.database_name"],
                     "grr-test-db")
    self.assertEqual(config.writeback_data["Mysql.database_username"],
                     "grr-test-user")
    self.assertEqual(config.writeback_data["Mysql.database_password"],
                     "grr-test-password")

  @mock.patch.object(MySQLdb, "connect")
  @mock.patch.object(getpass, "getpass")
  def testConfigureMySQLDatastoreWithSSL(self, getpass_mock, connect_mock):
    # Mock user-inputs for MySQL prompts.
    self.input_mock.side_effect = [
        "",  # MySQL hostname (the default is localhost).
        "1234",  # MySQL port
        "grr-test-db",  # GRR db name.
        "grr-test-user",  # GRR db user.
        "Y",  # Configure SSL.
        "key_file_path",
        "cert_file_path",
        "ca_cert_file_path",
    ]
    getpass_mock.return_value = "grr-test-password"  # DB password for GRR.
    # The SSL path probes the server's have_ssl variable via a cursor.
    cursor_mock = mock.Mock()
    cursor_mock.fetchone = mock.Mock(return_value=["have_ssl", "YES"])
    connect_mock.return_value = mock.Mock(spec=connections.Connection)
    connect_mock.return_value.cursor = mock.Mock(return_value=cursor_mock)
    config = grr_config.CONFIG.CopyConfig()
    config_updater_util.ConfigureMySQLDatastore(config)
    connect_mock.assert_called_once_with(
        host="localhost",
        port=1234,
        db="grr-test-db",
        user="grr-test-user",
        passwd="grr-test-password",
        charset="utf8",
        ssl={
            "key": "key_file_path",
            "cert": "cert_file_path",
            "ca": "ca_cert_file_path",
        })
    self.assertEqual(config.writeback_data["Mysql.host"], "localhost")
    self.assertEqual(config.writeback_data["Mysql.port"], 1234)
    self.assertEqual(config.writeback_data["Mysql.database_name"],
                     "grr-test-db")
    self.assertEqual(config.writeback_data["Mysql.database_username"],
                     "grr-test-user")
    self.assertEqual(config.writeback_data["Mysql.database_password"],
                     "grr-test-password")
    self.assertEqual(config.writeback_data["Mysql.client_key_path"],
                     "key_file_path")
    self.assertEqual(config.writeback_data["Mysql.client_cert_path"],
                     "cert_file_path")
    self.assertEqual(config.writeback_data["Mysql.ca_cert_path"],
                     "ca_cert_file_path")

  @mock.patch.object(MySQLdb, "connect")
  @mock.patch.object(getpass, "getpass")
  @mock.patch.object(config_updater_util, "_MYSQL_MAX_RETRIES", new=1)
  @mock.patch.object(config_updater_util, "_MYSQL_RETRY_WAIT_SECS", new=0.1)
  def testConfigureMySQLDatastore_ConnectionRetry(self, getpass_mock,
                                                  connect_mock):
    """With retries exhausted and the user declining, setup must abort."""
    # Mock user-inputs for MySQL prompts.
    self.input_mock.side_effect = [
        "Y",  # Use REL_DB as the primary data store.
        "",  # MySQL hostname (the default is localhost).
        "1234",  # MySQL port
        "grr-test-db",  # GRR db name.
        "grr-test-user",  # GRR db user.
        "n",  # No SSL.
        "n"  # Exit config initialization after retries are depleted.
    ]
    getpass_mock.return_value = "grr-test-password"  # DB password for GRR.
    connect_mock.side_effect = MySQLdb.OperationalError(
        mysql_conn_errors.CONNECTION_ERROR, "Fake connection error.")
    config = grr_config.CONFIG.CopyConfig()
    with self.assertRaises(config_updater_util.ConfigInitError):
      config_updater_util.ConfigureMySQLDatastore(config)
    # One initial attempt plus one retry (_MYSQL_MAX_RETRIES == 1).
    self.assertEqual(connect_mock.call_count, 2)

  def testUploadPythonHack(self):
    with utils.TempDirectory() as dir_path:
      python_hack_path = os.path.join(dir_path, "hello_world.py")
      with open(python_hack_path, "wb") as f:
        f.write(b"print('Hello, world!')")
      config_updater_util.UploadSignedBinary(
          python_hack_path,
          objects_pb2.SignedBinaryID.BinaryType.PYTHON_HACK,
          "linux",
          upload_subdirectory="test")
      python_hack_urn = rdfvalue.RDFURN(
          "aff4:/config/python_hacks/linux/test/hello_world.py")
      blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary(
          python_hack_urn)
      uploaded_blobs = list(
          signed_binary_utils.StreamSignedBinaryContents(blob_iterator))
      uploaded_content = b"".join(uploaded_blobs)
      self.assertEqual(uploaded_content, b"print('Hello, world!')")

  def testUploadExecutable(self):
    with utils.TempDirectory() as dir_path:
      executable_path = os.path.join(dir_path, "foo.exe")
      with open(executable_path, "wb") as f:
        f.write(b"\xaa\xbb\xcc\xdd")
      config_updater_util.UploadSignedBinary(
          executable_path,
          objects_pb2.SignedBinaryID.BinaryType.EXECUTABLE,
          "windows",
          upload_subdirectory="anti-malware/registry-tools")
      executable_urn = rdfvalue.RDFURN(
          "aff4:/config/executables/windows/anti-malware/registry-tools/"
          "foo.exe")
      blob_iterator, _ = signed_binary_utils.FetchBlobsForSignedBinary(
          executable_urn)
      uploaded_blobs = list(
          signed_binary_utils.StreamSignedBinaryContents(blob_iterator))
      uploaded_content = b"".join(uploaded_blobs)
      self.assertEqual(uploaded_content, b"\xaa\xbb\xcc\xdd")

  def testUploadOverlyLargeSignedBinary(self):
    """A binary above the configured size cap must be rejected."""
    with mock.patch.object(config_updater_util, "_MAX_SIGNED_BINARY_BYTES", 5):
      with utils.TempDirectory() as dir_path:
        executable_path = os.path.join(dir_path, "foo.exe")
        with open(executable_path, "wb") as f:
          f.write(b"\xaa\xbb\xcc\xdd\xee\xff")
        expected_message = (
            "File [%s] is of size 6 (bytes), which exceeds the allowed maximum "
            "of 5 bytes." % executable_path)
        with self.assertRaisesWithLiteralMatch(
            config_updater_util.BinaryTooLargeError, expected_message):
          config_updater_util.UploadSignedBinary(
              executable_path, objects_pb2.SignedBinaryID.BinaryType.EXECUTABLE,
              "windows")

  @mock.patch.object(getpass, "getpass")
  def testCreateAdminUser(self, getpass_mock):
    getpass_mock.return_value = "foo_password"
    config_updater_util.CreateUser("foo_user", is_admin=True)
    self._AssertStoredUserDetailsAre("foo_user", "foo_password", True)

  def testCreateStandardUser(self):
    config_updater_util.CreateUser(
        "foo_user", password="foo_password", is_admin=False)
    self._AssertStoredUserDetailsAre("foo_user", "foo_password", False)

  def testCreateAlreadyExistingUser(self):
    config_updater_util.CreateUser("foo_user", password="foo_password1")
    with self.assertRaises(config_updater_util.UserAlreadyExistsError):
      config_updater_util.CreateUser("foo_user", password="foo_password2")

  def testUpdateUser(self):
    config_updater_util.CreateUser(
        "foo_user", password="foo_password1", is_admin=False)
    self._AssertStoredUserDetailsAre("foo_user", "foo_password1", False)
    config_updater_util.UpdateUser(
        "foo_user", password="foo_password2", is_admin=True)
    self._AssertStoredUserDetailsAre("foo_user", "foo_password2", True)

  def testGetUserSummary(self):
    config_updater_util.CreateUser(
        "foo_user", password="foo_password", is_admin=False)
    self.assertMultiLineEqual(
        config_updater_util.GetUserSummary("foo_user"),
        "Username: foo_user\nIs Admin: False")

  def testGetAllUserSummaries(self):
    config_updater_util.CreateUser(
        "foo_user1", password="foo_password1", is_admin=False)
    config_updater_util.CreateUser(
        "foo_user2", password="foo_password2", is_admin=True)
    expected_summaries = ("Username: foo_user1\nIs Admin: False\n\n"
                          "Username: foo_user2\nIs Admin: True")
    self.assertMultiLineEqual(config_updater_util.GetAllUserSummaries(),
                              expected_summaries)

  def testDeleteUser(self):
    config_updater_util.CreateUser(
        "foo_user", password="foo_password", is_admin=False)
    self.assertNotEmpty(config_updater_util.GetUserSummary("foo_user"))
    config_updater_util.DeleteUser("foo_user")
    with self.assertRaises(config_updater_util.UserNotFoundError):
      config_updater_util.GetUserSummary("foo_user")

  def _AssertStoredUserDetailsAre(self, username, password, is_admin):
    """Checks the stored user's password and admin status.

    Bug fix: the original only asserted the user type when is_admin was
    True, so a standard user wrongly stored as an admin went undetected.
    """
    user = data_store.REL_DB.ReadGRRUser(username)
    self.assertTrue(user.password.CheckPassword(password))
    if is_admin:
      self.assertEqual(user.user_type,
                       objects_pb2.GRRUser.UserType.USER_TYPE_ADMIN)
    else:
      self.assertNotEqual(user.user_type,
                          objects_pb2.GRRUser.UserType.USER_TYPE_ADMIN)

  def testArgparseBool_CaseInsensitive(self):
    parser = argparse.ArgumentParser()
    parser.add_argument("--foo", type=config_updater_util.ArgparseBool)
    parser.add_argument("--bar", type=config_updater_util.ArgparseBool)
    namespace = parser.parse_args(["--foo", "True", "--bar", "fAlse"])
    self.assertIsInstance(namespace.foo, bool)
    self.assertIsInstance(namespace.bar, bool)
    self.assertTrue(namespace.foo)
    self.assertFalse(namespace.bar)

  def testArgparseBool_DefaultValue(self):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--foo", default=True, type=config_updater_util.ArgparseBool)
    parser.add_argument(
        "--bar", default=False, type=config_updater_util.ArgparseBool)
    namespace = parser.parse_args([])
    self.assertTrue(namespace.foo)
    self.assertFalse(namespace.bar)

  def testArgparseBool_InvalidType(self):
    expected_error = "Unexpected type: float. Expected a string."
    with self.assertRaisesWithLiteralMatch(argparse.ArgumentTypeError,
                                           expected_error):
      config_updater_util.ArgparseBool(1.23)

  def testArgparseBool_InvalidValue(self):
    expected_error = "Invalid value encountered. Expected 'True' or 'False'."
    with self.assertRaisesWithLiteralMatch(argparse.ArgumentTypeError,
                                           expected_error):
      config_updater_util.ArgparseBool("baz")
def main(argv):
  # Delegate to GRR's test runner (flag parsing, test discovery, setup).
  test_lib.main(argv)


if __name__ == "__main__":
  app.run(main)
| |
import imp
import os
import random
import sys
import traceback
import io
try:
import threading
except ImportError:
import dummy_threading as threading
from rgkit import rg
from rgkit.gamestate import GameState
from rgkit.settings import settings
sys.modules['rg'] = rg # preserve backwards compatible robot imports
class NullDevice(object):
    """A write-only file-like object that silently discards all output."""

    def write(self, msg):
        """Ignore *msg* entirely."""
        return None

    def flush(self):
        """Nothing is buffered, so there is nothing to do."""
        return None
# TODO: use actual logging module with log levels and copying stdout
# instead of this
class Tee(object):
    """Stream wrapper that mirrors every write onto a second stream.

    ``write`` sends the message verbatim to ``orig`` and, after passing it
    through ``conv`` (the identity function by default), to ``copy``.
    """

    def __init__(self, orig, copy, conv=lambda s: s):
        self.orig = orig
        self.copy = copy
        self.conv = conv

    def write(self, message):
        # The primary stream gets the raw message; the copy may need a
        # conversion first (e.g. encoding str to bytes).
        self.orig.write(message)
        self.copy.write(self.conv(message))

    def flush(self):
        for stream in (self.orig, self.copy):
            stream.flush()
class Player(object):
    """Loads and sandboxes one participant's robot.

    A robot may be supplied as an instance, as source code, or as a path to
    a source file.  All calls into the robot go through _get_response, which
    validates the returned action and captures the robot's console output.
    """

    def __init__(self, file_name=None, robot=None, code=None, name=None):
        """
        One of these arguments must be provided:
        file_name -- path to file containing a robot
        robot -- instance of a robot
        code -- source code containing a robot

        name argument can be used to set robot's name
        """
        self._player_id = None  # must be set using set_player_id
        self._code = code
        if file_name is not None:
            with open(file_name) as f:
                self._code = f.read()
            self._name = os.path.splitext(
                os.path.basename(file_name))[0]
        if name is not None:
            self._name = name
        self.load(robot)

    def load(self, robot=None):
        """(Re)instantiate the robot from an instance or the stored code."""
        if robot is not None:
            self._name = str(robot.__class__).split('.')[-1]
            self._robot = robot
        elif self._code:
            # Execute the user's source inside a throwaway module namespace.
            self._module = imp.new_module('usercode%d' % id(self))
            exec(self._code, self._module.__dict__)
            self._robot = self._module.Robot()
        else:
            # No way to reload robot...
            pass

    def set_player_id(self, player_id):
        self._player_id = player_id

    @staticmethod
    def _numeral_types():
        # 'long' only exists on Python 2.
        if sys.version_info >= (3, 0):
            return (int, float)
        else:
            return (int, long, float)  # noqa

    @staticmethod
    def _validate_type(robot, var_name, obj, types):
        """Raise if obj's exact type is not one of 'types'."""
        if type(obj) not in types:
            raise Exception(
                "Bot {0}: {1} of type {2} is not valid.".format(
                    robot.robot_id, var_name, type(obj).__name__)
            )

    @staticmethod
    def _validate_length(robot, var_name, obj, lengths):
        """Raise if len(obj) is not one of 'lengths'."""
        if len(obj) not in lengths:
            # assumes that type of obj has already been validated and so
            # __repr__ has not been overwritten
            raise Exception(
                "Bot {0}: {1} of length {2} is not valid.".format(
                    robot.robot_id, var_name, len(obj))
            )

    @staticmethod
    def _validate_action(robot, action):
        """
        Need to be VERY CAREFUL here not to call any built-in functions on
        'action' unless it is known to be completely safe. A malicious bot may
        return an object with overwritten built-in functions that run arbitrary
        code.
        """
        Player._validate_type(robot, 'action', action, (list, tuple))
        Player._validate_length(robot, 'action', action, (1, 2))
        Player._validate_type(robot, 'action[0]', action[0], (str,))

        if action[0] in ('move', 'attack'):
            if len(action) != 2:
                raise Exception(
                    'Bot {0}: {1} requires a location as well.'.format(
                        robot.robot_id, action)
                )
            Player._validate_type(robot, 'action[1]', action[1], (list, tuple))
            Player._validate_length(robot, 'action[1]', action[1], (2,))
            Player._validate_type(
                robot, 'action[1][0]', action[1][0], Player._numeral_types())
            Player._validate_type(
                robot, 'action[1][1]', action[1][1], Player._numeral_types())
            valid_locs = rg.locs_around(
                robot.location, filter_out=['invalid', 'obstacle'])
            if action[1] not in valid_locs:
                raise Exception(
                    'Bot {0}: {1} is not a valid action.'.format(
                        robot.robot_id, action)
                )
        elif action[0] not in ('guard', 'suicide'):
            # Bug fix: the '%' argument was missing, so the message showed a
            # literal 'Bot %d' instead of the robot's id.
            raise ValueError('Bot %d: action must be one of "guard", '
                             '"suicide", "move", or "attack".'
                             % robot.robot_id)

    def _get_response(self, game_state, game_info, robot, seed):
        """Returns sanitized action, output and error flag from robot"""
        # Save the real streams *before* the try block so the finally clause
        # can always restore them.  Previously these were assigned inside the
        # try, so a failure before the assignments would have raised
        # NameError from the finally clause.
        _stdout = sys.stdout
        _stderr = sys.stderr
        try:
            exc_flag = False
            captured_output = io.BytesIO()

            def conv(s):
                return s.encode('ascii', 'replace')

            sys.stdout = Tee(sys.stdout, captured_output, conv=conv)
            sys.stderr = Tee(sys.stderr, captured_output, conv=conv)

            random.seed(seed)
            # Server requires knowledge of seed
            game_info.seed = seed

            self._robot.location = robot.location
            self._robot.hp = robot.hp
            self._robot.player_id = robot.player_id
            self._robot.robot_id = robot.robot_id

            action = self._robot.act(game_info)
            Player._validate_action(robot, action)

            # Normalise to plain tuples of ints so nothing user-controlled
            # leaks into the game state.
            if action[0] in ('move', 'attack'):
                action = (
                    action[0],
                    (int(action[1][0]), int(action[1][1]))
                )
            elif action[0] in ('guard', 'suicide'):
                action = (action[0],)
        except:  # noqa: E722 -- deliberately catch *anything* user code raises
            exc_flag = True
            traceback.print_exc(file=sys.stderr)
            action = ('guard',)
        finally:
            sys.stdout = _stdout
            sys.stderr = _stderr

        return action, (exc_flag, captured_output.getvalue())

    def get_responses(self, game_state, seed):
        """
        Returns a tuple of two dictionaries containing actions,
        and (error flag and output) for each bot, respectively
        """
        game_info = game_state.get_game_info(self._player_id)
        actions, outputs = {}, {}
        for loc, robot in game_state.robots.items():
            if robot.player_id == self._player_id:
                # Every act call should get a different random seed
                actions[loc], outputs[loc] = self._get_response(
                    game_state, game_info, robot,
                    seed=str(seed) + '-' + str(robot.robot_id))
        return actions, outputs

    def name(self):
        return self._name
class Game(object):
    """Runs a complete match and records per-turn actions and states.

    The per-turn flow in run_turn is order-dependent: player responses ->
    delta -> recorded actions -> new state -> delta callback -> history.
    """

    def __init__(self, players, record_actions=False, record_history=False,
                 print_info=False, seed=None, quiet=0, delta_callback=None,
                 symmetric=True):
        self._players = players
        for i, player in enumerate(self._players):
            # The player's index doubles as its player id everywhere below.
            player.set_player_id(i)
        self._record_actions = record_actions
        self._record_history = record_history
        self._print_info = print_info
        if seed is None:
            seed = random.randint(0, settings.max_seed)
        self.seed = str(seed)
        # Match-local RNG: all per-robot act seeds derive from the match seed,
        # so a match is reproducible from self.seed alone.
        self._random = random.Random(self.seed)
        self._quiet = quiet
        self._delta_callback = delta_callback
        self._state = GameState(use_start=True, seed=self.seed,
                                symmetric=symmetric)
        self._actions_on_turn = {}
        self._states = {}
        self.history = []  # TODO: make private

    # actions_on_turn = {loc: log_item}
    # log_item = {
    #     'name': action_name,
    #     'target': action_target or None,
    #     'loc': loc,
    #     'hp': hp,
    #     'player': player_id,
    #     'loc_end': loc_end,
    #     'hp_end': hp_end
    # }
    #
    # or dummy if turn == settings.max_turn
    def get_actions_on_turn(self, turn):
        # Only meaningful when the game was created with record_actions=True.
        assert self._record_actions
        return self._actions_on_turn[turn]

    def get_state(self, turn):
        return self._states[turn]

    def _save_actions_on_turn(self, actions_on_turn, turn):
        self._actions_on_turn[turn] = actions_on_turn

    def _save_state(self, state, turn):
        self._states[turn] = state

    def _get_robots_responses(self):
        # TODO: honour quietness
        actions, outputs = {}, {}
        for player in self._players:
            # Each player's turn gets a fresh seed drawn from the match RNG.
            seed = self._random.randint(0, settings.max_seed)
            responses = player.get_responses(self._state, seed)
            actions.update(responses[0])
            outputs.update(responses[1])
        return actions, outputs

    def _make_history(self, responses, record_output=False):
        # todo: rework this. We are getting data about the player
        # from two sources: 1) from arguments, and 2) from
        # class members. Either move *all* to 1) and make
        # static or move all to 2).
        '''
        An aggregate of all bots and their actions this turn.
        Optionally records per-bot output.

        Stores a list of each player's bots at the start of this turn and
        the actions they each performed this turn. Newly spawned bots have no
        actions.
        '''
        # NOTE(review): the record_output parameter is currently unused --
        # output is attached whenever 'outputs' has an entry for the bot;
        # confirm whether the flag was meant to gate that.
        actions, outputs = responses
        robots = []
        for loc, robot in self._state.robots.items():
            robot_info = {
                'location': loc,
                'hp': robot.hp,
                'player_id': robot.player_id,
                'robot_id': robot.robot_id,
            }
            if loc in actions:
                # since the state after the final turn does not contain any
                # actions, 'loc' is not always contained in 'actions'
                robot_info['action'] = actions[loc]
                if outputs and loc in outputs:
                    robot_info['output'] = outputs[loc]
            robots.append(robot_info)
        return robots

    def _calculate_actions_on_turn(self, delta, actions):
        """Builds the per-location render/log dict for one turn's delta."""
        actions_on_turn = {}

        for delta_info in delta:
            loc = delta_info.loc

            if loc in actions:
                name = actions[loc][0]
                if name in ['move', 'attack']:
                    target = actions[loc][1]
                else:
                    target = None
            else:
                # A delta entry with no recorded action is a newly spawned bot.
                name = 'spawn'
                target = None

            # note that a spawned bot may overwrite an existing bot
            actions_on_turn[loc] = {
                'name': name,
                'target': target,
                'loc': loc,
                'hp': delta_info.hp,
                'player': delta_info.player_id,
                'loc_end': delta_info.loc_end,
                'hp_end': delta_info.hp_end
            }

        return actions_on_turn

    def run_turn(self, record_output=False):
        if self._print_info:
            print((' running turn %d ' % (self._state.turn)).center(70, '-'))

        responses = self._get_robots_responses()
        actions = responses[0]

        delta = self._state.get_delta(actions)

        if self._record_actions:
            actions_on_turn = self._calculate_actions_on_turn(delta, actions)
            self._save_actions_on_turn(actions_on_turn, self._state.turn)

        new_state = self._state.apply_delta(delta)

        # The callback is suppressed for the very first turns (turn <= 1).
        if self._delta_callback is not None and self._state.turn > 1:
            self._delta_callback(delta, new_state)

        self._save_state(new_state, new_state.turn)

        if self._record_history:
            self.history.append(self._make_history(
                responses, record_output=record_output))

        self._state = new_state

    def run_all_turns(self):
        assert self._state.turn == 0

        if self._print_info:
            print(('Match seed: {0}'.format(self.seed)))

        self._save_state(self._state, 0)

        while self._state.turn < settings.max_turns:
            self.run_turn()

        # create last turn's state for server history
        if self._record_history:
            self.history.append(self._make_history(({}, {})))

        # create dummy data for last turn
        # TODO: render should be cleverer
        actions_on_turn = {}

        for loc, robot in self._state.robots.items():
            log_item = {
                'name': '',
                'target': None,
                'loc': loc,
                'hp': robot.hp,
                'player': robot.player_id,
                'loc_end': loc,
                'hp_end': robot.hp
            }
            actions_on_turn[loc] = log_item

        self._save_actions_on_turn(actions_on_turn, settings.max_turns)

    def get_scores(self):
        return self.get_state(settings.max_turns).get_scores()
class ThreadedGame(Game):
    """Game variant that computes all turns on a daemon background thread.

    Readers may call get_state()/get_actions_on_turn() from another thread;
    those calls block until the requested turn's data has been produced.
    """

    def __init__(self, *args, **kwargs):
        super(ThreadedGame, self).__init__(*args, **kwargs)
        n_slots = settings.max_turns + 1
        # One event per turn, set once that turn's data is available.
        self._has_actions_on_turn = [threading.Event()
                                     for _ in range(n_slots)]
        self._has_state = [threading.Event()
                           for _ in range(n_slots)]

    def get_actions_on_turn(self, turn):
        self._has_actions_on_turn[turn].wait()
        return super(ThreadedGame, self).get_actions_on_turn(turn)

    def get_state(self, turn):
        self._has_state[turn].wait()
        return super(ThreadedGame, self).get_state(turn)

    def _save_actions_on_turn(self, actions_on_turn, turn):
        super(ThreadedGame, self)._save_actions_on_turn(actions_on_turn, turn)
        self._has_actions_on_turn[turn].set()

    def _save_state(self, state, turn):
        super(ThreadedGame, self)._save_state(state, turn)
        self._has_state[turn].set()

    def run_all_turns(self):
        lock = threading.Lock()

        def run_with_lock():
            with lock:
                super(ThreadedGame, self).run_all_turns()

        worker = threading.Thread(target=run_with_lock)
        worker.daemon = True
        worker.start()
| |
# Copyright (C) 2009 Duncan McGreggor <duncan@canonical.com>
# Licenced under the txaws licence available at /LICENSE in the txaws source.
from txaws.credentials import AWSCredentials
from txaws.ec2.client import EC2Client
try:
from txaws.s3.client import S3Client
except ImportError:
s3clientSkip = ("S3Client couldn't be imported (perhaps because dateutil, "
"on which it depends, isn't present)")
else:
s3clientSkip = None
from txaws.service import (AWSServiceEndpoint, AWSServiceRegion,
EC2_ENDPOINT_EU, EC2_ENDPOINT_US, REGION_EU)
from txaws.testing.base import TXAWSTestCase
class AWSServiceEndpointTestCase(TXAWSTestCase):
    """Tests URI parsing, canonical-host handling and the mutator methods
    of AWSServiceEndpoint."""

    def setUp(self):
        # Shared endpoint fixture used by most of the tests below.
        self.endpoint = AWSServiceEndpoint(uri="http://my.service/da_endpoint")

    def test_simple_creation(self):
        # Documents the defaults of an endpoint built without a URI.
        endpoint = AWSServiceEndpoint()
        self.assertEquals(endpoint.scheme, "http")
        self.assertEquals(endpoint.host, "")
        self.assertEquals(endpoint.port, None)
        self.assertEquals(endpoint.path, "/")
        self.assertEquals(endpoint.method, "GET")

    def test_custom_method(self):
        endpoint = AWSServiceEndpoint(
            uri="http://service/endpoint", method="PUT")
        self.assertEquals(endpoint.method, "PUT")

    def test_parse_uri(self):
        self.assertEquals(self.endpoint.scheme, "http")
        self.assertEquals(self.endpoint.host, "my.service")
        self.assertIdentical(self.endpoint.port, None)
        self.assertEquals(self.endpoint.path, "/da_endpoint")

    def test_parse_uri_https_and_custom_port(self):
        endpoint = AWSServiceEndpoint(uri="https://my.service:8080/endpoint")
        self.assertEquals(endpoint.scheme, "https")
        self.assertEquals(endpoint.host, "my.service")
        self.assertEquals(endpoint.port, 8080)
        self.assertEquals(endpoint.path, "/endpoint")

    def test_get_uri(self):
        uri = self.endpoint.get_uri()
        self.assertEquals(uri, "http://my.service/da_endpoint")

    def test_get_uri_custom_port(self):
        # A non-default port must survive the parse/serialize round trip.
        uri = "https://my.service:8080/endpoint"
        endpoint = AWSServiceEndpoint(uri=uri)
        new_uri = endpoint.get_uri()
        self.assertEquals(new_uri, uri)

    def test_set_host(self):
        self.assertEquals(self.endpoint.host, "my.service")
        self.endpoint.set_host("newhost.com")
        self.assertEquals(self.endpoint.host, "newhost.com")

    def test_get_host(self):
        self.assertEquals(self.endpoint.host, self.endpoint.get_host())

    def test_get_canonical_host(self):
        """
        If the port is not specified the canonical host is the same as
        the host.
        """
        uri = "http://my.service/endpoint"
        endpoint = AWSServiceEndpoint(uri=uri)
        self.assertEquals("my.service", endpoint.get_canonical_host())

    def test_get_canonical_host_with_non_default_port(self):
        """
        If the port is not the default, the canonical host includes it.
        """
        uri = "http://my.service:99/endpoint"
        endpoint = AWSServiceEndpoint(uri=uri)
        self.assertEquals("my.service:99", endpoint.get_canonical_host())

    def test_get_canonical_host_is_lower_case(self):
        """
        The canonical host is guaranteed to be lower case.
        """
        uri = "http://MY.SerVice:99/endpoint"
        endpoint = AWSServiceEndpoint(uri=uri)
        self.assertEquals("my.service:99", endpoint.get_canonical_host())

    def test_set_canonical_host(self):
        """
        The canonical host is converted to lower case.
        """
        endpoint = AWSServiceEndpoint()
        endpoint.set_canonical_host("My.Service")
        self.assertEquals("my.service", endpoint.host)
        self.assertIdentical(None, endpoint.port)

    def test_set_canonical_host_with_port(self):
        """
        The canonical host can optionally have a port.
        """
        endpoint = AWSServiceEndpoint()
        endpoint.set_canonical_host("my.service:99")
        self.assertEquals("my.service", endpoint.host)
        self.assertEquals(99, endpoint.port)

    def test_set_canonical_host_with_empty_port(self):
        """
        The canonical host can also have no port.
        """
        endpoint = AWSServiceEndpoint()
        endpoint.set_canonical_host("my.service:")
        self.assertEquals("my.service", endpoint.host)
        self.assertIdentical(None, endpoint.port)

    def test_set_path(self):
        self.endpoint.set_path("/newpath")
        self.assertEquals(
            self.endpoint.get_uri(),
            "http://my.service/newpath")

    def test_set_method(self):
        self.assertEquals(self.endpoint.method, "GET")
        self.endpoint.set_method("PUT")
        self.assertEquals(self.endpoint.method, "PUT")
class AWSServiceRegionTestCase(TXAWSTestCase):
    """Tests credential precedence and per-class client caching on
    AWSServiceRegion."""

    def setUp(self):
        self.creds = AWSCredentials("foo", "bar")
        self.region = AWSServiceRegion(creds=self.creds)

    def test_simple_creation(self):
        self.assertEquals(self.creds, self.region.creds)
        self.assertEquals(self.region._clients, {})
        self.assertEquals(self.region.ec2_endpoint.get_uri(), EC2_ENDPOINT_US)

    def test_creation_with_keys(self):
        region = AWSServiceRegion(access_key="baz", secret_key="quux")
        self.assertEquals(region.creds.access_key, "baz")
        self.assertEquals(region.creds.secret_key, "quux")

    def test_creation_with_keys_and_creds(self):
        """
        creds take precedence over individual access key/secret key pairs.
        """
        region = AWSServiceRegion(self.creds, access_key="baz",
                                  secret_key="quux")
        self.assertEquals(region.creds.access_key, "foo")
        self.assertEquals(region.creds.secret_key, "bar")

    def test_creation_with_uri(self):
        region = AWSServiceRegion(
            creds=self.creds, ec2_uri="http://foo/bar")
        self.assertEquals(region.ec2_endpoint.get_uri(), "http://foo/bar")

    def test_creation_with_uri_backwards_compatible(self):
        # The legacy 'uri' keyword must keep working as an alias of 'ec2_uri'.
        region = AWSServiceRegion(
            creds=self.creds, uri="http://foo/bar")
        self.assertEquals(region.ec2_endpoint.get_uri(), "http://foo/bar")

    def test_creation_with_uri_and_region(self):
        # An explicit EC2 URI wins over the region's default endpoint.
        region = AWSServiceRegion(
            creds=self.creds, region=REGION_EU, ec2_uri="http://foo/bar")
        self.assertEquals(region.ec2_endpoint.get_uri(), "http://foo/bar")

    def test_creation_with_region_override(self):
        region = AWSServiceRegion(creds=self.creds, region=REGION_EU)
        self.assertEquals(region.ec2_endpoint.get_uri(), EC2_ENDPOINT_EU)

    def test_get_ec2_client_with_empty_cache(self):
        # The cache key combines the client class, creds and endpoint.
        key = str(EC2Client) + str(self.creds) + str(self.region.ec2_endpoint)
        original_client = self.region._clients.get(key)
        new_client = self.region.get_client(
            EC2Client, creds=self.creds, endpoint=self.region.ec2_endpoint)
        self.assertEquals(original_client, None)
        self.assertTrue(isinstance(new_client, EC2Client))
        self.assertNotEquals(original_client, new_client)

    def test_get_ec2_client_from_cache_default(self):
        client1 = self.region.get_ec2_client()
        client2 = self.region.get_ec2_client()
        self.assertTrue(isinstance(client1, EC2Client))
        self.assertTrue(isinstance(client2, EC2Client))
        self.assertEquals(client1, client2)

    def test_get_ec2_client_from_cache(self):
        client1 = self.region.get_client(
            EC2Client, creds=self.creds, endpoint=self.region.ec2_endpoint)
        client2 = self.region.get_client(
            EC2Client, creds=self.creds, endpoint=self.region.ec2_endpoint)
        self.assertTrue(isinstance(client1, EC2Client))
        self.assertTrue(isinstance(client2, EC2Client))
        self.assertEquals(client1, client2)

    def test_get_ec2_client_from_cache_with_purge(self):
        # purge_cache=True must force a fresh client on every call.
        client1 = self.region.get_client(
            EC2Client, creds=self.creds, endpoint=self.region.ec2_endpoint,
            purge_cache=True)
        client2 = self.region.get_client(
            EC2Client, creds=self.creds, endpoint=self.region.ec2_endpoint,
            purge_cache=True)
        self.assertTrue(isinstance(client1, EC2Client))
        self.assertTrue(isinstance(client2, EC2Client))
        self.assertNotEquals(client1, client2)

    def test_get_s3_client_with_empty_cache(self):
        key = str(S3Client) + str(self.creds) + str(self.region.s3_endpoint)
        original_client = self.region._clients.get(key)
        new_client = self.region.get_client(
            S3Client, creds=self.creds, endpoint=self.region.s3_endpoint)
        self.assertEquals(original_client, None)
        self.assertTrue(isinstance(new_client, S3Client))
        self.assertNotEquals(original_client, new_client)
    # trial-style skip marker: non-None when S3Client could not be imported
    # (see the guarded import at the top of the module).
    test_get_s3_client_with_empty_cache.skip = s3clientSkip
| |
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Android system-wide tracing utility.
This is a tool for capturing a trace that includes data from both userland and
the kernel. It creates an HTML file for visualizing the trace.
"""
import errno, optparse, os, select, subprocess, sys, time, zlib
# This list is based on the tags in frameworks/native/include/utils/Trace.h.
# Maps each atrace tag name to its bit in the debug.atrace.tags.enableflags
# bitmask (written by the --set-tags mode below).
trace_tag_bits = {
  'gfx': 1<<1,
  'input': 1<<2,
  'view': 1<<3,
  'webview': 1<<4,
  'wm': 1<<5,
  'am': 1<<6,
  'sync': 1<<7,
  'audio': 1<<8,
  'video': 1<<9,
  'camera': 1<<10,
}

# Pre-flattened trace-viewer HTML, read from the script directory and
# embedded into the generated output file.
flattened_html_file = 'systrace_trace_viewer.html'
def add_adb_serial(command, serial):
  """Inserts '-s <serial>' right after the adb executable in *command*.

  Mutates *command* in place.  No-op when *serial* is None, which lets adb
  pick the default device.
  """
  # 'is not None' instead of '!= None': identity check is the correct idiom
  # and avoids invoking custom __eq__ implementations.
  if serial is not None:
    # Insert in reverse order so the result reads: adb -s SERIAL <rest>.
    command.insert(1, serial)
    command.insert(1, '-s')
def main():
  # NOTE: this file is Python 2 ('print' statements, 'except ..., e',
  # dict.iterkeys()); keep edits Python-2 compatible.
  parser = optparse.OptionParser()
  parser.add_option('-o', dest='output_file', help='write HTML to FILE',
                    default='trace.html', metavar='FILE')
  parser.add_option('-t', '--time', dest='trace_time', type='int',
                    help='trace for N seconds', metavar='N')
  parser.add_option('-b', '--buf-size', dest='trace_buf_size', type='int',
                    help='use a trace buffer size of N KB', metavar='N')
  parser.add_option('-d', '--disk', dest='trace_disk', default=False,
                    action='store_true', help='trace disk I/O (requires root)')
  parser.add_option('-f', '--cpu-freq', dest='trace_cpu_freq', default=False,
                    action='store_true', help='trace CPU frequency changes')
  parser.add_option('-i', '--cpu-idle', dest='trace_cpu_idle', default=False,
                    action='store_true', help='trace CPU idle events')
  parser.add_option('-l', '--cpu-load', dest='trace_cpu_load', default=False,
                    action='store_true', help='trace CPU load')
  parser.add_option('-s', '--no-cpu-sched', dest='trace_cpu_sched',
                    default=True, action='store_false', help='inhibit tracing '
                    'CPU scheduler (allows longer trace times by reducing data '
                    'rate into buffer)')
  parser.add_option('-u', '--bus-utilization', dest='trace_bus_utilization',
                    default=False, action='store_true',
                    help='trace bus utilization (requires root)')
  parser.add_option('-w', '--workqueue', dest='trace_workqueue', default=False,
                    action='store_true', help='trace the kernel workqueues ' +
                    '(requires root)')
  parser.add_option('--set-tags', dest='set_tags', action='store',
                    help='set the enabled trace tags and exit; set to a ' +
                    'comma separated list of: ' +
                    ', '.join(trace_tag_bits.iterkeys()))
  parser.add_option('--link-assets', dest='link_assets', default=False,
                    action='store_true', help='link to original CSS or JS '
                    'resources instead of embedding them')
  parser.add_option('--from-file', dest='from_file', action='store',
                    help='read the trace from a file rather than running a live'
                    ' trace')
  parser.add_option('--asset-dir', dest='asset_dir', default='trace-viewer',
                    type='string', help='')
  parser.add_option('-e', '--serial', dest='device_serial', type='string',
                    help='adb device serial number')
  options, unused_args = parser.parse_args()  # pylint: disable=unused-variable

  if options.link_assets or options.asset_dir != 'trace-viewer':
    parser.error('--link-assets and --asset-dir is deprecated.')

  # --set-tags mode: write the tag bitmask to the system property and exit.
  if options.set_tags:
    flags = 0
    tags = options.set_tags.split(',')
    for tag in tags:
      try:
        flags |= trace_tag_bits[tag]
      except KeyError:
        parser.error('unrecognized tag: %s\nknown tags are: %s' %
                     (tag, ', '.join(trace_tag_bits.iterkeys())))
    atrace_args = ['adb', 'shell', 'setprop', 'debug.atrace.tags.enableflags',
                   hex(flags)]
    add_adb_serial(atrace_args, options.device_serial)
    try:
      subprocess.check_call(atrace_args)
    except subprocess.CalledProcessError, e:
      print >> sys.stderr, 'unable to set tags: %s' % e
    print '\nSet enabled tags to: %s\n' % ', '.join(tags)
    print ('You will likely need to restart the Android framework for this to '
           'take effect:\n\n    adb shell stop\n    adb shell start\n')
    return

  # Build the atrace command line; '-z' makes atrace zlib-compress its output.
  atrace_args = ['adb', 'shell', 'atrace', '-z']
  add_adb_serial(atrace_args, options.device_serial)

  if options.trace_disk:
    atrace_args.append('-d')
  if options.trace_cpu_freq:
    atrace_args.append('-f')
  if options.trace_cpu_idle:
    atrace_args.append('-i')
  if options.trace_cpu_load:
    atrace_args.append('-l')
  if options.trace_cpu_sched:
    atrace_args.append('-s')
  if options.trace_bus_utilization:
    atrace_args.append('-u')
  if options.trace_workqueue:
    atrace_args.append('-w')
  if options.trace_time is not None:
    if options.trace_time > 0:
      atrace_args.extend(['-t', str(options.trace_time)])
    else:
      parser.error('the trace time must be a positive number')
  if options.trace_buf_size is not None:
    if options.trace_buf_size > 0:
      atrace_args.extend(['-b', str(options.trace_buf_size)])
    else:
      parser.error('the trace buffer size must be a positive number')

  # Replaying a previously captured trace bypasses adb entirely.
  if options.from_file is not None:
    atrace_args = ['cat', options.from_file]

  script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))

  with open(os.path.join(script_dir, flattened_html_file), 'r') as f:
    trace_viewer_html = f.read()

  html_filename = options.output_file

  trace_started = False
  leftovers = ''
  adb = subprocess.Popen(atrace_args, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
  dec = zlib.decompressobj()
  # Pump adb's stdout/stderr until the child exits.  Everything before the
  # 'TRACE:' marker is echoed to the console; everything after it is the
  # compressed trace, which is inflated and streamed into the HTML file.
  while True:
    ready = select.select([adb.stdout, adb.stderr], [],
                          [adb.stdout, adb.stderr])
    if adb.stderr in ready[0]:
      err = os.read(adb.stderr.fileno(), 4096)
      sys.stderr.write(err)
      sys.stderr.flush()
    if adb.stdout in ready[0]:
      out = leftovers + os.read(adb.stdout.fileno(), 4096)
      if options.from_file is None:
        # adb emits CRLFs; undo them, remembering a trailing '\r' whose
        # '\n' may not have arrived yet.
        out = out.replace('\r\n', '\n')
        if out.endswith('\r'):
          out = out[:-1]
          leftovers = '\r'
        else:
          leftovers = ''
      if not trace_started:
        lines = out.splitlines(True)
        out = ''
        for i, line in enumerate(lines):
          if line == 'TRACE:\n':
            sys.stdout.write("downloading trace...")
            sys.stdout.flush()
            out = ''.join(lines[i+1:])
            html_prefix = read_asset(script_dir, 'prefix.html')
            html_file = open(html_filename, 'w')
            html_file.write(
                html_prefix.replace("{{SYSTRACE_TRACE_VIEWER_HTML}}",
                                    trace_viewer_html))
            html_file.write('<!-- BEGIN TRACE -->\n' +
                '  <script class="trace-data" type="application/text">\n')
            trace_started = True
            break
          elif 'TRACE:'.startswith(line) and i == len(lines) - 1:
            # Possibly a partial 'TRACE:' marker; wait for more input.
            leftovers = line + leftovers
          else:
            sys.stdout.write(line)
            sys.stdout.flush()
      if len(out) > 0:
        out = dec.decompress(out)
      # Escape newlines so the trace survives inside the HTML script block.
      html_out = out.replace('\n', '\\n\\\n')
      if len(html_out) > 0:
        html_file.write(html_out)
    result = adb.poll()
    if result is not None:
      break
  if result != 0:
    print >> sys.stderr, 'adb returned error code %d' % result
  elif trace_started:
    # Flush whatever the decompressor still buffers, then close the HTML.
    html_out = dec.flush().replace('\n', '\\n\\\n').replace('\r', '')
    if len(html_out) > 0:
      html_file.write(html_out)
    html_file.write('  </script>\n<!-- END TRACE -->\n')
    html_suffix = read_asset(script_dir, 'suffix.html')
    html_file.write(html_suffix)
    html_file.close()
    print " done\n\n    wrote file://%s\n" % (
        os.path.abspath(options.output_file))
  else:
    print >> sys.stderr, ('An error occured while capturing the trace. Output '
                          'file was not written.')
def read_asset(src_dir, filename):
  """Returns the contents of *filename* located under *src_dir*.

  Uses a context manager so the file handle is closed promptly instead of
  leaking until garbage collection (the original never closed it).
  """
  with open(os.path.join(src_dir, filename)) as f:
    return f.read()


if __name__ == '__main__':
  main()
| |
#pylint: disable=invalid-name, too-few-public-methods, too-many-public-methods
from __future__ import print_function, absolute_import
import re
import unittest
from katana import cigar
import katana.util
from test.util_test import MicroMock
class CigarUtilTestCase(unittest.TestCase):
    def test_assert_query_lengths_match(self):
        # Equal query lengths pass (10M == 2M+15N? no: M and S consume the
        # query, N does not, so 2M15N8S is also length 10); a mismatch raises.
        base = cigar.CigarUtil(42, "10M")
        trivially_matching = cigar.CigarUtil(42, "10M")
        base._assert_query_lengths_match(trivially_matching)
        subtly_matching = cigar.CigarUtil(42, "2M15N8S")
        base._assert_query_lengths_match(subtly_matching)
        not_matching = cigar.CigarUtil(42, "2S8M2S")
        self.assertRaisesRegexp(katana.util.KatanaException,
                                r"Old CIGAR query length.*10M.*10.*2S8M2S.*12.*",
                                base._assert_query_lengths_match,
                                not_matching)

    def test_ref_consuming(self):
        # Which CIGAR ops count as advancing the reference coordinate.
        # NOTE(review): 'S' is treated as ref-consuming here, unlike the SAM
        # spec -- presumably deliberate for katana's softclip accounting;
        # confirm against the implementation's intent.
        util = cigar.CigarUtil(0, "1M")
        self.assertEquals(True, util._is_ref_consuming("M"))
        self.assertEquals(False, util._is_ref_consuming("I"))
        self.assertEquals(True, util._is_ref_consuming("S"))
        self.assertEquals(True, util._is_ref_consuming("="))
        self.assertEquals(True, util._is_ref_consuming("X"))
        self.assertEquals(True, util._is_ref_consuming("D"))
        self.assertEquals(True, util._is_ref_consuming("N"))
        self.assertEquals(False, util._is_ref_consuming("H"))
        self.assertEquals(False, util._is_ref_consuming("P"))
    def test_init(self):
        util = cigar.CigarUtil(42, "10M")
        self.assertEquals("10M", util.cigar)
        self.assertEquals(42, util.reference_start)

    def test_init_valid(self):
        # presumably valid because at least one aligning op is present
        util = cigar.CigarUtil(42, "1M1I1D1N1=1X" "1H1S")
        self.assertEquals(True, util.is_valid)

    def test_init_invalid(self):
        # a clip-only CIGAR is flagged invalid
        util = cigar.CigarUtil(42, "1H1S")
        self.assertEquals(False, util.is_valid)

    def test_init_withProfile(self):
        # A profile (one letter per base) round-trips to a run-length CIGAR.
        util = cigar.CigarUtil(42, cigar_profile="XXMMM")
        self.assertEquals("2X3M", util.cigar)
        self.assertEquals("XXMMM", util.cigar_profile)
        self.assertEquals(42, util.reference_start)

    def test_init_inconsistentCigarProfileDoesNotFail(self):
        # When both are given they are stored as-is; no cross-validation.
        util = cigar.CigarUtil(42, cigar="10M", cigar_profile="XXMMM")
        self.assertEquals("10M", util.cigar)
        self.assertEquals("XXMMM", util.cigar_profile)
        self.assertEquals(42, util.reference_start)

    def test_init_with_neitherCigarNorProfile(self):
        # Empty string, None and omitted arguments all normalize to "".
        util = cigar.CigarUtil(42, cigar="", cigar_profile="")
        self.assertEquals("", util.cigar)
        self.assertEquals("", util.cigar_profile)
        self.assertEquals(42, util.reference_start)
        util = cigar.CigarUtil(42, cigar=None, cigar_profile=None)
        self.assertEquals("", util.cigar)
        self.assertEquals("", util.cigar_profile)
        self.assertEquals(42, util.reference_start)
        util = cigar.CigarUtil(42)
        self.assertEquals("", util.cigar)
        self.assertEquals("", util.cigar_profile)
        self.assertEquals(42, util.reference_start)
    def test_eq(self):
        # Equality covers both the reference start and the CIGAR string.
        base = cigar.CigarUtil(42, "10M")
        self.assertEquals(base, cigar.CigarUtil(42, "10M"))
        self.assertNotEquals(base, cigar.CigarUtil(10, "10M"))
        self.assertNotEquals(base, cigar.CigarUtil(42, "1M"))

    def test_repr(self):
        # The repr should eval back to an equal object (round-trip check).
        base = cigar.CigarUtil(42, "10M")
        match = re.match("<.*(cigar.*?)'>(.*)", repr(base))
        eval_repr = match.group(1) + match.group(2)
        #pylint: disable=eval-used
        self.assertEquals(base, eval(eval_repr))

    def test_cigar_profile(self):
        # Run-length CIGAR expands to one letter per base.
        util = cigar.CigarUtil(42, "5M1I4S")
        self.assertEquals("MMMMMISSSS", util.cigar_profile)

    def test_cigar_profile_longer(self):
        util = cigar.CigarUtil(42, "10M1I1D10H")
        self.assertEquals("MMMMMMMMMMIDHHHHHHHHHH", util.cigar_profile)
    def test_softclip_replacesConsumingOpsWithSoftClips(self):
        # Query-consuming ops (M, I, =, X) become 'S'; clips pass through.
        util = cigar.CigarUtil(0, "1M")
        self.assertEquals("HS" "SSSS" "SH",
                          util._softclip(cigar_profile="HS" "MI=X" "SH"))

    def test_softclip_removesNonConsumingOps(self):
        # Ops that consume no query bases (D, P) are dropped entirely.
        util = cigar.CigarUtil(0, "1M")
        self.assertEquals("HS" "SH",
                          util._softclip(cigar_profile="HS" "DP" "SH"))

    def test_softclip_to_first_match(self):
        util = cigar.CigarUtil(0, "1M")
        self.assertEquals((45, "MM"),
                          util._softclip_to_first_match(45, "MM"))

    def test_softclip_to_first_noMatchesPassesThrough(self):
        util = cigar.CigarUtil(0, "1M")
        self.assertEquals((45, "SS"),
                          util._softclip_to_first_match(45, "SS"))

    def test_softclip_to_first_match_posIsAdjusted(self):
        # Softclipping the leading ops shifts the reference position (42->45).
        util = cigar.CigarUtil(0, "1M")
        self.assertEquals((45, "SS" "MPNDISH"),
                          util._softclip_to_first_match(42, "SDNIP" "MPNDISH"))

    def test_softclip_to_first_match_uncommonMatchOpOk(self):
        # '=' and 'X' count as match ops, not just 'M'.
        util = cigar.CigarUtil(0, "1M")
        self.assertEquals((45, "SS" "XPNDISH"),
                          util._softclip_to_first_match(42, "SDNIP" "XPNDISH"))
        util = cigar.CigarUtil(0, "1M")
        self.assertEquals((45, "SS" "=PNDISH"),
                          util._softclip_to_first_match(42, "SDNIP" "=PNDISH"))
    def test_pos_profiles(self):
        # One profile entry per reference position for plain matches.
        util = cigar.CigarUtil(0, "1M")
        actual = util._pos_profiles("MMM")
        self.assertEquals((0, ["M", "M", "M"]), actual)

    def test_pos_profiles_uncommonMatchOpOk(self):
        util = cigar.CigarUtil(0, "1M")
        actual = util._pos_profiles("=MM")
        self.assertEquals((0, ["=", "M", "M"]), actual)

    def test_pos_profiles_leadingSoftclips(self):
        # The returned offset (1) accounts for the leading softclip.
        util = cigar.CigarUtil(0, "1M")
        actual = util._pos_profiles("SMM")
        self.assertEquals((1, ["S", "M", "M"]), actual)

    def test_pos_profiles_insertOk(self):
        # Insertions attach to the following position's profile ("IM").
        util = cigar.CigarUtil(0, "1M")
        actual = util._pos_profiles("SIM")
        self.assertEquals((1, ["S", "IM"]), actual)

    def test_pos_profiles_deleteOk(self):
        # Deletions occupy a reference position of their own.
        util = cigar.CigarUtil(0, "1M")
        actual = util._pos_profiles("SDM")
        self.assertEquals((2, ["S", "D", "M"]), actual)

    def test_pos_profiles_trailingHardclips(self):
        # Trailing hardclips are grouped into a single final entry.
        util = cigar.CigarUtil(0, "1M")
        actual = util._pos_profiles("SDMHH")
        self.assertEquals((2, ["S", "D", "M", "HH"]), actual)
    def test_partition_cigar(self):
        # Splitting [42, 48) out of a 2S 6M 2S read: the soft clips flank
        # the kept core, and each piece carries its own reference start.
        util = cigar.CigarUtil(42, "2S" "6M" "2S")
        (c1, c2, c3) = util._partition_cigar(42, 48)
        self.assertEquals(cigar.CigarUtil(40,"2S"), c1)
        self.assertEquals(cigar.CigarUtil(42,"6M"), c2)
        self.assertEquals(cigar.CigarUtil(48,"2S"), c3)

    def test_partition_cigar_refConsumingOps(self):
        util = cigar.CigarUtil(42, "5M5X")
        #   | 4444444455
        #   | 2345678901
        #   | MMMMM
        #   |      XXXXX
        (c1, c2, c3) = util._partition_cigar(42, 47)
        self.assertEquals(cigar.CigarUtil(42,""), c1)
        self.assertEquals(cigar.CigarUtil(42,"5M"), c2)
        self.assertEquals(cigar.CigarUtil(47,"5X"), c3)
        (c1, c2, c3) = util._partition_cigar(47, 52)
        self.assertEquals(cigar.CigarUtil(42,"5M"), c1)
        self.assertEquals(cigar.CigarUtil(47,"5X"), c2)
        self.assertEquals(cigar.CigarUtil(52,""), c3)

    def test_partition_cigar_refNonconsumingOps(self):
        # Insertions do not consume reference, so they travel with the
        # following segment.
        util = cigar.CigarUtil(42, "5M4I1M")
        (c1, c2, c3) = util._partition_cigar(42, 47)
        self.assertEquals(cigar.CigarUtil(42,""), c1)
        self.assertEquals(cigar.CigarUtil(42,"5M"), c2)
        self.assertEquals(cigar.CigarUtil(47,"4I1M"), c3)
        (c1, c2, c3) = util._partition_cigar(47, 48)
        self.assertEquals(cigar.CigarUtil(42,"5M"), c1)
        self.assertEquals(cigar.CigarUtil(47,"4I1M"), c2)
        self.assertEquals(cigar.CigarUtil(48,""), c3)

    def test_partition_cigar_refNonconsumingOpsWithFlankingSoftclips(self):
        util = cigar.CigarUtil(42, "5S" "5M" "4I" "1M" "5S")
        (c1, c2, c3) = util._partition_cigar(42, 47)
        self.assertEquals(cigar.CigarUtil(37,"5S"), c1)
        self.assertEquals(cigar.CigarUtil(42,"5M"), c2)
        self.assertEquals(cigar.CigarUtil(47,"4I1M5S"), c3)
        (c1, c2, c3) = util._partition_cigar(47, 48)
        self.assertEquals(cigar.CigarUtil(37,"5S5M"), c1)
        self.assertEquals(cigar.CigarUtil(47,"4I1M"), c2)
        self.assertEquals(cigar.CigarUtil(48,"5S"), c3)
        (c1, c2, c3) = util._partition_cigar(48, 53)
        self.assertEquals(cigar.CigarUtil(37,"5S5M4I1M"), c1)
        self.assertEquals(cigar.CigarUtil(48,"5S"), c2)
        self.assertEquals(cigar.CigarUtil(53,""), c3)
def test_partition_outOfBounds(self):
# ref | 4444444444
# | 0123456789
# read | SSMMMMMXX
# index | 210123456
util = cigar.CigarUtil(42, "2S" "6M" "2X")
(c1, c2, c3) = util._partition_cigar(30, 42)
self.assertEquals(cigar.CigarUtil(40,""), c1)
self.assertEquals(cigar.CigarUtil(40,"2S"), c2)
self.assertEquals(cigar.CigarUtil(42,"6M2X"), c3)
(c1, c2, c3) = util._partition_cigar(48, 60)
self.assertEquals(cigar.CigarUtil(40,"2S6M"), c1)
self.assertEquals(cigar.CigarUtil(48,"2X"), c2)
self.assertEquals(cigar.CigarUtil(50,""), c3)
(c1, c2, c3) = util._partition_cigar(20, 30)
self.assertEquals(cigar.CigarUtil(40,""), c1)
self.assertEquals(cigar.CigarUtil(40,""), c2)
self.assertEquals(cigar.CigarUtil(40,"2S6M2X"), c3)
(c1, c2, c3) = util._partition_cigar(100, 110)
self.assertEquals(cigar.CigarUtil(40,"2S6M2X"), c1)
self.assertEquals(cigar.CigarUtil(50,""), c2)
self.assertEquals(cigar.CigarUtil(50,""), c3)
def test_partition_cigar_targetRegionBoundedByShortReverseRead(self):
util = cigar.CigarUtil(100, "20M")
(c1, c2, c3) = util._partition_cigar(90, 120)
self.assertEquals(100, c1.reference_start)
self.assertEquals("", c1.cigar)
self.assertEquals(100, c2.reference_start)
self.assertEquals("20M", c2.cigar)
self.assertEquals(120, c3.reference_start)
self.assertEquals("", c3.cigar)
def test_softclip_target(self):
util = cigar.CigarUtil(42, "10M")
new_util = util.softclip_target(44,50)
self.assertEquals("2S6M2S", new_util.cigar)
self.assertEquals(44, new_util.reference_start)
    def test_softclip_target_validatesLength(self):
        # A cigar whose implied length disagrees with the read's query_length
        # must be rejected rather than silently clipped.
        base = cigar.CigarUtil(42, "10M")
        base.query_length = 100
        self.assertRaises(katana.util.KatanaException,
                          base.softclip_target,
                          44,
                          50)
def test_softclip_target_flankingSoftclips(self):
util = cigar.CigarUtil(42, "2S" "6M" "2S")
# 4444444444
# 0123456789
# SSMMMMMMSS
# SSSMMMMSSS
new_util = util.softclip_target(43,47)
self.assertEquals("3S4M3S", new_util.cigar)
self.assertEquals(43, new_util.reference_start)
def test_softclip_target_flankingHardclips(self):
util = cigar.CigarUtil(42, "2H1S" "4M" "1S2H")
#3444444444
#9012345678
#HHSMMMMSHH
#HHSSMMSSHH
new_util = util.softclip_target(43,45)
self.assertEquals("2H2S" "2M" "2S2H", new_util.cigar)
self.assertEquals(43, new_util.reference_start)
def test_softclip_target_deletes(self):
util = cigar.CigarUtil(42, "2M3D" "4M" "1S")
#4444444455
#2345678901
#MMDDD
# MMMM
# S
#SS MMMMS
new_util = util.softclip_target(47,51)
self.assertEquals("2S" "4M" "1S", new_util.cigar)
self.assertEquals(47, new_util.reference_start)
def test_softclip_target_edgeInsert(self):
util = cigar.CigarUtil(42, "3M" "1I4M" "2X")
#444 444445
#234 567890
#ATAAACGTAC
#MMMI
# MMMM
# XX
#SSSSMMMMSS
new_util = util.softclip_target(45,49)
self.assertEquals("4S" "4M" "2S", new_util.cigar)
self.assertEquals(45, new_util.reference_start)
def test_softclip_target_edgeDelete(self):
util = cigar.CigarUtil(42, "3M" "1D5M" "2S")
#44444444555
#23456789012
#ATA ACGTACG
#MMM
# DMMMM
# MSS
#SSS MMMMSSS
new_util = util.softclip_target(45,50)
self.assertEquals("3S" "4M" "3S", new_util.cigar)
self.assertEquals(46, new_util.reference_start)
class NullCigarTestCase(unittest.TestCase):
    """Tests for NullCigarUtil, the stand-in for reads with no cigar ('*')."""

    def test_null_cigar(self):
        """A null cigar is valid, zero-length, and clipping is a no-op."""
        # assertEqual replaces deprecated assertEquals (removed in Py 3.12).
        c = cigar.NullCigarUtil(reference_start=42)
        self.assertEqual(42, c.reference_start)
        self.assertEqual("*", c.cigar)
        self.assertEqual(0, c.query_length)
        self.assertEqual(True, c.is_valid)
        self.assertEqual("*", c.softclip_target(0, 100).cigar)
        self.assertEqual(42, c.softclip_target(0, 100).reference_start)
class CigartestCase(unittest.TestCase):
    """Tests for the cigar_factory dispatch between real and null cigars."""

    def test_cigar_factory(self):
        """A concrete cigar string yields a CigarUtil."""
        # assertEqual replaces deprecated assertEquals (removed in Py 3.12).
        mock_read = MicroMock(reference_start=42, cigarstring="10M")
        c = cigar.cigar_factory(mock_read)
        self.assertIsInstance(c, cigar.CigarUtil)
        self.assertEqual(42, c.reference_start)
        self.assertEqual("10M", c.cigar)

    def test_cigar_factory_nullCigar(self):
        """'*' or empty cigar strings yield a NullCigarUtil."""
        mock_read = MicroMock(reference_start=42, cigarstring="*")
        c = cigar.cigar_factory(mock_read)
        self.assertIsInstance(c, cigar.NullCigarUtil)
        self.assertEqual(42, c.reference_start)
        self.assertEqual("*", c.cigar)
        mock_read = MicroMock(reference_start=42, cigarstring="")
        c = cigar.cigar_factory(mock_read)
        self.assertIsInstance(c, cigar.NullCigarUtil)
        self.assertEqual(42, c.reference_start)
        self.assertEqual("*", c.cigar)
| |
import pytest
pytest.importorskip("ipywidgets")
from ipykernel.comm import Comm
from ipywidgets import Widget
#################
# Utility stuff #
#################
# Taken from ipywidgets/widgets/tests/test_interaction.py
# https://github.com/ipython/ipywidgets
# Licensed under Modified BSD. Copyright IPython Development Team. See:
# https://github.com/ipython/ipywidgets/blob/master/COPYING.md
class DummyComm(Comm):
    """Comm stand-in that silently swallows all open/send/close traffic."""

    comm_id = "a-b-c-d"

    def open(self, *args, **kwargs):
        """Ignore open requests."""

    def send(self, *args, **kwargs):
        """Ignore outgoing messages."""

    def close(self, *args, **kwargs):
        """Ignore close requests."""
# Original Widget attributes saved by setup() and restored by teardown().
_widget_attrs = {}
# Objects captured by record_display() in place of real IPython display calls.
displayed = []
# Sentinel marking "attribute did not exist before setup() patched it".
undefined = object()
def setup():
    """Patch Widget so the tests run without a live IPython kernel."""
    # Save the original comm factory (sentinel if it did not exist).
    _widget_attrs["_comm_default"] = getattr(Widget, "_comm_default", undefined)
    # Route all widget comms to a no-op DummyComm.
    Widget._comm_default = lambda self: DummyComm()
    _widget_attrs["_ipython_display_"] = Widget._ipython_display_
    # Any attempt to actually display a widget should fail loudly.
    def raise_not_implemented(*args, **kwargs):
        raise NotImplementedError()
    Widget._ipython_display_ = raise_not_implemented
def teardown():
    """Restore every Widget attribute that setup() replaced."""
    for attr, value in _widget_attrs.items():
        # The sentinel means the attribute did not exist originally.
        if value is undefined:
            delattr(Widget, attr)
        else:
            setattr(Widget, attr, value)
def f(**kwargs):
    """No-op keyword-only callback used by the ipywidgets interaction helpers."""
def clear_display():
    """Reset the module-level ``displayed`` capture list to empty."""
    global displayed
    displayed = []
def record_display(*args):
    """Append every would-be-displayed object to the module capture list."""
    displayed.extend(args)
# End code taken from ipywidgets
#####################
# Distributed stuff #
#####################
import re
from operator import add
from tlz import valmap
from distributed.client import wait
from distributed.diagnostics.progressbar import (
MultiProgressWidget,
ProgressWidget,
progress,
)
from distributed.utils_test import dec, gen_cluster, gen_tls_cluster, inc, throws
from distributed.worker import dumps_task
@gen_cluster(client=True)
async def test_progressbar_widget(c, s, a, b):
    """The single progress bar fills completely once the task chain finishes."""
    z = c.submit(inc, c.submit(inc, c.submit(inc, 1)))
    await wait(z)

    progress = ProgressWidget([z.key], scheduler=s.address, complete=True)
    await progress.listen()
    assert progress.bar.value == 1.0
    assert "3 / 3" in progress.bar_text.value

    # Listening without complete=True must also work on finished keys.
    progress = ProgressWidget([z.key], scheduler=s.address)
    await progress.listen()
@gen_cluster(client=True)
async def test_multi_progressbar_widget(c, s, a, b):
    """Per-task-group bars show success/failure styling and sort by size."""
    x1 = c.submit(inc, 1)
    x2 = c.submit(inc, x1)
    x3 = c.submit(inc, x2)
    y1 = c.submit(dec, x3)
    y2 = c.submit(dec, y1)
    e = c.submit(throws, y2)
    other = c.submit(inc, 123)
    await wait([other, e])
    p = MultiProgressWidget([e.key], scheduler=s.address, complete=True)
    await p.listen()
    assert p.bars["inc"].value == 1.0
    assert p.bars["dec"].value == 1.0
    assert p.bars["throws"].value == 0.0
    assert "3 / 3" in p.bar_texts["inc"].value
    assert "2 / 2" in p.bar_texts["dec"].value
    assert "0 / 1" in p.bar_texts["throws"].value
    assert p.bars["inc"].bar_style == "success"
    assert p.bars["dec"].bar_style == "success"
    assert p.bars["throws"].bar_style == "danger"
    assert p.status == "error"
    assert "Exception" in p.elapsed_time.value
    # Reproduce the same exception locally to compare its repr.
    try:
        throws(1)
    except Exception as e:
        assert repr(e) in p.elapsed_time.value
    # Bars must be ordered largest capacity first.
    capacities = [
        int(re.search(r"\d+ / \d+", row.children[0].value).group().split(" / ")[1])
        for row in p.bar_widgets.children
    ]
    assert sorted(capacities, reverse=True) == capacities
@gen_cluster()
async def test_multi_progressbar_widget_after_close(s, a, b):
    """The widget can attach to a graph submitted directly to the scheduler."""
    s.update_graph(
        tasks=valmap(
            dumps_task,
            {
                "x-1": (inc, 1),
                "x-2": (inc, "x-1"),
                "x-3": (inc, "x-2"),
                "y-1": (dec, "x-3"),
                "y-2": (dec, "y-1"),
                "e": (throws, "y-2"),
                "other": (inc, 123),
            },
        ),
        keys=["e"],
        dependencies={
            "x-2": {"x-1"},
            "x-3": {"x-2"},
            "y-1": {"x-3"},
            "y-2": {"y-1"},
            "e": {"y-2"},
        },
    )
    p = MultiProgressWidget(["x-1", "x-2", "x-3"], scheduler=s.address)
    await p.listen()
    # Keys are grouped by prefix, so the "x" group must be present.
    assert "x" in p.bars
def test_values(client):
    """MultiProgressWidget groups finished futures and reports errors."""
    futures = [client.submit(inc, i) for i in range(5)]
    wait(futures)
    widget = MultiProgressWidget(futures)
    client.sync(widget.listen)
    assert set(widget.bars) == {"inc"}
    assert widget.status == "finished"
    assert widget.comm.closed()
    assert "5 / 5" in widget.bar_texts["inc"].value
    assert widget.bars["inc"].value == 1.0

    # A failing future flips the widget into the error state.
    bad = client.submit(throws, 1)
    widget = MultiProgressWidget([bad])
    client.sync(widget.listen)
    assert widget.status == "error"
def test_progressbar_done(client):
    """Finished work shows success styling; failed work shows danger styling."""
    finished = [client.submit(inc, i) for i in range(5)]
    wait(finished)
    widget = ProgressWidget(finished)
    client.sync(widget.listen)
    assert widget.status == "finished"
    assert widget.bar.value == 1.0
    assert widget.bar.bar_style == "success"
    assert "Finished" in widget.elapsed_time.value

    failed = client.submit(throws, finished)
    wait([failed])
    widget = ProgressWidget([failed])
    client.sync(widget.listen)
    assert widget.status == "error"
    assert widget.bar.value == 0.0
    assert widget.bar.bar_style == "danger"
    assert "Exception" in widget.elapsed_time.value
    # Reproduce the same exception locally to compare its repr.
    try:
        throws(1)
    except Exception as err:
        assert repr(err) in widget.elapsed_time.value
def test_progressbar_cancel(client):
    """Cancelling a future flips the progress bar into the error state."""
    import time

    futures = [client.submit(lambda: time.sleep(0.3), i) for i in range(5)]
    widget = ProgressWidget(futures)
    client.sync(widget.listen)
    futures[-1].cancel()
    wait(futures[:-1])
    assert widget.status == "error"
    assert widget.bar.value == 0  # no tasks finish before cancel is called
@gen_cluster()
async def test_multibar_complete(s, a, b):
    """With complete=True the widget reports totals for all dependent groups."""
    s.update_graph(
        tasks=valmap(
            dumps_task,
            {
                "x-1": (inc, 1),
                "x-2": (inc, "x-1"),
                "x-3": (inc, "x-2"),
                "y-1": (dec, "x-3"),
                "y-2": (dec, "y-1"),
                "e": (throws, "y-2"),
                "other": (inc, 123),
            },
        ),
        keys=["e"],
        dependencies={
            "x-2": {"x-1"},
            "x-3": {"x-2"},
            "y-1": {"x-3"},
            "y-2": {"y-1"},
            "e": {"y-2"},
        },
    )
    p = MultiProgressWidget(["e"], scheduler=s.address, complete=True)
    await p.listen()
    # Totals per key prefix; "other" is not a dependency of "e" so absent.
    assert p._last_response["all"] == {"x": 3, "y": 2, "e": 1}
    assert all(b.value == 1.0 for k, b in p.bars.items() if k != "e")
    assert "3 / 3" in p.bar_texts["x"].value
    assert "2 / 2" in p.bar_texts["y"].value
def test_fast(client):
    """progress() keeps up with many quickly-completing task groups."""
    xs = client.map(inc, range(100))
    ys = client.map(dec, xs)
    zs = client.map(add, xs, ys)
    bar = progress(zs, multi=True, complete=True, notebook=True)
    client.sync(bar.listen)
    assert set(bar._last_response["all"]) == {"inc", "dec", "add"}
@gen_cluster(client=True, client_kwargs={"serializers": ["msgpack"]})
async def test_serializers(c, s, a, b):
    """The widget works when the client is restricted to msgpack serialization."""
    z = c.submit(inc, c.submit(inc, c.submit(inc, 1)))
    await wait(z)
    widget = ProgressWidget([z], scheduler=s.address, complete=True)
    await widget.listen()
    assert widget.bar.value == 1.0
    assert "3 / 3" in widget.bar_text.value
@gen_tls_cluster(client=True)
async def test_tls(c, s, a, b):
    """The progress widget also works over a TLS-secured cluster."""
    z = c.submit(inc, c.submit(inc, c.submit(inc, 1)))
    await wait(z)
    widget = ProgressWidget([z], scheduler=s.address, complete=True)
    await widget.listen()
    assert widget.bar.value == 1.0
    assert "3 / 3" in widget.bar_text.value
| |
import numpy as np
import pytest
from packaging.version import parse as parse_version
pytestmark = pytest.mark.gpu
import dask.array as da
from dask.array.numpy_compat import _numpy_120
from dask.array.utils import assert_eq
cupy = pytest.importorskip("cupy")
cupy_version = parse_version(cupy.__version__)
@pytest.mark.skipif(not _numpy_120, reason="NEP-35 is not available")
@pytest.mark.skipif(
    cupy_version < parse_version("6.1.0"),
    reason="Requires CuPy 6.1.0+ (with https://github.com/cupy/cupy/pull/2209)",
)
@pytest.mark.parametrize(
    "m,n,chunks,error_type",
    [
        (20, 10, 10, None),  # tall-skinny regular blocks
        (20, 10, (3, 10), None),  # tall-skinny regular fat layers
        (20, 10, ((8, 4, 8), 10), None),  # tall-skinny irregular fat layers
        (40, 10, ((15, 5, 5, 8, 7), 10), None),  # tall-skinny non-uniform chunks (why?)
        (128, 2, (16, 2), None),  # tall-skinny regular thin layers; recursion_depth=1
        (
            129,
            2,
            (16, 2),
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=2 --> 17x2
        (
            130,
            2,
            (16, 2),
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next
        (
            131,
            2,
            (16, 2),
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next
        (300, 10, (40, 10), None),  # tall-skinny regular thin layers; recursion_depth=2
        (300, 10, (30, 10), None),  # tall-skinny regular thin layers; recursion_depth=3
        (300, 10, (20, 10), None),  # tall-skinny regular thin layers; recursion_depth=4
        (10, 5, 10, None),  # single block tall
        (5, 10, 10, None),  # single block short
        (10, 10, 10, None),  # single block square
        (10, 40, (10, 10), ValueError),  # short-fat regular blocks
        (10, 40, (10, 15), ValueError),  # short-fat irregular blocks
        (
            10,
            40,
            (10, (15, 5, 5, 8, 7)),
            ValueError,
        ),  # short-fat non-uniform chunks (why?)
        (20, 20, 10, ValueError),  # 2x2 regular blocks
    ],
)
def test_tsqr(m, n, chunks, error_type):
    """Exercise dask's tall-and-skinny QR/SVD on a CuPy-backed array.

    For supported chunkings, checks output shapes, reconstruction accuracy,
    orthonormality and triangularity; otherwise expects ``error_type``.
    """
    mat = cupy.random.rand(m, n)
    data = da.from_array(mat, chunks=chunks, name="A", asarray=False)
    # qr
    m_q = m
    n_q = min(m, n)
    m_r = n_q
    n_r = n
    # svd
    m_u = m
    n_u = min(m, n)
    n_s = n_q
    m_vh = n_q
    n_vh = n
    d_vh = max(m_vh, n_vh)  # full matrix returned
    if error_type is None:
        # test QR
        q, r = da.linalg.tsqr(data)
        assert_eq((m_q, n_q), q.shape)  # shape check
        assert_eq((m_r, n_r), r.shape)  # shape check
        assert_eq(mat, da.dot(q, r))  # accuracy check
        assert_eq(cupy.eye(n_q, n_q), da.dot(q.T, q))  # q must be orthonormal
        assert_eq(r, np.triu(r.rechunk(r.shape[0])))  # r must be upper triangular
        # test SVD
        u, s, vh = da.linalg.tsqr(data, compute_svd=True)
        s_exact = np.linalg.svd(mat)[1]
        assert_eq(s, s_exact)  # s must contain the singular values
        assert_eq((m_u, n_u), u.shape)  # shape check
        assert_eq((n_s,), s.shape)  # shape check
        assert_eq((d_vh, d_vh), vh.shape)  # shape check
        assert_eq(
            np.eye(n_u, n_u), da.dot(u.T, u), check_type=False
        )  # u must be orthonormal
        assert_eq(
            np.eye(d_vh, d_vh), da.dot(vh, vh.T), check_type=False
        )  # vh must be orthonormal
        assert_eq(mat, da.dot(da.dot(u, da.diag(s)), vh[:n_q]))  # accuracy check
    else:
        with pytest.raises(error_type):
            q, r = da.linalg.tsqr(data)
        with pytest.raises(error_type):
            u, s, vh = da.linalg.tsqr(data, compute_svd=True)
@pytest.mark.parametrize(
    "m_min,n_max,chunks,vary_rows,vary_cols,error_type",
    [
        (10, 5, (10, 5), True, False, None),  # single block tall
        (10, 5, (10, 5), False, True, None),  # single block tall
        (10, 5, (10, 5), True, True, None),  # single block tall
        (40, 5, (10, 5), True, False, None),  # multiple blocks tall
        (40, 5, (10, 5), False, True, None),  # multiple blocks tall
        (40, 5, (10, 5), True, True, None),  # multiple blocks tall
        (
            300,
            10,
            (40, 10),
            True,
            False,
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=2
        (
            300,
            10,
            (30, 10),
            True,
            False,
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=3
        (
            300,
            10,
            (20, 10),
            True,
            False,
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=4
        (
            300,
            10,
            (40, 10),
            False,
            True,
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=2
        (
            300,
            10,
            (30, 10),
            False,
            True,
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=3
        (
            300,
            10,
            (20, 10),
            False,
            True,
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=4
        (
            300,
            10,
            (40, 10),
            True,
            True,
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=2
        (
            300,
            10,
            (30, 10),
            True,
            True,
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=3
        (
            300,
            10,
            (20, 10),
            True,
            True,
            None,
        ),  # tall-skinny regular thin layers; recursion_depth=4
    ],
)
def test_tsqr_uncertain(m_min, n_max, chunks, vary_rows, vary_cols, error_type):
    """tsqr on arrays whose chunk sizes are unknown (boolean-masked rows/cols).

    Masking with a dask boolean array produces NaN chunk sizes, so results
    are computed eagerly before the shape/accuracy checks.
    """
    mat = cupy.random.rand(m_min * 2, n_max)
    m, n = m_min * 2, n_max
    # Guarantee some rows pass the > 0.5 filter below.
    mat[0:m_min, 0] += 1
    _c0 = mat[:, 0]
    _r0 = mat[0, :]
    c0 = da.from_array(_c0, chunks=m_min, name="c", asarray=False)
    r0 = da.from_array(_r0, chunks=n_max, name="r", asarray=False)
    data = da.from_array(mat, chunks=chunks, name="A", asarray=False)
    if vary_rows:
        data = data[c0 > 0.5, :]
        mat = mat[_c0 > 0.5, :]
        m = mat.shape[0]
    if vary_cols:
        data = data[:, r0 > 0.5]
        mat = mat[:, _r0 > 0.5]
        n = mat.shape[1]
    # qr
    m_q = m
    n_q = min(m, n)
    m_r = n_q
    n_r = n
    # svd
    m_u = m
    n_u = min(m, n)
    n_s = n_q
    m_vh = n_q
    n_vh = n
    d_vh = max(m_vh, n_vh)  # full matrix returned
    if error_type is None:
        # test QR
        q, r = da.linalg.tsqr(data)
        q = q.compute()  # because uncertainty
        r = r.compute()
        assert_eq((m_q, n_q), q.shape)  # shape check
        assert_eq((m_r, n_r), r.shape)  # shape check
        assert_eq(mat, np.dot(q, r))  # accuracy check
        assert_eq(
            np.eye(n_q, n_q), np.dot(q.T, q), check_type=False
        )  # q must be orthonormal
        assert_eq(r, np.triu(r))  # r must be upper triangular
        # test SVD
        u, s, vh = da.linalg.tsqr(data, compute_svd=True)
        u = u.compute()  # because uncertainty
        s = s.compute()
        vh = vh.compute()
        s_exact = np.linalg.svd(mat)[1]
        assert_eq(s, s_exact)  # s must contain the singular values
        assert_eq((m_u, n_u), u.shape)  # shape check
        assert_eq((n_s,), s.shape)  # shape check
        assert_eq((d_vh, d_vh), vh.shape)  # shape check
        assert_eq(
            np.eye(n_u, n_u), np.dot(u.T, u), check_type=False
        )  # u must be orthonormal
        assert_eq(
            np.eye(d_vh, d_vh), np.dot(vh, vh.T), check_type=False
        )  # vh must be orthonormal
        assert_eq(
            mat, np.dot(np.dot(u, np.diag(s)), vh[:n_q]), check_type=False
        )  # accuracy check
    else:
        with pytest.raises(error_type):
            q, r = da.linalg.tsqr(data)
        with pytest.raises(error_type):
            u, s, vh = da.linalg.tsqr(data, compute_svd=True)
@pytest.mark.parametrize(
    "m,n,chunks,error_type",
    [
        (20, 10, 10, ValueError),  # tall-skinny regular blocks
        (20, 10, (3, 10), ValueError),  # tall-skinny regular fat layers
        (20, 10, ((8, 4, 8), 10), ValueError),  # tall-skinny irregular fat layers
        (
            40,
            10,
            ((15, 5, 5, 8, 7), 10),
            ValueError,
        ),  # tall-skinny non-uniform chunks (why?)
        (
            128,
            2,
            (16, 2),
            ValueError,
        ),  # tall-skinny regular thin layers; recursion_depth=1
        (
            129,
            2,
            (16, 2),
            ValueError,
        ),  # tall-skinny regular thin layers; recursion_depth=2 --> 17x2
        (
            130,
            2,
            (16, 2),
            ValueError,
        ),  # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next
        (
            131,
            2,
            (16, 2),
            ValueError,
        ),  # tall-skinny regular thin layers; recursion_depth=2 --> 18x2 next
        (
            300,
            10,
            (40, 10),
            ValueError,
        ),  # tall-skinny regular thin layers; recursion_depth=2
        (
            300,
            10,
            (30, 10),
            ValueError,
        ),  # tall-skinny regular thin layers; recursion_depth=3
        (
            300,
            10,
            (20, 10),
            ValueError,
        ),  # tall-skinny regular thin layers; recursion_depth=4
        (10, 5, 10, None),  # single block tall
        (5, 10, 10, None),  # single block short
        (10, 10, 10, None),  # single block square
        (10, 40, (10, 10), None),  # short-fat regular blocks
        (10, 40, (10, 15), None),  # short-fat irregular blocks
        (10, 40, (10, (15, 5, 5, 8, 7)), None),  # short-fat non-uniform chunks (why?)
        (20, 20, 10, ValueError),  # 2x2 regular blocks
    ],
)
def test_sfqr(m, n, chunks, error_type):
    """Short-and-fat QR: accepts a single row of blocks, rejects other layouts.

    NOTE(review): the input is built with NumPy (np.random.rand) even though
    this is the CuPy test module -- confirm that is intended.
    """
    mat = np.random.rand(m, n)
    data = da.from_array(mat, chunks=chunks, name="A")
    m_q = m
    n_q = min(m, n)
    m_r = n_q
    n_r = n
    m_qtq = n_q
    if error_type is None:
        q, r = da.linalg.sfqr(data)
        assert_eq((m_q, n_q), q.shape)  # shape check
        assert_eq((m_r, n_r), r.shape)  # shape check
        assert_eq(mat, da.dot(q, r))  # accuracy check
        assert_eq(np.eye(m_qtq, m_qtq), da.dot(q.T, q))  # q must be orthonormal
        assert_eq(r, da.triu(r.rechunk(r.shape[0])))  # r must be upper triangular
    else:
        with pytest.raises(error_type):
            q, r = da.linalg.sfqr(data)
@pytest.mark.skipif(not _numpy_120, reason="NEP-35 is not available")
@pytest.mark.parametrize("iscomplex", [False, True])
@pytest.mark.parametrize(("nrow", "ncol", "chunk"), [(20, 10, 5), (100, 10, 10)])
def test_lstsq(nrow, ncol, chunk, iscomplex):
    """da.linalg.lstsq matches cupy.linalg.lstsq for 1-D and 2-D targets,
    including the rank-deficient case, for real and complex inputs."""
    cupy.random.seed(1)
    A = cupy.random.randint(1, 20, (nrow, ncol))
    b = cupy.random.randint(1, 20, nrow)
    if iscomplex:
        A = A + 1.0j * cupy.random.randint(1, 20, A.shape)
        b = b + 1.0j * cupy.random.randint(1, 20, b.shape)
    dA = da.from_array(A, (chunk, ncol))
    db = da.from_array(b, chunk)
    x, r, rank, s = cupy.linalg.lstsq(A, b, rcond=-1)
    dx, dr, drank, ds = da.linalg.lstsq(dA, db)
    assert_eq(dx, x)
    assert_eq(dr, r)
    assert drank.compute() == rank
    assert_eq(ds, s)
    # reduce rank causes multicollinearity, only compare rank
    A[:, 1] = A[:, 2]
    dA = da.from_array(A, (chunk, ncol))
    db = da.from_array(b, chunk)
    x, r, rank, s = cupy.linalg.lstsq(
        A, b, rcond=cupy.finfo(cupy.double).eps * max(nrow, ncol)
    )
    assert rank == ncol - 1
    dx, dr, drank, ds = da.linalg.lstsq(dA, db)
    assert drank.compute() == rank
    # 2D case
    A = cupy.random.randint(1, 20, (nrow, ncol))
    b2D = cupy.random.randint(1, 20, (nrow, ncol // 2))
    if iscomplex:
        A = A + 1.0j * cupy.random.randint(1, 20, A.shape)
        b2D = b2D + 1.0j * cupy.random.randint(1, 20, b2D.shape)
    dA = da.from_array(A, (chunk, ncol))
    db2D = da.from_array(b2D, (chunk, ncol // 2))
    x, r, rank, s = cupy.linalg.lstsq(A, b2D, rcond=-1)
    dx, dr, drank, ds = da.linalg.lstsq(dA, db2D)
    assert_eq(dx, x)
    assert_eq(dr, r)
    assert drank.compute() == rank
    assert_eq(ds, s)
def _get_symmat(size):
    """Build a deterministic symmetric positive-definite CuPy matrix."""
    cupy.random.seed(1)
    lower = cupy.tril(cupy.random.randint(1, 21, (size, size)))
    return lower.dot(lower.T)
@pytest.mark.parametrize(("shape", "chunk"), [(20, 10), (12, 3), (30, 3), (30, 6)])
def test_cholesky(shape, chunk):
    """da.linalg.cholesky agrees with cupy (upper) and scipy (lower) results."""
    scipy_linalg = pytest.importorskip("scipy.linalg")
    A = _get_symmat(shape)
    dA = da.from_array(A, (chunk, chunk))
    # Need to take the transpose because default in `cupy.linalg.cholesky` is
    # to return lower triangle
    assert_eq(
        da.linalg.cholesky(dA),
        cupy.linalg.cholesky(A).T,
        check_graph=False,
        check_chunks=False,
    )
    assert_eq(
        da.linalg.cholesky(dA, lower=True).map_blocks(cupy.asnumpy),
        scipy_linalg.cholesky(cupy.asnumpy(A), lower=True),
        check_graph=False,
        check_chunks=False,
    )
| |
# Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the :class:`FormsearchesController`.
.. module:: formsearches
:synopsis: Contains the form searches controller.
"""
import logging
import datetime
import simplejson as json
from pylons import request, response, session, config
from formencode.validators import Invalid
from onlinelinguisticdatabase.lib.base import BaseController
from onlinelinguisticdatabase.lib.schemata import FormSearchSchema
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.lib.SQLAQueryBuilder import SQLAQueryBuilder, OLDSearchParseError
from onlinelinguisticdatabase.model.meta import Session
from onlinelinguisticdatabase.model import FormSearch
log = logging.getLogger(__name__)
class FormsearchesController(BaseController):
    """Generate responses to requests on form search resources.
    REST Controller styled on the Atom Publishing Protocol.
    .. note::
       The ``h.jsonify`` decorator converts the return value of the methods to
       JSON.
    """
    # Shared across all requests; built once when the class is defined.
    query_builder = SQLAQueryBuilder('FormSearch', config=config)
    @h.jsonify
    @h.restrict('SEARCH', 'POST')
    @h.authenticate
    def search(self):
        """Return the list of form search resources matching the input JSON query.
        :URL: ``SEARCH /formsearches`` (or ``POST /formsearches/search``)
        :request body: A JSON object of the form::
            {"query": {"filter": [ ... ], "order_by": [ ... ]},
             "paginator": { ... }}
        where the ``order_by`` and ``paginator`` attributes are optional.
        .. note::
            Yes, that's right, you can search form searches. (No, you can't
            search searches of form searches :)
        """
        try:
            json_search_params = unicode(request.body, request.charset)
            python_search_params = json.loads(json_search_params)
            query = h.eagerload_form_search(
                self.query_builder.get_SQLA_query(python_search_params.get('query')))
            return h.add_pagination(query, python_search_params.get('paginator'))
        except h.JSONDecodeError:
            response.status_int = 400
            return h.JSONDecodeErrorResponse
        except (OLDSearchParseError, Invalid), e:
            response.status_int = 400
            return {'errors': e.unpack_errors()}
        # Deliberate catch-all: any other failure means the submitted query
        # could not be turned into a valid database query.
        except:
            response.status_int = 400
            return {'error': u'The specified search parameters generated an invalid database query'}
    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    def new_search(self):
        """Return the data necessary to search the form search resources.
        :URL: ``GET /formsearches/new_search``
        :returns: ``{"search_parameters": {"attributes": { ... }, "relations": { ... }}``
        """
        return {'search_parameters': h.get_search_parameters(self.query_builder)}
    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    def index(self):
        """Get all form search resources.
        :URL: ``GET /formsearches`` with optional query string parameters for
            ordering and pagination.
        :returns: a list of all form search resources.
        .. note::
           See :func:`utils.add_order_by` and :func:`utils.add_pagination` for the
           query string parameters that effect ordering and pagination.
        """
        try:
            query = h.eagerload_form_search(Session.query(FormSearch))
            query = h.add_order_by(query, dict(request.GET), self.query_builder)
            return h.add_pagination(query, dict(request.GET))
        except Invalid, e:
            response.status_int = 400
            return {'errors': e.unpack_errors()}
    @h.jsonify
    @h.restrict('POST')
    @h.authenticate
    @h.authorize(['administrator', 'contributor'])
    def create(self):
        """Create a new form search resource and return it.
        :URL: ``POST /formsearches``
        :request body: JSON object representing the form search to create.
        :returns: the newly created form search.
        """
        try:
            schema = FormSearchSchema()
            values = json.loads(unicode(request.body, request.charset))
            state = h.get_state_object(values)
            state.config = config
            data = schema.to_python(values, state)
            form_search = create_new_form_search(data)
            Session.add(form_search)
            Session.commit()
            return form_search
        except h.JSONDecodeError:
            response.status_int = 400
            return h.JSONDecodeErrorResponse
        except Invalid, e:
            response.status_int = 400
            return {'errors': e.unpack_errors()}
    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    @h.authorize(['administrator', 'contributor'])
    def new(self):
        """Return the data necessary to create a new form search.
        :URL: ``GET /formsearches/new`` with optional query string parameters
        :returns: A dictionary of lists of resources
        """
        return {'search_parameters': h.get_search_parameters(self.query_builder)}
    @h.jsonify
    @h.restrict('PUT')
    @h.authenticate
    @h.authorize(['administrator', 'contributor'])
    def update(self, id):
        """Update a form search and return it.
        :URL: ``PUT /formsearches/id``
        :Request body: JSON object representing the form search with updated
            attribute values.
        :param str id: the ``id`` value of the form search to be updated.
        :returns: the updated form search model.
        """
        form_search = h.eagerload_form_search(Session.query(FormSearch)).get(int(id))
        if form_search:
            try:
                schema = FormSearchSchema()
                values = json.loads(unicode(request.body, request.charset))
                state = h.get_state_object(values)
                state.id = id
                state.config = config
                data = schema.to_python(values, state)
                form_search = update_form_search(form_search, data)
                # form_search will be False if there are no changes (cf. update_form_search).
                if form_search:
                    Session.add(form_search)
                    Session.commit()
                    return form_search
                else:
                    response.status_int = 400
                    return {'error':
                        u'The update request failed because the submitted data were not new.'}
            except h.JSONDecodeError:
                response.status_int = 400
                return h.JSONDecodeErrorResponse
            except Invalid, e:
                response.status_int = 400
                return {'errors': e.unpack_errors()}
        else:
            response.status_int = 404
            return {'error': 'There is no form search with id %s' % id}
    @h.jsonify
    @h.restrict('DELETE')
    @h.authenticate
    @h.authorize(['administrator', 'contributor'])
    def delete(self, id):
        """Delete an existing form search and return it.
        :URL: ``DELETE /formsearches/id``
        :param str id: the ``id`` value of the form search to be deleted.
        :returns: the deleted form search model.
        """
        form_search = h.eagerload_form_search(Session.query(FormSearch)).get(id)
        if form_search:
            Session.delete(form_search)
            Session.commit()
            return form_search
        else:
            response.status_int = 404
            return {'error': 'There is no form search with id %s' % id}
    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    def show(self, id):
        """Return a form search.
        :URL: ``GET /formsearches/id``
        :param str id: the ``id`` value of the form search to be returned.
        :returns: a form search model object.
        """
        form_search = h.eagerload_form_search(Session.query(FormSearch)).get(id)
        if form_search:
            return form_search
        else:
            response.status_int = 404
            return {'error': 'There is no form search with id %s' % id}
    @h.jsonify
    @h.restrict('GET')
    @h.authenticate
    @h.authorize(['administrator', 'contributor'])
    def edit(self, id):
        """Return a form search and the data needed to update it.
        :URL: ``GET /formsearches/edit`` with optional query string parameters
        :param str id: the ``id`` value of the form search that will be updated.
        :returns: a dictionary of the form::
            {"form_search": {...}, "data": {...}}
        where the value of the ``form_search`` key is a dictionary
        representation of the form search and the value of the ``data`` key
        is a dictionary containing the data necessary to update a form
        search.
        """
        form_search = h.eagerload_form_search(Session.query(FormSearch)).get(id)
        if form_search:
            data = {'search_parameters': h.get_search_parameters(self.query_builder)}
            return {'data': data, 'form_search': form_search}
        else:
            response.status_int = 404
            return {'error': 'There is no form search with id %s' % id}
################################################################################
# FormSearch Create & Update Functions
################################################################################
def create_new_form_search(data):
    """Create a new form search.
    :param dict data: the form search to be created.
    :returns: an form search model object.
    """
    form_search = FormSearch()
    form_search.name = h.normalize(data['name'])
    form_search.search = data['search']    # Note that this is purposefully not normalized (reconsider this? ...)
    form_search.description = h.normalize(data['description'])
    # The authenticated user from the Pylons session becomes the enterer.
    form_search.enterer = session['user']
    form_search.datetime_modified = datetime.datetime.utcnow()
    return form_search
def update_form_search(form_search, data):
    """Update a form search model.

    :param form_search: the form search model to be updated.
    :param dict data: representation of the updated form search.
    :returns: the updated form search model or, if ``changed`` has not been set
        to ``True``, then ``False``.
    """
    changed = False
    # Unicode Data.  Each set_attr call ORs its "did this change" result into
    # ``changed`` so any single modified attribute marks the model as changed.
    changed = form_search.set_attr('name', h.normalize(data['name']), changed)
    changed = form_search.set_attr('search', data['search'], changed)
    changed = form_search.set_attr('description', h.normalize(data['description']), changed)
    if changed:
        # Only touch the modification timestamp when something actually changed.
        form_search.datetime_modified = datetime.datetime.utcnow()
        return form_search
    return changed
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Cloud Datastore helper functions."""
import sys
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from googledatastore import PropertyFilter, CompositeFilter
from googledatastore import helper as datastore_helper
from googledatastore.connection import Datastore
from googledatastore.connection import RPCError
QUERY_NOT_FINISHED = query_pb2.QueryResultBatch.NOT_FINISHED
except ImportError:
QUERY_NOT_FINISHED = None
# pylint: enable=wrong-import-order, wrong-import-position
from apache_beam.internal.gcp import auth
from apache_beam.utils import retry
def key_comparator(k1, k2):
    """A comparator for Datastore keys.

    Comparison is only valid for keys in the same partition. The comparison here
    is between the list of paths for each key.
    """
    if k1.partition_id != k2.partition_id:
        raise ValueError('Cannot compare keys with different partition ids.')

    remaining = iter(k2.path)
    for element in k1.path:
        other = next(remaining, None)
        if not other:
            # k2 ran out of path elements first: k1 is the longer key.
            return 1
        outcome = compare_path(element, other)
        if outcome != 0:
            return outcome

    # All shared elements matched; a leftover element means k2 is larger.
    return -1 if next(remaining, None) else 0
def compare_path(p1, p2):
    """A comparator for key path.

    A path has either an `id` or a `name` field defined. The
    comparison works with the following rules:

    1. If one path has `id` defined while the other doesn't, then the
       one with `id` defined is considered smaller.
    2. If both paths have `id` defined, then their ids are compared.
    3. If no `id` is defined for both paths, then their `names` are compared.
    """
    by_kind = str_compare(p1.kind, p2.kind)
    if by_kind != 0:
        return by_kind

    p1_has_id = p1.HasField('id')
    p2_has_id = p2.HasField('id')
    if p1_has_id and p2_has_id:
        return p1.id - p2.id
    if p1_has_id:
        return -1
    if p2_has_id:
        return 1
    return str_compare(p1.name, p2.name)
def str_compare(s1, s2):
    """Three-way string comparison: -1 if s1 < s2, 1 if s1 > s2, else 0."""
    if s1 < s2:
        return -1
    if s1 > s2:
        return 1
    return 0
def get_datastore(project):
    """Returns a Cloud Datastore client.

    Authenticates with the process-wide service credentials and connects to
    the ``batch-datastore.googleapis.com`` host (presumably the endpoint
    intended for high-throughput batch workloads — confirm against the
    Datastore service docs).
    """
    credentials = auth.get_service_credentials()
    return Datastore(project, credentials, host='batch-datastore.googleapis.com')
def make_request(project, namespace, query):
    """Make a Cloud Datastore request for the given query."""
    request = datastore_pb2.RunQueryRequest()
    request.query.CopyFrom(query)
    request.partition_id.CopyFrom(make_partition(project, namespace))
    return request
def make_partition(project, namespace):
    """Make a PartitionId for the given project and namespace.

    A namespace of None leaves namespace_id at its proto default.
    """
    partition_id = entity_pb2.PartitionId()
    partition_id.project_id = project
    if namespace is not None:
        partition_id.namespace_id = namespace
    return partition_id
def retry_on_rpc_error(exception):
    """A retry filter for Cloud Datastore RPCErrors."""
    if not isinstance(exception, RPCError):
        # TODO(vikasrk): Figure out what other errors should be retried.
        return False
    # Only server-side (5xx) failures are treated as transient.
    return exception.code >= 500
def fetch_entities(project, namespace, query, datastore):
    """A helper method to fetch entities from Cloud Datastore.

    Args:
      project: Project ID
      namespace: Cloud Datastore namespace
      query: Query to be read from
      datastore: Cloud Datastore Client

    Returns:
      An iterator of entities.  Batches are fetched lazily as the iterator is
      consumed, with retries on RPC failures (see QueryIterator).
    """
    return QueryIterator(project, namespace, query, datastore)
def is_key_valid(key):
    """Returns True if a Cloud Datastore key is complete.

    A key is complete if its last element has either an id or a name.
    """
    if not key.path:
        return False
    last_element = key.path[-1]
    return last_element.HasField('id') or last_element.HasField('name')
def write_mutations(datastore, project, mutations):
    """A helper function to write a batch of mutations to Cloud Datastore.

    If a commit fails, it will be retried upto 5 times. All mutations in the
    batch will be committed again, even if the commit was partially successful.
    If the retry limit is exceeded, the last exception from Cloud Datastore will
    be raised.

    Args:
      datastore: Cloud Datastore client.
      project: Datastore project id.
      mutations: iterable of mutation protos copied into one commit request.
    """
    commit_request = datastore_pb2.CommitRequest()
    commit_request.mode = datastore_pb2.CommitRequest.NON_TRANSACTIONAL
    commit_request.project_id = project
    for mutation in mutations:
        commit_request.mutations.add().CopyFrom(mutation)

    # Retries are limited to RPCError with 5xx codes (see retry_on_rpc_error).
    @retry.with_exponential_backoff(num_retries=5,
                                    retry_filter=retry_on_rpc_error)
    def commit(req):
        datastore.commit(req)

    commit(commit_request)
def make_latest_timestamp_query(namespace):
    """Make a Query to fetch the latest timestamp statistics."""
    stat_kind = '__Stat_Total__' if namespace is None else '__Stat_Ns_Total__'
    query = query_pb2.Query()
    query.kind.add().name = stat_kind
    # Sort by timestamp descending and cap at one result so only the most
    # recent statistics entity comes back.
    datastore_helper.add_property_orders(query, "-timestamp")
    query.limit.value = 1
    return query
def make_kind_stats_query(namespace, kind, latest_timestamp):
    """Make a Query to fetch the latest kind statistics.

    Args:
      namespace: Datastore namespace, or None for the default namespace.
      kind: the entity kind whose statistics entity is wanted.
      latest_timestamp: timestamp of the newest statistics update (as fetched
        via make_latest_timestamp_query).
    """
    kind_stat_query = query_pb2.Query()
    if namespace is None:
        kind_stat_query.kind.add().name = '__Stat_Kind__'
    else:
        kind_stat_query.kind.add().name = '__Stat_Ns_Kind__'
    # NOTE: ``unicode`` is Python 2 only; this module targets Python 2 (see
    # ``sys.maxint`` in QueryIterator).
    kind_filter = datastore_helper.set_property_filter(
        query_pb2.Filter(), 'kind_name', PropertyFilter.EQUAL, unicode(kind))
    timestamp_filter = datastore_helper.set_property_filter(
        query_pb2.Filter(), 'timestamp', PropertyFilter.EQUAL,
        latest_timestamp)
    # Combine: kind_name == kind AND timestamp == latest_timestamp.
    datastore_helper.set_composite_filter(kind_stat_query.filter,
                                          CompositeFilter.AND, kind_filter,
                                          timestamp_filter)
    return kind_stat_query
class QueryIterator(object):
  """An iterator class for entities of a given query.

  Entities are read in batches. Retries on failures.
  """
  # Server batch status meaning more results remain.  None when the datastore
  # libraries failed to import (see the guarded imports at module top).
  _NOT_FINISHED = QUERY_NOT_FINISHED
  # Maximum number of results to request per query.
  _BATCH_SIZE = 500

  def __init__(self, project, namespace, query, datastore):
    self._query = query
    self._datastore = datastore
    self._project = project
    self._namespace = namespace
    self._start_cursor = None
    # Overall result cap; sys.maxint (Python 2 only) means effectively no cap
    # when the query itself carries no limit.
    self._limit = self._query.limit.value or sys.maxint
    self._req = make_request(project, namespace, query)

  @retry.with_exponential_backoff(num_retries=5,
                                  retry_filter=retry_on_rpc_error)
  def _next_batch(self):
    """Fetches the next batch of entities."""
    if self._start_cursor is not None:
      self._req.query.start_cursor = self._start_cursor

    # set batch size, never requesting more than the remaining overall limit
    self._req.query.limit.value = min(self._BATCH_SIZE, self._limit)
    resp = self._datastore.run_query(self._req)
    return resp

  def __iter__(self):
    """Yields entities one at a time, fetching batches lazily."""
    more_results = True
    while more_results:
      resp = self._next_batch()
      for entity_result in resp.batch.entity_results:
        yield entity_result.entity

      # Advance the cursor past the batch just consumed.
      self._start_cursor = resp.batch.end_cursor
      num_results = len(resp.batch.entity_results)
      self._limit -= num_results

      # Check if we need to read more entities.
      # True when query limit hasn't been satisfied and there are more entities
      # to be read. The latter is true if the response has a status
      # `NOT_FINISHED` or if the number of results read in the previous batch
      # is equal to `_BATCH_SIZE` (all indications that there is more data be
      # read).
      more_results = ((self._limit > 0) and
                      ((num_results == self._BATCH_SIZE) or
                       (resp.batch.more_results == self._NOT_FINISHED)))
| |
"""
Tests for models/states
"""
import unittest2
class StatesTests(unittest2.TestCase):
    """Tests for the States model with the default state groups."""

    def setUp(self):
        super(StatesTests, self).setUp()
        self.config = {}
        self.config['CARD_STATES'] = (
            'Backlog',
            'In Progress',
            'Deploy',
            'Done',
        )

    def _get_target_class(self):
        # Imported lazily so an import error surfaces per-test, not at
        # collection time.
        from kardboard.models import States
        return States

    def _make_one(self, *args, **kwargs):
        # Fall back to the fixture config unless the caller supplied one.
        # (`in kwargs` instead of the redundant `in kwargs.keys()`.)
        if 'config' not in kwargs:
            kwargs['config'] = self.config
        return self._get_target_class()(*args, **kwargs)

    def test_orderable(self):
        states = self._make_one()
        expected = ['Backlog']
        actual = states.orderable
        # assertEqual rather than a bare assert: consistent with the rest of
        # this class and gives a diff on failure.
        self.assertEqual(expected, actual)

    def test_find_by_slug(self):
        states = self._make_one()
        expected = 'Deploy'
        actual = states.find_by_slug('deploy')
        self.assertEqual(expected, actual)

    def test_iteration(self):
        states = self._make_one()
        expected = list(self.config['CARD_STATES'])
        actual = [state for state in states]
        self.assertEqual(expected, actual)

    def test_default_state_groups(self):
        states = self._make_one()
        expected = 'Backlog'
        self.assertEqual(expected, states.backlog)
        self.assertEqual([expected, ], states.pre_start)
        expected = 'In Progress'
        self.assertEqual(expected, states.start)
        expected = ['In Progress', 'Deploy']
        self.assertEqual(expected, states.in_progress)
        expected = 'Done'
        self.assertEqual(expected, states.done)

    def test_configured_state_groups(self):
        self.config['CARD_STATES'] = (
            'Backlog',
            'Planning',
            'In Progress',
            'Testing',
            'Deploy',
            'Done',
            'Archive',
        )
        self.config['BACKLOG_STATE'] = 0
        self.config['START_STATE'] = 2
        self.config['DONE_STATE'] = -2
        states = self._make_one()

        expected = ['Backlog', 'Planning']
        self.assertEqual(expected[0], states.backlog)
        self.assertEqual(expected, states.pre_start)
        expected = 'In Progress'
        self.assertEqual(expected, states.start)
        expected = ['Planning', 'In Progress', 'Testing', 'Deploy']
        self.assertEqual(expected, states.in_progress)
        expected = 'Done'
        self.assertEqual(expected, states.done)

    def test_for_forms(self):
        states = self._make_one()
        expected = (
            ('', ''),
            ('Backlog', 'Backlog'),
            ('In Progress', 'In Progress'),
            ('Deploy', 'Deploy'),
            ('Done', 'Done'),
        )
        self.assertEqual(expected, states.for_forms)
class BufferStatesTests(unittest2.TestCase):
    """Tests for States when CARD_STATES contains (state, buffer) pairs."""

    def setUp(self):
        CARD_STATES = [
            'Backlog',
            ('Elaborating', 'Ready: Building'),
            ('Building', 'Ready: Testing',),
            ('Testing', 'Build to OTIS',),
            ('OTIS Verify', 'Prodward Bound',),
            'Done',
        ]
        BACKLOG_STATE = 0
        START_STATE = 3
        DONE_STATE = -1
        self.config = {
            'CARD_STATES': CARD_STATES,
            'BACKLOG_STATE': BACKLOG_STATE,
            'START_STATE': START_STATE,
            'DONE_STATE': DONE_STATE,
        }
        self.states = self._make_one()

    def _get_target_class(self):
        from kardboard.models import States
        return States

    def _make_one(self, *args, **kwargs):
        # `in kwargs` instead of the redundant `in kwargs.keys()`.
        if 'config' not in kwargs:
            kwargs['config'] = self.config
        return self._get_target_class()(*args, **kwargs)

    # All assertions below use assertEqual instead of bare asserts so failures
    # report the differing values.

    def test_find_start(self):
        self.assertEqual("Building", self.states.start)

    def test_find_backlog(self):
        self.assertEqual("Backlog", self.states.backlog)

    def test_find_done(self):
        self.assertEqual("Done", self.states.done)

    def test_pre_start(self):
        expected = ['Backlog', 'Elaborating', 'Ready: Building']
        self.assertEqual(expected, self.states.pre_start)

    def test_in_progress(self):
        expected = [
            'Elaborating',
            'Ready: Building',
            'Building',
            'Ready: Testing',
            'Testing',
            'Build to OTIS',
            'OTIS Verify',
            'Prodward Bound',
        ]
        self.assertEqual(expected, self.states.in_progress)

    def test_iteration(self):
        expected = [
            'Backlog',
            'Elaborating',
            'Ready: Building',
            'Building',
            'Ready: Testing',
            'Testing',
            'Build to OTIS',
            'OTIS Verify',
            'Prodward Bound',
            'Done',
        ]
        self.assertEqual(expected, [state for state in self.states])

    def test_str(self):
        expected = [
            'Backlog',
            'Elaborating',
            'Ready: Building',
            'Building',
            'Ready: Testing',
            'Testing',
            'Build to OTIS',
            'OTIS Verify',
            'Prodward Bound',
            'Done',
        ]
        expected = str(expected)
        self.assertEqual(expected, str(self.states))

    def test_by_index(self):
        self.assertEqual("Backlog", self.states[0])
        self.assertEqual("Done", self.states[-1])
        self.assertEqual("Ready: Building", self.states[2])

    def test_find_by_slug(self):
        expected = "Build to OTIS"
        self.assertEqual(expected, self.states.find_by_slug('build-to-otis'))

    def test_orderable(self):
        expected = ['Backlog']
        self.assertEqual(expected, self.states.orderable)

    def test_index(self):
        self.assertEqual(0, self.states.index("Backlog"))

    def test_for_forms(self):
        expected = (
            ('', ''),
            ('Backlog', 'Backlog'),
            ('Elaborating', 'Elaborating'),
            ('Ready: Building', 'Ready: Building'),
            ('Building', 'Building'),
            ('Ready: Testing', 'Ready: Testing'),
            ('Testing', 'Testing'),
            ('Build to OTIS', 'Build to OTIS'),
            ('OTIS Verify', 'OTIS Verify'),
            ('Prodward Bound', 'Prodward Bound'),
            ('Done', 'Done'),
        )
        self.assertEqual(expected, self.states.for_forms)

    def test_active_states(self):
        self.assertEqual(6, len(self.states.active))
| |
from copy import copy
from datetime import datetime
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django_dynamic_fixture import G
from mock import patch
import pytz
from data_schema.models import DataSchema, FieldSchema, FieldOption
from data_schema.field_schema_type import FieldSchemaCase, FieldSchemaType
from data_schema.convert_value import ValueConverter
class ValueConverterTest(TestCase):
    """Tests for ValueConverter.is_numeric."""

    def test_is_numeric(self):
        converter = ValueConverter(FieldSchemaType.FLOAT, float)
        # ints, floats, and scientific notation all count as numeric.
        for numeric_value in (0, 100, 1.34, 1.34e2):
            self.assertTrue(converter.is_numeric(numeric_value))
        # strings and mappings do not.
        for non_numeric_value in ('foo', {'foo': 'bar'}):
            self.assertFalse(converter.is_numeric(non_numeric_value))
class FieldSchemaTypeTest(TestCase):
    """Tests for the FieldSchemaType choices."""

    def test_choices(self):
        """choices() returns exactly the known field types."""
        expected_choices = set([
            ('DATE', 'DATE'),
            ('DATETIME', 'DATETIME'),
            ('DATE_FLOORED', 'DATE_FLOORED'),
            ('INT', 'INT'),
            ('FLOAT', 'FLOAT'),
            ('STRING', 'STRING'),
            ('BOOLEAN', 'BOOLEAN'),
            ('DURATION', 'DURATION'),
        ])
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(expected_choices, set(FieldSchemaType.choices()))

    def test_alphabetical(self):
        """choices() returns its entries already sorted."""
        choices = FieldSchemaType.choices()
        sorted_choices = copy(choices)
        sorted_choices.sort()
        self.assertListEqual(choices, sorted_choices)
class DataSchemaUpdateTest(TestCase):
    """
    Tests the update method in the DataSchema model.

    All assertEquals calls were changed to assertEqual (assertEquals is a
    deprecated alias).
    """
    def test_update_no_values(self):
        """Calling update() with no arguments still saves the schema."""
        ds = DataSchema()
        ds.update()
        self.assertIsNotNone(ds.id)

    def test_update_with_model_ctype_none(self):
        """A null model content type is persisted as None."""
        ds = DataSchema()
        ds.update(model_content_type=None)
        self.assertIsNone(ds.model_content_type)

    def test_update_with_model_ctype_not_none(self):
        """A provided model content type is persisted."""
        ds = DataSchema()
        ds.update(model_content_type=ContentType.objects.get_for_model(ds))
        self.assertEqual(ds.model_content_type, ContentType.objects.get_for_model(ds))

    def test_empty_field_schema_set(self):
        """An empty fieldschema_set creates no field schemas."""
        ds = DataSchema()
        ds.update(fieldschema_set=[])
        self.assertEqual(FieldSchema.objects.count(), 0)

    def test_empty_field_schema_set_with_preexisting(self):
        """An empty fieldschema_set deletes preexisting field schemas."""
        ds = G(DataSchema)
        G(FieldSchema, data_schema=ds)
        ds.update(fieldschema_set=[])
        self.assertEqual(FieldSchema.objects.count(), 0)

    def test_field_schema_set_creation_with_basic_values(self):
        """Field schemas are created from minimal key/type dicts."""
        ds = DataSchema()
        ds.update(fieldschema_set=[{
            'field_key': 'email',
            'field_type': 'STRING'
        }])
        fs = ds.fieldschema_set.get()
        self.assertEqual(fs.field_key, 'email')
        self.assertEqual(fs.field_type, 'STRING')

    def test_field_schema_set_preexisting_values(self):
        """update() upserts matching keys and drops keys absent from the new set."""
        ds = G(DataSchema)
        G(FieldSchema, field_key='email', display_name='Email!', data_schema=ds)
        G(FieldSchema, field_key='hi', data_schema=ds)
        ds.update(fieldschema_set=[{
            'field_key': 'email',
            'field_type': 'STRING',
            'display_name': 'Email',
            'uniqueness_order': 1,
            'field_position': 1,
            'field_format': 'format',
            'default_value': '',
            'transform_case': FieldSchemaCase.LOWER,
        }, {
            'field_key': 'date',
            'field_type': 'DATETIME',
            'display_name': 'Date',
            'uniqueness_order': 2,
            'field_position': 2,
            'field_format': 'format2',
            'default_value': 'default',
        }])
        # 'hi' was deleted; 'email' updated; 'date' created.
        self.assertEqual(FieldSchema.objects.count(), 2)

        fs = ds.fieldschema_set.all().order_by('field_key')[0]
        self.assertEqual(fs.field_key, 'date')
        self.assertEqual(fs.field_type, 'DATETIME')
        self.assertEqual(fs.display_name, 'Date')
        self.assertEqual(fs.uniqueness_order, 2)
        self.assertEqual(fs.field_position, 2)
        self.assertEqual(fs.field_format, 'format2')
        self.assertEqual(fs.default_value, 'default')
        self.assertFalse(fs.has_options)
        self.assertIsNone(fs.transform_case)

        fs = ds.fieldschema_set.all().order_by('field_key')[1]
        self.assertEqual(fs.field_key, 'email')
        self.assertEqual(fs.field_type, 'STRING')
        self.assertEqual(fs.display_name, 'Email')
        self.assertEqual(fs.uniqueness_order, 1)
        self.assertEqual(fs.field_position, 1)
        self.assertEqual(fs.field_format, 'format')
        self.assertEqual(fs.default_value, '')
        self.assertFalse(fs.has_options)
        self.assertEqual(fs.transform_case, FieldSchemaCase.LOWER)

    def test_field_schema_set_preexisting_values_w_options(self):
        """Like the previous test, but each field also gets a fieldoption_set."""
        ds = G(DataSchema)
        G(FieldSchema, field_key='email', display_name='Email!', data_schema=ds)
        G(FieldSchema, field_key='hi', data_schema=ds)
        ds.update(fieldschema_set=[{
            'field_key': 'email',
            'field_type': 'STRING',
            'display_name': 'Email',
            'uniqueness_order': 1,
            'field_position': 1,
            'field_format': 'format',
            'default_value': '',
            'fieldoption_set': ['option1', 'option2'],
        }, {
            'field_key': 'date',
            'field_type': 'DATETIME',
            'display_name': 'Date',
            'uniqueness_order': 2,
            'field_position': 2,
            'field_format': 'format2',
            'default_value': 'default',
            'fieldoption_set': ['option3', 'option4'],
        }])
        self.assertEqual(FieldSchema.objects.count(), 2)

        fs = ds.fieldschema_set.all().order_by('field_key')[0]
        self.assertEqual(fs.field_key, 'date')
        self.assertEqual(fs.field_type, 'DATETIME')
        self.assertEqual(fs.display_name, 'Date')
        self.assertEqual(fs.uniqueness_order, 2)
        self.assertEqual(fs.field_position, 2)
        self.assertEqual(fs.field_format, 'format2')
        self.assertEqual(fs.default_value, 'default')
        self.assertTrue(fs.has_options)
        self.assertEqual(set(['option3', 'option4']), set(fs.fieldoption_set.values_list('value', flat=True)))

        fs = ds.fieldschema_set.all().order_by('field_key')[1]
        self.assertEqual(fs.field_key, 'email')
        self.assertEqual(fs.field_type, 'STRING')
        self.assertEqual(fs.display_name, 'Email')
        self.assertEqual(fs.uniqueness_order, 1)
        self.assertEqual(fs.field_position, 1)
        self.assertEqual(fs.field_format, 'format')
        self.assertEqual(fs.default_value, '')
        self.assertTrue(fs.has_options)
        self.assertEqual(set(['option1', 'option2']), set(fs.fieldoption_set.values_list('value', flat=True)))
class DataSchemaTest(TestCase):
    """
    Tests the DataSchema model.

    Fixes in this revision: assertEquals -> assertEqual (deprecated alias), and
    @patch(..., set_spec=True) -> spec_set=True.  ``set_spec`` is not a patch
    keyword; mock silently turned it into an attribute on the mock, so the
    intended spec enforcement never happened.
    """
    def test_get_value_exception(self):
        """
        Tests that when we fail to parse a value, we get a ValueError with additional information attached.
        """
        bad_value = '-'
        field_key = 'number'
        data_schema = G(DataSchema)
        G(
            FieldSchema, field_key='number', field_position=0, field_type=FieldSchemaType.INT,
            data_schema=data_schema)
        with self.assertRaises(ValueError) as ctx:
            data_schema.get_value({field_key: bad_value}, field_key)
        self.assertEqual(field_key, ctx.exception.field_key)
        self.assertEqual(bad_value, ctx.exception.bad_value)
        self.assertEqual(FieldSchemaType.INT, ctx.exception.expected_type)

    def test_get_unique_fields_no_fields(self):
        """
        Tests the get_unique_fields function when there are no fields defined.
        """
        data_schema = G(DataSchema)
        self.assertEqual(data_schema.get_unique_fields(), [])

    def test_get_unique_fields_no_unique_fields(self):
        """
        Tests the get_unique_fields function when there are fields defined, but
        none of them have a unique constraint.
        """
        data_schema = G(DataSchema)
        G(FieldSchema, data_schema=data_schema)
        G(FieldSchema, data_schema=data_schema)
        self.assertEqual(data_schema.get_unique_fields(), [])

    def test_get_unique_fields_one(self):
        """
        Tests retrieving one unique field.
        """
        data_schema = G(DataSchema)
        field = G(FieldSchema, data_schema=data_schema, uniqueness_order=1)
        G(FieldSchema, data_schema=data_schema)
        self.assertEqual(data_schema.get_unique_fields(), [field])

    def test_get_unique_fields_three(self):
        """
        Tests retrieving three unique fields, ordered by uniqueness_order.
        """
        data_schema = G(DataSchema)
        field1 = G(FieldSchema, data_schema=data_schema, uniqueness_order=1)
        field2 = G(FieldSchema, data_schema=data_schema, uniqueness_order=3)
        field3 = G(FieldSchema, data_schema=data_schema, uniqueness_order=2)
        G(FieldSchema, data_schema=data_schema)
        self.assertEqual(data_schema.get_unique_fields(), [field1, field3, field2])

    def test_optimal_queries_get_unique_fields(self):
        """
        Tests that get_unique_fields incurs no additional queries when caching the
        schema with the model manager.
        """
        data_schema = G(DataSchema)
        field1 = G(FieldSchema, data_schema=data_schema, uniqueness_order=1)
        field2 = G(FieldSchema, data_schema=data_schema, uniqueness_order=3)
        field3 = G(FieldSchema, data_schema=data_schema, uniqueness_order=2)
        G(FieldSchema, data_schema=data_schema)

        data_schema = DataSchema.objects.get(id=data_schema.id)
        with self.assertNumQueries(0):
            self.assertEqual(data_schema.get_unique_fields(), [field1, field3, field2])

    def test_cached_unique_fields(self):
        """
        Tests that get_unique_fields function caches the unique fields.
        """
        data_schema = G(DataSchema)
        field1 = G(FieldSchema, data_schema=data_schema, uniqueness_order=1)
        field2 = G(FieldSchema, data_schema=data_schema, uniqueness_order=3)
        field3 = G(FieldSchema, data_schema=data_schema, uniqueness_order=2)
        G(FieldSchema, data_schema=data_schema)

        data_schema = DataSchema.objects.get(id=data_schema.id)
        self.assertFalse(hasattr(data_schema, '_unique_fields'))
        self.assertEqual(data_schema.get_unique_fields(), [field1, field3, field2])
        self.assertTrue(hasattr(data_schema, '_unique_fields'))
        self.assertEqual(data_schema.get_unique_fields(), [field1, field3, field2])

    def test_get_fields_no_fields(self):
        """
        Tests the get_fields function when there are no fields defined.
        """
        data_schema = G(DataSchema)
        self.assertEqual(data_schema.get_fields(), [])

    def test_get_fields_one(self):
        """
        Tests retrieving one field.
        """
        data_schema = G(DataSchema)
        field = G(FieldSchema, data_schema=data_schema)
        G(FieldSchema)
        self.assertEqual(data_schema.get_fields(), [field])

    def test_get_fields_three(self):
        """
        Tests retrieving three fields.
        """
        data_schema = G(DataSchema)
        field1 = G(FieldSchema, data_schema=data_schema)
        field2 = G(FieldSchema, data_schema=data_schema)
        field3 = G(FieldSchema, data_schema=data_schema, uniqueness_order=1)
        G(FieldSchema)
        self.assertEqual(set(data_schema.get_fields()), set([field1, field2, field3]))

    def test_get_fields_with_field_ordering(self):
        """
        Tests that obtaining fields with a field position returns them in the proper
        order.
        """
        data_schema = G(DataSchema)
        field1 = G(FieldSchema, data_schema=data_schema, field_position=2)
        field2 = G(FieldSchema, data_schema=data_schema, field_position=3)
        field3 = G(FieldSchema, data_schema=data_schema, field_position=1)
        G(FieldSchema)
        self.assertEqual(data_schema.get_fields(), [field3, field1, field2])

    def test_optimal_queries_get_fields(self):
        """
        Tests that get_fields incurs no additional queries when caching the
        schema with the model manager.
        """
        data_schema = G(DataSchema)
        field1 = G(FieldSchema, data_schema=data_schema, uniqueness_order=1)
        field2 = G(FieldSchema, data_schema=data_schema, uniqueness_order=3)
        field3 = G(FieldSchema, data_schema=data_schema, uniqueness_order=2)
        G(FieldSchema)

        data_schema = DataSchema.objects.get(id=data_schema.id)
        with self.assertNumQueries(0):
            self.assertEqual(set(data_schema.get_fields()), set([field1, field3, field2]))

    def test_set_value_list(self):
        """
        Tests setting the value of a list.
        """
        data_schema = G(DataSchema)
        G(FieldSchema, data_schema=data_schema, field_key='field_key', field_position=1)
        val = ['hello', 'worlds']
        data_schema.set_value(val, 'field_key', 'world')
        self.assertEqual(val, ['hello', 'world'])

    def test_set_value_obj(self):
        """
        Tests setting the value of an object.
        """
        class Input:
            field_key = 'none'

        data_schema = G(DataSchema)
        G(FieldSchema, data_schema=data_schema, field_key='field_key')
        obj = Input()
        data_schema.set_value(obj, 'field_key', 'value')
        self.assertEqual(obj.field_key, 'value')

    def test_set_value_dict(self):
        """
        Tests setting the value of a dict.
        """
        data_schema = G(DataSchema)
        G(FieldSchema, data_schema=data_schema, field_key='field_key')
        val = {'field_key': 'value1'}
        data_schema.set_value(val, 'field_key', 'value')
        self.assertEqual(val['field_key'], 'value')

    @patch('data_schema.models.convert_value', spec_set=True)
    def test_get_value_dict(self, convert_value_mock):
        """
        Tests getting the value of a field when the object is a dictionary.
        """
        data_schema = G(DataSchema)
        G(FieldSchema, field_key='field_key', field_type=FieldSchemaType.STRING, data_schema=data_schema)
        obj = {
            'field_key': 'value',
        }
        data_schema.get_value(obj, 'field_key')
        convert_value_mock.assert_called_once_with(FieldSchemaType.STRING, 'value', None, None, None)

    def test_get_value_dict_cached(self):
        """
        Tests getting the value of a field twice (i.e. the cache gets used)
        """
        data_schema = G(DataSchema)
        G(FieldSchema, field_key='field_key', data_schema=data_schema, field_type=FieldSchemaType.STRING)
        obj = {
            'field_key': 'none',
        }
        value = data_schema.get_value(obj, 'field_key')
        self.assertEqual(value, 'none')
        value = data_schema.get_value(obj, 'field_key')
        self.assertEqual(value, 'none')

    @patch('data_schema.models.convert_value', spec_set=True)
    def test_get_value_obj(self, convert_value_mock):
        """
        Tests the get_value function with an object as input.
        """
        class Input:
            field_key = 'value'

        data_schema = G(DataSchema)
        G(
            FieldSchema, field_key='field_key', field_type=FieldSchemaType.STRING, field_format='format',
            data_schema=data_schema)
        data_schema.get_value(Input(), 'field_key')
        convert_value_mock.assert_called_once_with(FieldSchemaType.STRING, 'value', 'format', None, None)

    @patch('data_schema.models.convert_value', spec_set=True)
    def test_get_value_list(self, convert_value_mock):
        """
        Tests the get_value function with a list as input.
        """
        data_schema = G(DataSchema)
        G(
            FieldSchema, field_key='field_key', field_position=1, field_type=FieldSchemaType.STRING,
            data_schema=data_schema)
        data_schema.get_value(['hello', 'world'], 'field_key')
        convert_value_mock.assert_called_once_with(FieldSchemaType.STRING, 'world', None, None, None)
class FieldSchemaTest(TestCase):
    """
    Tests functionality in the FieldSchema model.

    Fixes in this revision: assertEquals -> assertEqual (deprecated alias), and
    @patch(..., set_spec=True) -> spec_set=True (``set_spec`` is not a patch
    keyword and was silently ignored as spec enforcement).
    """
    def test_set_value_list(self):
        """
        Tests setting the value of a list.
        """
        field_schema = G(FieldSchema, field_key='field_key', field_position=1)
        val = ['hello', 'worlds']
        field_schema.set_value(val, 'world')
        self.assertEqual(val, ['hello', 'world'])

    def test_set_value_obj(self):
        """
        Tests setting the value of an object.
        """
        class Input:
            field_key = 'none'

        field_schema = G(FieldSchema, field_key='field_key')
        obj = Input()
        field_schema.set_value(obj, 'value')
        self.assertEqual(obj.field_key, 'value')

    def test_set_value_dict(self):
        """
        Tests setting the value of a dict.
        """
        field_schema = G(FieldSchema, field_key='field_key')
        val = {'field_key': 'value1'}
        field_schema.set_value(val, 'value')
        self.assertEqual(val['field_key'], 'value')

    @patch('data_schema.models.convert_value', spec_set=True)
    def test_get_value_dict(self, convert_value_mock):
        """
        Tests getting the value of a field when the object is a dictionary.
        """
        field_schema = G(FieldSchema, field_key='field_key', field_type=FieldSchemaType.STRING)
        field_schema.get_value({'field_key': 'hello'})
        convert_value_mock.assert_called_once_with(FieldSchemaType.STRING, 'hello', None, None, None)

    @patch('data_schema.models.convert_value', spec_set=True)
    def test_get_value_obj(self, convert_value_mock):
        """
        Tests the get_value function with an object as input.
        """
        class Input:
            field_key = 'value'

        field_schema = G(FieldSchema, field_key='field_key', field_type=FieldSchemaType.STRING, field_format='format')
        field_schema.get_value(Input())
        convert_value_mock.assert_called_once_with(FieldSchemaType.STRING, 'value', 'format', None, None)

    @patch('data_schema.models.convert_value', spec_set=True)
    def test_get_value_list(self, convert_value_mock):
        """
        Tests the get_value function with a list as input.
        """
        field_schema = G(FieldSchema, field_key='field_key', field_position=1, field_type=FieldSchemaType.STRING)
        field_schema.get_value(['hello', 'world'])
        convert_value_mock.assert_called_once_with(FieldSchemaType.STRING, 'world', None, None, None)

    @patch('data_schema.models.convert_value', spec_set=True)
    def test_get_value_dict_non_extant(self, convert_value_mock):
        """
        Tests getting the value of a field when the object is a dictionary and it doesn't exist in the dict.
        """
        field_schema = G(FieldSchema, field_key='field_key_bad', field_type=FieldSchemaType.STRING)
        field_schema.get_value({'field_key': 'hello'})
        convert_value_mock.assert_called_once_with(FieldSchemaType.STRING, None, None, None, None)

    @patch('data_schema.models.convert_value', spec_set=True)
    def test_get_value_obj_non_extant(self, convert_value_mock):
        """
        Tests the get_value function with an object as input and the field key is not in the object.
        """
        class Input:
            field_key = 'value'

        field_schema = G(
            FieldSchema, field_key='field_key_bad', field_type=FieldSchemaType.STRING, field_format='format')
        field_schema.get_value(Input())
        convert_value_mock.assert_called_once_with(FieldSchemaType.STRING, None, 'format', None, None)

    @patch('data_schema.models.convert_value', spec_set=True)
    def test_get_value_list_non_extant_negative(self, convert_value_mock):
        """
        Tests the get_value function with a list as input and the input position is negative.
        """
        field_schema = G(FieldSchema, field_key='field_key', field_position=-1, field_type=FieldSchemaType.STRING)
        field_schema.get_value(['hello', 'world'])
        convert_value_mock.assert_called_once_with(FieldSchemaType.STRING, None, None, None, None)

    @patch('data_schema.models.convert_value', spec_set=True)
    def test_get_value_list_non_extant_out_of_range(self, convert_value_mock):
        """
        Tests the get_value function with a list as input and the input position is greater than the
        length of the list.
        """
        field_schema = G(FieldSchema, field_key='field_key', field_position=2, field_type=FieldSchemaType.STRING)
        field_schema.get_value(['hello', 'world'])
        convert_value_mock.assert_called_once_with(FieldSchemaType.STRING, None, None, None, None)

    def test_set_display_name(self):
        """
        Tests that a display name is left alone if different than the field_key
        """
        field_schema = G(FieldSchema, field_key='test', display_name='display')
        self.assertEqual('test', field_schema.field_key)
        self.assertEqual('display', field_schema.display_name)

    def test_set_display_name_empty(self):
        """
        Tests that the field_key is copied to the display name if there is no display name set when saving
        """
        field_schema = G(FieldSchema, field_key='test')
        self.assertEqual('test', field_schema.field_key)
        self.assertEqual('test', field_schema.display_name)
class DateFieldSchemaTest(TestCase):
    """
    Tests the DATE type for field schemas.

    assertEquals calls were changed to assertEqual (deprecated alias).
    """
    def test_no_format_string(self):
        """
        Tests when there is a string input with no format string.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATE)
        val = field_schema.get_value({'time': '2013/04/02 9:25 PM'})
        self.assertEqual(val, datetime(2013, 4, 2, 21, 25))

    def test_none(self):
        """
        Tests getting a value of None.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATE, field_format='%Y-%m-%d')
        val = field_schema.get_value({'time': None})
        self.assertEqual(val, None)

    def test_blank(self):
        """
        Tests blank strings of input.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATE, field_format='%Y-%m-%d')
        val = field_schema.get_value({'time': '    '})
        self.assertEqual(val, None)

    def test_padded_date_with_format(self):
        """
        Tests a date that is padded and has a format string.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATE, field_format='%Y-%m-%d')
        val = field_schema.get_value({'time': ' 2013-04-05     '})
        self.assertEqual(val, datetime(2013, 4, 5))

    def test_get_value_date(self):
        """
        Tests getting the value when the input is already a date object.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATE)
        val = field_schema.get_value({'time': datetime(2013, 4, 4)})
        self.assertEqual(val, datetime(2013, 4, 4))

    def test_get_value_int(self):
        """
        Tests getting the date value of an int. Assumed to be a utc timestamp.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATE)
        val = field_schema.get_value({'time': 1399486805})
        self.assertEqual(val, datetime(2014, 5, 7, 18, 20, 5))

    def test_get_value_float(self):
        """
        Tests getting the date value of an float. Assumed to be a utc timestamp.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATE)
        val = field_schema.get_value({'time': 1399486805.0})
        self.assertEqual(val, datetime(2014, 5, 7, 18, 20, 5))

    def test_get_value_formatted(self):
        """
        Tests getting a formatted date value.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATE, field_format='%Y-%m-%d')
        val = field_schema.get_value({'time': '2013-04-05'})
        self.assertEqual(val, datetime(2013, 4, 5))
class DatetimeFieldSchemaTest(TestCase):
    """
    Tests the DATETIME type for field schemas.

    Note: the deprecated ``assertEquals`` alias (removed in Python 3.12) has
    been replaced with ``assertEqual`` throughout.
    """
    def test_default_value_blank(self):
        """
        Tests when a default value is used and there is a blank string.
        """
        field_schema = G(
            FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME, default_value='2013/04/02 9:25 PM')
        val = field_schema.get_value({'time': '   '})
        self.assertEqual(val, datetime(2013, 4, 2, 21, 25))

    def test_default_value_null(self):
        """
        Tests when a default value is used and there is a null object.
        """
        field_schema = G(
            FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME, default_value='2013/04/02 9:25 PM')
        val = field_schema.get_value({'time': None})
        self.assertEqual(val, datetime(2013, 4, 2, 21, 25))

    def test_no_format_string(self):
        """
        Tests when there is a string input with no format string.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME)
        val = field_schema.get_value({'time': '2013/04/02 9:25 PM'})
        self.assertEqual(val, datetime(2013, 4, 2, 21, 25))

    def test_datetime_with_tz_dateutil(self):
        """
        Tests that a datetime with a tz is converted back to naive UTC after using dateutil for parsing.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME)
        val = field_schema.get_value({'time': '2013/04/02 09:25:00+0400'})
        self.assertEqual(val, datetime(2013, 4, 2, 5, 25))

    def test_datetime_with_tz(self):
        """
        Tests that a datetime with a tz is converted back to naive UTC.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME)
        val = field_schema.get_value({'time': datetime(2013, 4, 2, 9, 25, tzinfo=pytz.utc)})
        self.assertEqual(val, datetime(2013, 4, 2, 9, 25))

    def test_none(self):
        """
        Tests getting a value of None.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME, field_format='%Y-%m-%d')
        val = field_schema.get_value({'time': None})
        self.assertEqual(val, None)

    def test_blank(self):
        """
        Tests blank strings of input.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME, field_format='%Y-%m-%d')
        val = field_schema.get_value({'time': '    '})
        self.assertEqual(val, None)

    def test_get_value_date(self):
        """
        Tests getting the value when the input is already a date object.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME)
        val = field_schema.get_value({'time': datetime(2013, 4, 4)})
        self.assertEqual(val, datetime(2013, 4, 4))

    def test_get_value_int(self):
        """
        Tests getting the date value of an int. Assumed to be a utc timestamp.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME)
        val = field_schema.get_value({'time': 1399486805})
        self.assertEqual(val, datetime(2014, 5, 7, 18, 20, 5))

    def test_get_value_float(self):
        """
        Tests getting the date value of an float. Assumed to be a utc timestamp.
        """
        field_schema = G(FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME)
        val = field_schema.get_value({'time': 1399486805.0})
        self.assertEqual(val, datetime(2014, 5, 7, 18, 20, 5))

    def test_get_value_formatted(self):
        """
        Tests getting a formatted date value.
        """
        field_schema = G(
            FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME, field_format='%Y-%m-%d %H:%M:%S')
        val = field_schema.get_value({'time': '2013-04-05 12:12:12'})
        self.assertEqual(val, datetime(2013, 4, 5, 12, 12, 12))

    def test_get_value_formatted_unicode(self):
        """
        Tests getting a formatted date in unicode.
        """
        field_schema = G(
            FieldSchema, field_key='time', field_type=FieldSchemaType.DATETIME, field_format='%Y-%m-%d %H:%M:%S')
        val = field_schema.get_value({'time': u'2013-04-05 12:12:12'})
        self.assertEqual(val, datetime(2013, 4, 5, 12, 12, 12))
class IntFieldSchemaTest(TestCase):
    """
    Tests the INT type for field schemas.

    Note: the deprecated ``assertEquals``/``assertAlmostEquals`` aliases
    (removed in Python 3.12) have been replaced with the canonical names.
    """
    def test_negative_string(self):
        """
        Tests parsing a negative string number.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.INT)
        val = field_schema.get_value({'val': '-1'})
        self.assertEqual(val, -1)

    def test_none(self):
        """
        Tests getting a value of None.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.INT)
        val = field_schema.get_value({'val': None})
        self.assertEqual(val, None)

    def test_blank(self):
        """
        Tests blank strings of input.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.INT)
        val = field_schema.get_value({'val': '    '})
        self.assertEqual(val, None)

    def test_get_value_non_numeric_str(self):
        """
        Tests getting the value of a string that has currency information.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.INT)
        val = field_schema.get_value({'val': '  $15,000,456 Dollars '})
        self.assertAlmostEqual(val, 15000456)

    def test_get_value_str(self):
        """
        Tests getting the value when the input is a string.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.INT)
        val = field_schema.get_value({'val': '1'})
        self.assertEqual(val, 1)

    def test_get_value_int(self):
        """
        Tests getting the value when it is an int.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.INT)
        val = field_schema.get_value({'val': 5})
        self.assertEqual(val, 5)

    def test_get_value_float(self):
        """
        Tests getting the date value of a float.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.INT)
        val = field_schema.get_value({'val': 5.2})
        self.assertEqual(val, 5)
class StringFieldSchemaTest(TestCase):
    """
    Tests the STRING type for field schemas.

    Note: the deprecated ``assertEquals`` alias (removed in Python 3.12) has
    been replaced with ``assertEqual`` throughout.
    """
    def test_bad_unicode_input(self):
        """
        Unicode special chars should be handled properly.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING)
        val = field_schema.get_value({'val': u'\u2019'})
        self.assertEqual(val, u'\u2019')

    def test_unicode_input(self):
        """
        Unicode should be handled properly.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING)
        val = field_schema.get_value({'val': u'    '})
        self.assertEqual(val, '')

    def test_matching_format(self):
        """
        Tests returning a string that matches a format.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING, field_format=r'^[\d\.]+$')
        val = field_schema.get_value({'val': '23.45'})
        self.assertEqual(val, '23.45')

    def test_non_matching_format(self):
        """
        Tests returning a string that matches a format.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING, field_format=r'^[\d\.]+$')
        val = field_schema.get_value({'val': '23,45'})
        self.assertEqual(val, None)

    def test_matching_format_limit_length(self):
        """
        Tests returning a string that matches a format of a limited length number.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING, field_format=r'^[\d]{1,5}$')
        val = field_schema.get_value({'val': '2345'})
        self.assertEqual(val, '2345')
        val = field_schema.get_value({'val': '23456'})
        self.assertEqual(val, '23456')

    def test_non_matching_format_limit_length(self):
        """
        Tests returning a string that matches a format of a limited length number.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING, field_format=r'^[\d]{1,5}$')
        val = field_schema.get_value({'val': '234567'})
        self.assertEqual(val, None)

    def test_none(self):
        """
        Tests getting a value of None.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING)
        val = field_schema.get_value({'val': None})
        self.assertEqual(val, None)

    def test_blank(self):
        """
        Tests blank strings of input. Contrary to other formats, the string field schema
        returns a blank string instead of None (since blank strings are valid strings).
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING)
        val = field_schema.get_value({'val': '    '})
        self.assertEqual(val, '')

    def test_strip_whitespaces(self):
        """
        Tests that getting a string results in its leading and trailing whitespace being
        stripped.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING)
        val = field_schema.get_value({'val': '  1 2 3   '})
        self.assertEqual(val, '1 2 3')

    def test_get_value_str(self):
        """
        Tests getting the value when the input is a string.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING)
        val = field_schema.get_value({'val': '1'})
        self.assertEqual(val, '1')

    def test_get_value_int(self):
        """
        Tests getting the value when it is an int.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING)
        val = field_schema.get_value({'val': 5})
        self.assertEqual(val, '5')

    def test_get_value_float(self):
        """
        Tests getting the date value of a float.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.STRING)
        val = field_schema.get_value({'val': 5.2})
        self.assertEqual(val, '5.2')

    def test_lowercase(self):
        """
        Tests that the string is converted to lowercase
        """
        field_schema = G(
            FieldSchema, field_key='val', field_type=FieldSchemaType.STRING, transform_case=FieldSchemaCase.LOWER
        )
        val = field_schema.get_value({'val': 'Value'})
        self.assertEqual(val, 'value')

    def test_uppercase(self):
        """
        Tests that the string is converted to uppercase
        """
        field_schema = G(
            FieldSchema, field_key='val', field_type=FieldSchemaType.STRING, transform_case=FieldSchemaCase.UPPER
        )
        val = field_schema.get_value({'val': 'Value'})
        self.assertEqual(val, 'VALUE')
class FloatFieldSchemaTest(TestCase):
    """
    Tests the FLOAT type for field schemas.

    Note: the deprecated ``assertEquals``/``assertAlmostEquals`` aliases
    (removed in Python 3.12) have been replaced with the canonical names.
    """
    def test_positive_scientific_notation(self):
        """
        Tests that positive scientific notation strings are parsed.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': '1.1E2'})
        self.assertEqual(val, 110)

    def test_positive_scientific_notation_small_e(self):
        """
        Tests that positive scientific notation strings are parsed with a lowercase e.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': '1.1e2'})
        self.assertEqual(val, 110)

    def test_negative_scientific_notation(self):
        """
        Tests that negative scientific notation strings are parsed.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': '-1.1E-2'})
        self.assertEqual(val, -0.011)

    def test_negative_string(self):
        """
        Tests parsing a negative string number.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': '-1.1'})
        self.assertEqual(val, -1.1)

    def test_none(self):
        """
        Tests getting a value of None.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': None})
        self.assertEqual(val, None)

    def test_blank(self):
        """
        Tests blank strings of input.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': '    '})
        self.assertEqual(val, None)

    def test_get_value_non_numeric_str(self):
        """
        Tests getting the value of a string that has currency information.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': '  $15,000,456.34 Dollars '})
        self.assertAlmostEqual(val, 15000456.34)

    def test_get_value_non_numeric_unicode(self):
        """
        Tests getting the value of a unicode object that has currency information.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': u'  $15,000,456.34 Dollars '})
        self.assertAlmostEqual(val, 15000456.34)

    def test_get_value_str(self):
        """
        Tests getting the value when the input is a string.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': '1'})
        self.assertAlmostEqual(val, 1.0)

    def test_get_value_int(self):
        """
        Tests getting the value when it is an int.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': 5})
        self.assertAlmostEqual(val, 5.0)

    def test_get_value_float(self):
        """
        Tests getting the date value of a float.
        """
        field_schema = G(FieldSchema, field_key='val', field_type=FieldSchemaType.FLOAT)
        val = field_schema.get_value({'val': 5.2})
        self.assertAlmostEqual(val, 5.2)
class FieldOptionTest(TestCase):
    """
    Tests value validation against the options defined for a field schema.
    """
    def test_set_valid_value(self):
        """
        The field schema should have defined options and a valid option should be set
        """
        schema = G(FieldSchema, field_type=FieldSchemaType.STRING, field_key='my_key', has_options=True)
        G(FieldOption, field_schema=schema, value='one')
        G(FieldOption, field_schema=schema, value='two')
        record = {'my_key': None}
        schema.set_value(record, 'one')
        self.assertEqual('one', record['my_key'])

    def test_set_invalid_value(self):
        """
        The field schema should have defined options and an invalid option should be set
        """
        schema = G(FieldSchema, field_type=FieldSchemaType.STRING, field_key='my_key', has_options=True)
        G(FieldOption, field_schema=schema, value='one')
        G(FieldOption, field_schema=schema, value='two')
        record = {'my_key': None}
        # the value is not among the defined options, so set_value must raise
        # and leave the record untouched
        with self.assertRaises(Exception):
            schema.set_value(record, 'three')
        self.assertIsNone(record['my_key'])

    def test_set_value_different_type(self):
        """
        The field schema should be a different type and it should validate correctly
        """
        schema = G(FieldSchema, field_type=FieldSchemaType.INT, field_key='my_key', has_options=True)
        G(FieldOption, field_schema=schema, value='1')
        G(FieldOption, field_schema=schema, value='2')
        record = {'my_key': None}
        schema.set_value(record, 1)
        self.assertEqual(1, record['my_key'])
| |
# Copyright (c) 2010 Arek Korbik, Alessandro Decina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from twisted.internet import glib2reactor
glib2reactor.install()
from twisted.internet import reactor
# not importing gst here, we want to parse command line options ourselves
# import gst
from twimp import amf0
from twimp.client import BaseClientApp, SimpleAppClientFactory
from twimp.client import connect_client_factory
from twimp.primitives import _s_uchar
from twimp.vecbuf import VecBuf
from twimp.helpers import ellip
LOG_CATEGORY = 'livepubcli'
import twimp.log
log = twimp.log.get_logger(LOG_CATEGORY)
try:
    from fractions import Fraction
except ImportError:
    # Python < 2.6 fallback: a minimal rational type exposing only
    # .numerator / .denominator, parsed from "N/D" or decimal strings.
    class Fraction(object):
        def __init__(self, string):
            if '/' in string:
                n, d = string.split('/', 1)
                try:
                    num, denom = int(n), int(d)
                except ValueError:
                    raise ValueError('Invalid rational value: %r' % (string,))
            else:
                import decimal
                try:
                    dec = decimal.Decimal(string).normalize()
                except decimal.DecimalException:
                    raise ValueError('Invalid rational value: %r' % (string,))
                # a negative decimal exponent means fractional digits: scale
                # them away into the denominator
                exp = -dec.as_tuple()[2]
                if exp > 0:
                    denom = 10 ** exp
                    num = int(dec * denom)
                else:
                    num, denom = int(dec), 1
            self.numerator = num
            self.denominator = denom

        def __repr__(self):
            return '%s("%d/%d")' % (self.__class__.__name__,
                                    self.numerator, self.denominator)

        def __str__(self):
            return '%d/%d' % (self.numerator, self.denominator)
# Pre-compiled packers for FLV/RTMP tag header bytes.
_s_double_uchar = struct.Struct('>BB')
_s_h264video = struct.Struct('>BBbH')  # flags+codec, frame type,
                                       # signed 24-bit cts value split
                                       # into 1+2 bytes
class Codec(object):
    """Base class for encoder wrappers built from a gst-launch description."""

    # gst-launch style template, filled in with the keyword attributes given
    # to createBin(); subclasses must override.
    description = None

    def createBin(self, **attributes):
        """Create the encoder bin and attach caps-change debug probes."""
        codecBin = self.createBinReal(**attributes)
        self.addProbes(codecBin)
        return codecBin

    def createBinReal(self, **attributes):
        # NOTE(review): relies on a module-global `gst`, apparently imported
        # elsewhere after command-line parsing (see header comment) -- confirm.
        description = self.description % attributes
        return gst.parse_bin_from_description(description,
                                              ghost_unconnected_pads=True)

    def addProbes(self, codecBin):
        # log caps negotiation on both ends of the bin
        for padName in ("sink", "src"):
            pad = codecBin.get_pad(padName)
            pad.connect("notify::caps", self.padNotifyCaps)

    def padNotifyCaps(self, pad, pspec):
        log.debug("pad %s negotiated caps %s", pad, pad.get_negotiated_caps())

    def getHeadersFromCaps(self, caps):
        """Return a list of codec header buffers to send first, or None."""
        return None
class VideoCodec(Codec):
    # marker base class for video encoders
    codecType = "video"
class AudioCodec(Codec):
    # marker base class for audio encoders
    codecType = "audio"
class H263VideoCodec(VideoCodec):
    codecId = 2  # FLV video codec id used in the RTMP tag header
    name = 'h263'
    description = ("ffenc_flv bitrate=%(bitrate)d "
                   "max-key-interval=%(key_interval)d")

    def createBinReal(self, **attributes):
        # CLI works in kbit/s; scale to bit/s for the encoder element
        attributes['bitrate'] *= 1000
        return super(H263VideoCodec, self).createBinReal(**attributes)
class SorensonVideoCodec(H263VideoCodec):
    # alternate command-line name for the same encoder setup as 'h263'
    name = 'sorenson'
class H264VideoCodec(VideoCodec):
    codecId = 7  # mapped to 'avc1' in the stream metadata below
    name = 'h264'
    description = ("x264enc bitrate=%(bitrate)g bframes=0 b-adapt=false "
                   "me=2 subme=6 cabac=false key-int-max=%(key_interval)d")

    def getHeadersFromCaps(self, caps):
        # codec_data from the negotiated caps is sent ahead of the stream
        # (Python 2 `buffer` object)
        return [buffer(caps[0]['codec_data'])]
class MP3AudioCodec(AudioCodec):
    codecId = 2
    name = 'mp3'
    # note: unlike aac/speex, bitrate is passed through unscaled here
    description = "lame bitrate=%(bitrate)d"
class AACAudioCodec(AudioCodec):
    codecId = 10
    name = 'aac'
    description = "faac bitrate=%(bitrate)d"

    def createBinReal(self, **attributes):
        # CLI works in kbit/s; scale to bit/s for the encoder element
        attributes['bitrate'] *= 1000
        return super(AACAudioCodec, self).createBinReal(**attributes)

    def getHeadersFromCaps(self, caps):
        # codec_data from the negotiated caps is sent ahead of the stream
        return [buffer(caps[0]['codec_data'])]
class SpeexAudioCodec(AudioCodec):
    codecId = 11
    name = 'speex'
    description = 'speexenc bitrate=%(bitrate)d '

    def createBinReal(self, **attributes):
        # CLI works in kbit/s; scale to bit/s for the encoder element
        attributes['bitrate'] *= 1000
        return super(SpeexAudioCodec, self).createBinReal(**attributes)

    def getHeadersFromCaps(self, caps):
        # flash doesn't want speex headers
        # return map(buffer, caps[0]['streamheader'])
        return None
class NoCodec(object):
    # sentinel selectable as 'none' on the command line; NewGstSource treats
    # it as "disable this track"
    name = 'none'
def codecMap(*codecs):
    """Instantiate each codec class and key the instances by codec name."""
    mapping = {}
    for codec_class in codecs:
        mapping[codec_class.name] = codec_class()
    return mapping
# name -> codec-instance registries consumed by the command line parser
video_codecs = codecMap(H263VideoCodec, SorensonVideoCodec,
                        H264VideoCodec, NoCodec)
audio_codecs = codecMap(MP3AudioCodec, AACAudioCodec, SpeexAudioCodec, NoCodec)
class NewGstSource(object):
    """GStreamer capture+encode source: pulls encoded buffers from appsinks
    (on GStreamer threads), wraps them as FLV/RTMP tags and hands them to a
    twimp stream via reactor.callFromThread()."""

    def __init__(self, audiobitrate, videobitrate, audiorate,
                 channels, framerate, width, height, keyrate, audio_codec=None,
                 video_codec=None, audiosrc='pulsesrc', videosrc='v4l2src',
                 audio_opts='', video_opts='', videosink='xvimagesink', view=False,
                 view_window=None):
        # default args translate to:
        #   aac: 64kbit, 44.1kHz, 16-bit, mono
        #   h264: 400kbit: 320 x 240 @ 15fps, 5 secs key interval
        if audio_codec is None:
            audio_codec = AACAudioCodec()
        elif isinstance(audio_codec, NoCodec):
            audio_codec = None
        if video_codec is None:
            video_codec = H264VideoCodec()
        elif isinstance(video_codec, NoCodec):
            video_codec = None
        if audiorate is None:
            # speex (codecId 11) defaults to 32 kHz, everything else 44.1 kHz
            if audio_codec is not None and audio_codec.codecId == 11:
                audiorate = 32000
            else:
                audiorate = 44100
        self.audio_codec = audio_codec
        self.audio_bitrate = audiobitrate
        self.audio_rate = audiorate
        self.channels = channels
        self.audio_opts = audio_opts
        self.video_codec = video_codec
        self.video_bitrate = videobitrate
        self.framerate = framerate
        self.width = width
        self.height = height
        self.key_rate = keyrate
        # keyframe interval in frames = seconds * fps, rounded
        self.key_interval = int(round(keyrate * framerate.numerator /
                                      float(framerate.denominator)))
        self.video_opts = video_opts
        self.videosink = videosink
        self.view = view
        self.view_window = view_window
        self.audiosrc = audiosrc
        self.videosrc = videosrc
        self.pipeline = None        # gst.Pipeline, built lazily in start()
        self._stream = None         # twimp stream we publish into
        self.asink = None           # audio appsink
        self.audio_caps = None
        self.vsink = None           # video appsink
        self.video_caps = None
        self.video_avc_profile = None
        self.video_avc_level = None
        self._ah1 = 0               # first FLV audio tag header byte
        self._header_audio = None   # pre-packed per-frame audio tag prefix
        # AAC (codecId 10) frames carry a 2-byte audio tag header
        self.complex_audio_header = (self.audio_codec is not None and
                                     self.audio_codec.codecId == 10)

    def _make_pipeline_audio(self):
        """Build the audio branch:
        src ! queue ! audiorate ! audioconvert ! capsfilter ! tee ! enc ! appsink."""
        source = gst.element_factory_make(self.audiosrc)
        queue = gst.element_factory_make("queue")
        audiorate = gst.element_factory_make("audiorate")
        audioconvert = gst.element_factory_make("audioconvert")
        capsfilter = gst.element_factory_make("capsfilter")
        tee = gst.element_factory_make("tee")
        encoder = self.audio_codec.createBin(bitrate=self.audio_bitrate)
        self.asink = sink = gst.element_factory_make("appsink")
        sink.props.async = True
        # set output rate and channels
        filter_caps = gst.Caps()
        audio_formats = ("audio/x-raw-int", "audio/x-raw-float")
        for audio_format in audio_formats:
            structure = gst.Structure(audio_format)
            structure["rate"] = self.audio_rate
            structure["channels"] = self.channels
            filter_caps.append_structure(structure)
        capsfilter.props.caps = filter_caps
        audioBin = gst.Bin()
        audioBin.add(source, queue, audiorate, audioconvert, capsfilter,
                     tee, encoder, sink)
        gst.element_link_many(source, queue, audiorate, audioconvert,
                              capsfilter, tee, encoder, sink)
        return audioBin

    def _make_pipeline_video(self):
        """Build the video branch (plus an optional local preview off the tee)."""
        videosrc = gst.element_factory_make(self.videosrc)
        queue = gst.element_factory_make("queue")
        videorate = gst.element_factory_make("videorate")
        ffmpegcolorspace = gst.element_factory_make("ffmpegcolorspace")
        videoscale = gst.element_factory_make("videoscale")
        capsfilter = gst.element_factory_make("capsfilter")
        tee = gst.element_factory_make("tee")
        encoder = self.video_codec.createBin(bitrate=self.video_bitrate,
                                             key_interval=self.key_interval)
        self.vsink = sink = gst.element_factory_make("appsink")
        sink.props.async = True
        # set video resolution/framerate
        filter_caps = gst.Caps()
        video_formats = ("video/x-raw-yuv", "video/x-raw-rgb")
        for video_format in video_formats:
            structure = gst.Structure(video_format)
            structure["width"] = self.width
            structure["height"] = self.height
            structure["framerate"] = gst.Fraction(self.framerate.numerator,
                                                  self.framerate.denominator)
            filter_caps.append_structure(structure)
        capsfilter.props.caps = filter_caps
        videoBin = gst.Bin()
        videoBin.add(videosrc, queue, videorate, ffmpegcolorspace, videoscale,
                     capsfilter, tee, encoder, sink)
        gst.element_link_many(videosrc, queue, videorate, ffmpegcolorspace,
                              videoscale, capsfilter, tee, encoder, sink)
        if self.view:
            # local preview branch off the tee
            queue1 = gst.element_factory_make("queue")
            ffmpegcolorspace1 = gst.element_factory_make("ffmpegcolorspace")
            videoscale1 = gst.element_factory_make("videoscale")
            videosink = gst.element_factory_make(self.videosink)
            videoBin.add(queue1, ffmpegcolorspace1, videoscale1, videosink)
            gst.element_link_many(tee, queue1, ffmpegcolorspace1, videoscale1,
                                  videosink)
        return videoBin

    def make_pipeline(self):
        """Assemble the top-level pipeline from the enabled branches."""
        if not (self.audio_codec or self.video_codec):
            raise RuntimeError('no audio nor video codecs specified')
        self.pipeline = gst.Pipeline()
        if self.audio_codec:
            self.pipeline.add(self._make_pipeline_audio())
        if self.video_codec:
            self.pipeline.add(self._make_pipeline_video())

    def connect(self, stream):
        """Attach the twimp stream that frames will be written to."""
        self._stream = stream
        # ...???

    def disconnect(self):
        self._stream = None

    def write_rtmp_meta_headers(self, ts):
        """Send onStatus/onMetaData/@setDataFrame describing the stream."""
        meta = dict(duration=0.0)
        if self.vsink:
            meta.update(width=self.width, height=self.height,
                        framerate=(float(self.framerate.numerator) /
                                   self.framerate.denominator),
                        videodatarate=self.video_bitrate,
                        videokeyframe_frequency=self.key_rate)
            if self.video_codec.codecId == 7:
                meta.update(videocodecid='avc1',
                            avcprofile=self.video_avc_profile,
                            avclevel=self.video_avc_level)
        if self.asink:
            meta.update(audiosamplerate=self.audio_rate,
                        audiodatarate=self.audio_bitrate,
                        audiochannels=self.channels)
            if self.audio_codec.codecId == 2:
                meta.update(audiocodecid='.mp3')
            elif self.audio_codec.codecId == 10:
                meta.update(audiocodecid='mp4a')
        wm = self._stream.write_meta
        wm(ts, amf0.encode('onStatus',
                           amf0.Object(code='NetStream.Data.Start')))
        wm(ts, amf0.encode('onMetaData', meta))
        wm(ts, amf0.encode('@setDataFrame', 'onMetaData', meta))

    def prepare_audio(self):
        """Precompute the per-frame FLV audio tag header byte(s)."""
        log.info('prepare_audio, caps: "%s"', self.audio_caps)
        if self.audio_codec.codecId in (10, 11):  # aac and speex are always
                                                  # marked 44kHz, stereo
            rate = 44100
            channels = 2
        else:
            rate = self.audio_rate
            channels = self.channels
        # header byte: codec id (4 bits) | rate (2) | size (1) | channels (1)
        h1 = self.audio_codec.codecId << 4
        h1 |= {44100: 3, 22050: 2, 11025: 1}.get(rate, 3) << 2
        h1 |= 1 << 1  # always 16-bit size
        h1 |= int(channels == 2)
        self._ah1 = h1
        if self.complex_audio_header:
            self._header_audio = _s_double_uchar.pack(h1, 1)  # h1 | "keyframe"
        else:
            self._header_audio = _s_uchar.pack(h1)

    def prepare_video(self):
        """Select the tag builder and extract AVC profile/level if h264."""
        log.info('prepare_video, caps: "%s"', self.video_caps)
        if self.video_codec.codecId == 7:
            codec_data = self.video_caps[0]['codec_data']
            # bytes 1 and 3 of codec_data carry the profile and level
            self.video_avc_profile = ord(codec_data[1])
            self.video_avc_level = ord(codec_data[3])
            self._make_rtmp_video = self._make_rtmp_video_complex
        else:
            self._make_rtmp_video = self._make_rtmp_video_simple

    def write_rtmp_audio_headers(self, ts, codec_data):
        # packet type 0 marks a codec config packet (vs 1 for frames)
        h = _s_double_uchar.pack(self._ah1, 0)
        for buf in codec_data:
            self._stream.write_audio(ts, VecBuf([h, buf]))

    def write_rtmp_video_headers(self, ts, codec_data):
        # 0x10 = keyframe flag; packet type 0 = codec config, cts 0
        h = _s_h264video.pack(0x10 | self.video_codec.codecId, 0, 0, 0)
        for buf in codec_data:
            self._stream.write_video(ts, VecBuf([h, buf]))

    def make_rtmp_audio(self, gst_buf):
        """Wrap an encoded audio buffer with the precomputed tag header."""
        return VecBuf([self._header_audio, buffer(gst_buf)])

    def _audio_data_cb(self, element):
        # runs on a GStreamer streaming thread
        buf = element.emit('pull-buffer')
        ts = int(buf.timestamp / 1000000.0)  # ns -> ms
        ts = ts % 0x100000000  # api clients are supposed to handle
                               # timestamp wraps (4-bytes only)
        log.debug('[A] %5d [%d] %s', int(ts), int(buf.duration / 1000000),
                  ellip(str(buf).encode('hex'), maxlen=102))
        frame = self.make_rtmp_audio(buf)
        reactor.callFromThread(self._stream.write_audio, ts, frame)

    def _make_rtmp_video_simple(self, gst_buf):
        # 1-byte tag header: frame type (key/inter) | codec id
        flags = 0x10
        if gst_buf.flags & gst.BUFFER_FLAG_DELTA_UNIT:
            flags = 0x20
        return VecBuf([_s_uchar.pack(flags | self.video_codec.codecId),
                       buffer(gst_buf)])

    def _make_rtmp_video_complex(self, gst_buf):
        # h264: extra packet-type byte (1 = NALU) and 24-bit cts (0 here)
        flags = 0x10
        if gst_buf.flags & gst.BUFFER_FLAG_DELTA_UNIT:
            flags = 0x20
        return VecBuf([_s_h264video.pack(flags | self.video_codec.codecId, 1, 0, 0),
                       buffer(gst_buf)])

    def make_rtmp_video(self, gst_buf):
        # dispatches to simple/complex builder chosen in prepare_video()
        return self._make_rtmp_video(gst_buf)

    def _video_data_cb(self, element):
        # runs on a GStreamer streaming thread
        buf = element.emit('pull-buffer')
        ts = int(buf.timestamp / 1000000.0)  # ns -> ms
        ts = ts % 0x100000000
        log.debug('[V] %5d [%d] %s', int(ts), int(buf.duration / 1000000),
                  ellip(str(buf).encode('hex'), maxlen=102))
        frame = self.make_rtmp_video(buf)
        reactor.callFromThread(self._stream.write_video, ts, frame)

    def _got_caps_cb(self, pad, args):
        # one-shot caps notification per sink pad; once all pending pads
        # reported, finish setup
        cid, is_video = self._caps_pending.pop(pad)
        pad.disconnect(cid)
        caps = pad.get_negotiated_caps()
        if is_video:
            self.video_caps = caps
        else:
            self.audio_caps = caps
        if not self._caps_pending:
            self._have_caps()

    def _have_caps(self):
        self._attach_sink_callbacks()
        if self.vsink:
            self.prepare_video()
        if self.asink:
            self.prepare_audio()
        self._send_headers()

    def _send_headers(self):
        reactor.callFromThread(self.write_rtmp_meta_headers, 0)
        # first send headers for formats that need headers
        if self.vsink:
            headers = self.video_codec.getHeadersFromCaps(self.video_caps)
            if headers is not None:
                reactor.callFromThread(self.write_rtmp_video_headers, 0,
                                       headers)
        if self.asink:
            headers = self.audio_codec.getHeadersFromCaps(self.audio_caps)
            if headers is not None:
                reactor.callFromThread(self.write_rtmp_audio_headers, 0,
                                       headers)

    def _build_pipeline(self):
        """Build the pipeline and register caps watchers on the sink pads."""
        self.make_pipeline()
        log.info('pipeline: %r', self.pipeline)
        self._caps_pending = {}
        if self.audio_codec:
            pad = self.asink.get_static_pad('sink')
            cid = pad.connect('notify::caps', self._got_caps_cb)
            self._caps_pending[pad] = (cid, 0)  # 0 => audio
        if self.video_codec:
            pad = self.vsink.get_static_pad('sink')
            cid = pad.connect('notify::caps', self._got_caps_cb)
            self._caps_pending[pad] = (cid, 1)  # 1 => video

    def _attach_sink_callbacks(self):
        # start receiving 'new-buffer' signals from the appsinks
        if self.asink:
            self.asink.connect('new-buffer', self._audio_data_cb)
            self.asink.props.emit_signals = True
        if self.vsink:
            self.vsink.connect('new-buffer', self._video_data_cb)
            self.vsink.props.emit_signals = True

    def _bus_message_eos_cb(self, bus, message):
        log.info("eos, quitting")
        reactor.stop()

    def _bus_message_error_cb(self, bus, message):
        gerror, debug = message.parse_error()
        log.error("%s -- %s", gerror.message, debug)
        reactor.stop()

    def _bus_sync_message_element_cb(self, bus, message):
        # embed the preview video in the caller-provided X window, if any
        structure = message.structure
        if structure.get_name() == 'prepare-xwindow-id':
            if self.view_window is None:
                # let the sink create a window
                return
            sink = message.src
            sink.set_xwindow_id(self.view_window)

    def start(self):
        """Start the pipeline; stop the reactor if preroll times out."""
        if self.pipeline is None:
            self._build_pipeline()
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message::eos', self._bus_message_eos_cb)
        bus.connect('message::error', self._bus_message_error_cb)
        bus.enable_sync_message_emission()
        bus.connect('sync-message::element', self._bus_sync_message_element_cb)
        self.pipeline.set_state(gst.STATE_PLAYING)
        msg = bus.timed_pop_filtered(20 * gst.SECOND,
                                     gst.MESSAGE_ERROR |
                                     gst.MESSAGE_ASYNC_DONE)
        if msg is None:
            log.error("timeout: pipeline failed to preroll")
            reactor.stop()

    def stop(self):
        if self.pipeline:
            self.pipeline.set_state(gst.STATE_NULL)
class SimplePublishingApp(BaseClientApp):
    """Client app that creates a single stream and publishes the given
    source on it as soon as the RTMP connection is established."""

    def __init__(self, protocol, publish_source, publish_name='livestream'):
        BaseClientApp.__init__(self, protocol)
        self._stream = None
        self.publish_name = publish_name
        self.publish_source = publish_source

    def get_connect_params(self):
        # extra "connect" command parameters; all left disabled for now
        return dict(
            # videoCodecs=252,
            # audioCodecs=3191,
            # videoFunction=1,
            # capabilities=15,
            )

    def connectionMade(self, info):
        log.info('Yay, connected! (%r)', info)
        # assert server (info) supports necessary codecs?
        d = self.createStream()
        d.addCallbacks(self._start_streaming, self._disconnect)

    def _start_streaming(self, stream):
        self._stream = stream
        stream.publish(self.publish_name, self.publish_source)

    def _disconnect(self, failure):
        # stream creation failed - drop the whole connection
        self.disconnect()

    def connectionLost(self, reason):
        log.info('app disconnected: %s', reason.getErrorMessage())
        if self._stream:
            self.closeStream(self._stream, force=True)
            self._stream = None
        self.publish_source = None

    def connectionFailed(self, reason):
        log.error("app couldn't connect: %s", reason.getErrorMessage())
class OneTimeAppFactory(SimpleAppClientFactory):
    """Factory that stops the reactor once its single connection ends."""

    def clientConnectionFailed(self, _connector, reason):
        log.error('connecting to %r failed: %s', self.url,
                  reason.getErrorMessage())
        reactor.stop()

    def clientConnectionLost(self, _connector, reason):
        log.info('lost connection: %s', reason.getErrorMessage())
        if reactor.running:
            reactor.stop()
def run(url, stream_name='livestream', audio_codec=None, video_codec=None,
        audio_bitrate=64, video_bitrate=400, samplerate=44100,
        framerate=Fraction('15/1'), channels=1,
        width=320, height=240, keyframe_interval=5.0, view=False,
        view_window=None):
    """Build a GStreamer capture source, connect to the RTMP server at
    *url*, publish the source under *stream_name*, and block in the
    reactor loop until the connection ends (OneTimeAppFactory stops the
    reactor on disconnect/failure).
    """
    src = NewGstSource(audio_bitrate, video_bitrate,
                       samplerate, channels, framerate, width, height,
                       keyframe_interval, audio_codec=audio_codec,
                       video_codec=video_codec, view=view,
                       view_window=view_window)
    # The factory registers itself with the reactor as a side effect; the
    # return value was previously bound to an unused local.
    connect_client_factory(url, OneTimeAppFactory,
                           SimplePublishingApp,
                           src,
                           stream_name)
    reactor.run()
def main(argv):
    """Parse command-line options from *argv* and start publishing.

    *argv* is a full argument vector including the program name, so the
    server URL is args[1] and the optional stream name args[2].
    """
    import optparse

    def _check_fraction(option, opt, value):
        # Custom optparse type checker: parse "N/D" strings as Fraction.
        try:
            return Fraction(value)
        except ValueError:
            raise optparse.OptionValueError('option %s: invalid rational value:'
                                            ' %r' % (opt, value))
    # Register the "Fraction" option type used by --frame-rate below.
    optparse.Option.TYPES += ('Fraction',)
    optparse.Option.TYPE_CHECKER['Fraction'] = _check_fraction
    default_streamname = 'livestream'
    usage = '%prog [options] URL [STREAM-NAME]'
    epilog = ('URL should be of the form: rtmp://host[:port]/app, '
              'STREAM-NAME defaults to "%s".' % (default_streamname,))
    parser = optparse.OptionParser(usage=usage, epilog=epilog)
    audio_choices = sorted(audio_codecs.keys())
    video_choices = sorted(video_codecs.keys())
    parser.add_option('-a', '--audio-codec', action='store', dest='acodec',
                      default='aac',
                      choices=audio_choices,
                      help=('one of: %s; (default: %%default)' %
                            ', '.join(audio_choices)))
    parser.add_option('-v', '--video-codec', action='store', dest='vcodec',
                      default='h264',
                      choices=video_choices,
                      help=('one of: %s; (default: %%default)' %
                            ', '.join(video_choices)))
    parser.add_option('-r', '--audio-bitrate', action='store', dest='arate',
                      type='float',
                      help='audio bitrate in kbit/s (default: %default)',
                      default=64.0)
    parser.add_option('-R', '--video-bitrate', action='store', dest='vrate',
                      type='float',
                      help='video bitrate in kbit/s (default: %default)',
                      default=400.0)
    parser.add_option('-s', '--sample-rate', action='store', dest='srate',
                      type='int',
                      help=('audio sample rate in Hz (default: speex: 32000, '
                            'other codecs: 44100)'))
    parser.add_option('-f', '--frame-rate', action='store', dest='frate',
                      type='Fraction',
                      help='video frame rate in frames/sec (default: %default)',
                      # BUG FIX: optparse does not run defaults through the
                      # type checker, so the old default='15/1' reached run()
                      # as a plain string; pass an already-built Fraction.
                      default=Fraction('15/1'))
    parser.add_option('-c', '--channels', action='store', dest='channels',
                      type='int',
                      help='number of audio channels (default: %default)',
                      default=1)
    parser.add_option('-W', '--width', action='store', dest='width',
                      type='int',
                      help='video width (default: %default)',
                      default=320)
    parser.add_option('-H', '--height', action='store', dest='height',
                      type='int',
                      help='video height (default: %default)',
                      default=240)
    parser.add_option('-k', '--keyframe-interval', action='store',
                      dest='keyint', type='float',
                      help='keyframe interval in seconds (default: %default)',
                      default=5.0)
    parser.add_option('-d', '--debug', action='store', dest='debug',
                      help=('comma separated list of "[CATEGORY:]LEVEL" log'
                            ' level specifiers'),
                      metavar='LEVELS')
    parser.add_option('-w', '--view', action='store_true', dest='view',
                      help='view what is being published',
                      default=False)
    options, args = parser.parse_args(argv)
    # args[0] is the program name, so fewer than 2 entries means no URL.
    if len(args) < 2:
        parser.error('No server URL specified.')
    url = args[1]
    stream_name = default_streamname
    if len(args) > 2:
        stream_name = args[2]
    audio_codec = audio_codecs.get(options.acodec, None)
    video_codec = video_codecs.get(options.vcodec, None)
    if video_codec is None and audio_codec is None:
        parser.error("Can't disable both audio and video.")
    twimp.log.set_levels_from_env()
    if options.debug:
        twimp.log.set_levels(options.debug)
    twimp.log.hook_twisted()
    run(url, stream_name, audio_codec=audio_codec, video_codec=video_codec,
        audio_bitrate=options.arate, video_bitrate=options.vrate,
        samplerate=options.srate, framerate=options.frate,
        channels=options.channels, width=options.width, height=options.height,
        keyframe_interval=options.keyint, view=options.view)
if __name__ == '__main__':
    import sys
    # Stash the real argv and truncate sys.argv to just the program name
    # before importing gst — presumably because pygst inspects/consumes
    # sys.argv on import (TODO confirm); main() gets the saved copy.
    args, sys.argv[:] = sys.argv[:], sys.argv[0:1]
    import gst
    main(args)
| |
import os
import sys
import numpy as np
import pygame
from pygame.constants import K_w
from .. import base
class BirdPlayer(pygame.sprite.Sprite):
    """The player-controlled bird sprite: vertical physics plus a cycling
    three-frame flap animation."""

    def __init__(self,
                 SCREEN_WIDTH, SCREEN_HEIGHT, init_pos,
                 image_assets, rng, color="red", scale=1.0):

        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        # NOTE(review): defined but never used — update() cycles frames
        # 0,1,2 directly instead of following this order.
        self.image_order = [0, 1, 2, 1]
        # done image stuff

        pygame.sprite.Sprite.__init__(self)

        self.image_assets = image_assets

        self.init(init_pos, color)

        self.height = self.image.get_height()
        self.scale = scale

        # all in terms of y
        self.vel = 0
        self.FLAP_POWER = 9*self.scale
        self.MAX_DROP_SPEED = 10.0
        self.GRAVITY = 1.0*self.scale

        self.rng = rng

        self._oscillateStartPos()  # makes the direction and position random
        self.rect.center = (self.pos_x, self.pos_y)  # could be done better

    def init(self, init_pos, color):
        # set up the surface we draw the bird too
        self.flapped = True  # start off w/ a flap
        self.current_image = 0
        self.color = color
        self.image = self.image_assets[self.color][self.current_image]
        self.rect = self.image.get_rect()
        self.thrust_time = 0.0
        self.tick = 0
        self.pos_x = init_pos[0]
        self.pos_y = init_pos[1]

    def _oscillateStartPos(self):
        # Random vertical offset in [0, 8] pixels (sin of a value in [0, pi]
        # is non-negative), so the start height varies slightly per episode.
        offset = 8*np.sin( self.rng.rand() * np.pi )
        self.pos_y += offset

    def flap(self):
        # Ignore flaps once the bird is far above the top of the screen.
        if self.pos_y > -2.0*self.image.get_height():
            self.vel = 0.0
            self.flapped = True

    def update(self, dt):
        self.tick += 1

        # image cycle
        if (self.tick + 1) % 15 == 0:
            self.current_image += 1

            if self.current_image >= 3:
                self.current_image = 0

            # set the image to draw with.
            self.image = self.image_assets[self.color][self.current_image]
            self.rect = self.image.get_rect()

        # Gravity is suspended while flap thrust is being applied.
        if self.vel < self.MAX_DROP_SPEED and self.thrust_time == 0.0:
            self.vel += self.GRAVITY

        # the whole point is to spread this out over the same time it takes in 30fps.
        if self.thrust_time+dt <= (1.0/30.0) and self.flapped:
            self.thrust_time += dt
            self.vel += -1.0*self.FLAP_POWER
        else:
            self.thrust_time = 0.0
            self.flapped = False

        self.pos_y += self.vel
        self.rect.center = (self.pos_x, self.pos_y)

    def draw(self, screen):
        # NOTE(review): blits at rect.center rather than rect.topleft, so
        # the drawn position is offset from the collision rect.
        screen.blit(self.image, self.rect.center)
class Pipe(pygame.sprite.Sprite):
    """A pair of upper/lower pipes drawn on one tall surface, scrolling
    leftwards at a fixed speed."""

    def __init__(self,
                 SCREEN_WIDTH, SCREEN_HEIGHT, gap_start, gap_size, image_assets, scale,
                 offset=0, color="green"):

        self.speed = 4.0*scale
        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.image_assets = image_assets
        # done image stuff

        # All pipe sprites share the same width.
        self.width = self.image_assets["green"]["lower"].get_width()
        pygame.sprite.Sprite.__init__(self)

        # One screen-high surface holds both pipe halves; black is
        # transparent via the colorkey.
        self.image = pygame.Surface((self.width, self.SCREEN_HEIGHT))
        self.image.set_colorkey((0, 0, 0))

        self.init(gap_start, gap_size, offset, color)

    def init(self, gap_start, gap_size, offset, color):
        """(Re)position this pipe pair with a fresh gap and color."""
        self.image.fill((0, 0, 0))
        self.gap_start = gap_start
        # Spawn just past the right edge of the screen.
        self.x = self.SCREEN_WIDTH + self.width + offset

        self.lower_pipe = self.image_assets[color]["lower"]
        self.upper_pipe = self.image_assets[color]["upper"]

        upper_y = gap_start - self.upper_pipe.get_height()
        lower_y = gap_start + gap_size
        self.image.blit(self.upper_pipe, (0, upper_y))
        self.image.blit(self.lower_pipe, (0, lower_y))

        self.rect = self.image.get_rect()
        self.rect.center = (self.x, self.SCREEN_HEIGHT/2)

    def update(self, dt):
        self.x -= self.speed
        self.rect.center = (self.x, self.SCREEN_HEIGHT/2)
class Backdrop():
    """Static background image plus a horizontally scrolling ground strip."""

    def __init__(self, SCREEN_WIDTH, SCREEN_HEIGHT, image_background, image_base, scale):
        self.SCREEN_WIDTH = SCREEN_WIDTH
        self.SCREEN_HEIGHT = SCREEN_HEIGHT
        self.background_image = image_background
        self.base_image = image_base

        self.x = 0
        self.speed = 4.0*scale
        # The base image is wider than the background; this is how far it
        # can scroll left before wrapping back to 0.
        self.max_move = self.base_image.get_width() - self.background_image.get_width()

    def update_draw_base(self, screen, dt):
        """Scroll the ground strip one step and blit it near the bottom."""
        # the extra is on the right
        if self.x <= -1*self.max_move:
            self.x = 0
        else:
            self.x -= self.speed
        screen.blit(self.base_image, (self.x, self.SCREEN_HEIGHT*0.79))

    def draw_background(self, screen):
        screen.blit(self.background_image, (0, 0))
class FlappyBird(base.Game):
    """
    Used physics values from sourabhv's `clone`_.

    .. _clone: https://github.com/sourabhv/FlapPyBird

    Parameters
    ----------
    width : int (default: 288)
        Screen width. Consistent gameplay is not promised for different widths or heights, therefore the width and height should not be altered.
    height : int (default: 512)
        Screen height.
    pipe_gap : int (default: 100)
        The gap in pixels left between the top and bottom pipes.
    """

    def __init__(self, width=288, height=512, pipe_gap=100):
        actions = {
            "up": K_w
        }

        fps = 30

        base.Game.__init__(self, width, height, actions=actions)

        self.scale = 30.0/fps

        self.allowed_fps = 30  # restrict the fps
        # BUG FIX: the pipe_gap argument used to be ignored (hard-coded 100).
        self.pipe_gap = pipe_gap
        self.pipe_color = "red"
        self.images = {}

        # so we can preload images
        pygame.display.set_mode((1, 1), pygame.NOFRAME)

        self._dir_ = os.path.dirname(os.path.abspath(__file__))
        self._asset_dir = os.path.join(self._dir_, "assets/")
        self._load_images()

        self.pipe_offsets = [0, self.width*0.5, self.width]
        self.init_pos = (
            int(self.width * 0.2),
            int(self.height / 2)
        )

        # Valid range for the top of the gap.
        self.pipe_min = int(self.pipe_gap/4)
        self.pipe_max = int(self.height*0.79*0.6 - self.pipe_gap/2)

        self.backdrop = None
        self.player = None
        self.pipe_group = None

    def _load_images(self):
        # preload and convert all the images so its faster when we reset
        self.images["player"] = {}
        for c in ["red", "blue", "yellow"]:
            image_assets = [
                os.path.join(self._asset_dir, "%sbird-upflap.png" % c),
                os.path.join(self._asset_dir, "%sbird-midflap.png" % c),
                os.path.join(self._asset_dir, "%sbird-downflap.png" % c),
            ]
            self.images["player"][c] = [pygame.image.load(im).convert_alpha() for im in image_assets]

        self.images["background"] = {}
        for b in ["day", "night"]:
            path = os.path.join(self._asset_dir, "background-%s.png" % b)
            self.images["background"][b] = pygame.image.load(path).convert()

        self.images["pipes"] = {}
        for c in ["red", "green"]:
            path = os.path.join(self._asset_dir, "pipe-%s.png" % c)
            self.images["pipes"][c] = {}
            self.images["pipes"][c]["lower"] = pygame.image.load(path).convert_alpha()
            # The upper pipe is the lower one rotated 180 degrees.
            self.images["pipes"][c]["upper"] = pygame.transform.rotate(self.images["pipes"][c]["lower"], 180)

        path = os.path.join(self._asset_dir, "base.png")
        self.images["base"] = pygame.image.load(path).convert()

    def init(self):
        """Reset the episode, lazily creating the sprites on first call."""
        if self.backdrop is None:
            self.backdrop = Backdrop(
                self.width,
                self.height,
                self.images["background"]["day"],
                self.images["base"],
                self.scale
            )

        if self.player is None:
            self.player = BirdPlayer(
                self.width,
                self.height,
                self.init_pos,
                self.images["player"],
                self.rng,
                color="red",
                scale=self.scale
            )

        if self.pipe_group is None:
            self.pipe_group = pygame.sprite.Group([
                self._generatePipes(offset=-75),
                self._generatePipes(offset=-75+self.width/2),
                self._generatePipes(offset=-75+self.width*1.5)
            ])

        color = self.rng.choice(["day", "night"])
        self.backdrop.background_image = self.images["background"][color]

        # instead of recreating
        color = self.rng.choice(["red", "blue", "yellow"])
        self.player.init(self.init_pos, color)

        self.pipe_color = self.rng.choice(["red", "green"])
        for i, p in enumerate(self.pipe_group):
            self._generatePipes(offset=self.pipe_offsets[i], pipe=p)

        self.score = 0.0
        self.lives = 1
        self.tick = 0

    def getGameState(self):
        """
        Gets a non-visual state representation of the game.

        Returns
        -------

        dict
            * player y position.
            * players velocity.
            * next pipe distance to player
            * next pipe top y position
            * next pipe bottom y position
            * next next pipe distance to player
            * next next pipe top y position
            * next next pipe bottom y position

            See code for structure.

        """
        pipes = []
        for p in self.pipe_group:
            if p.x > self.player.pos_x:
                pipes.append((p, p.x - self.player.pos_x))

        # BUG FIX: the old code called sorted() and discarded the result,
        # then picked pipes[1] / pipes[0] from the unsorted list. Sort in
        # place by distance so "next" really is the nearest pipe ahead.
        # NOTE: assumes at least two pipes are ahead of the player, which
        # the recycling in step() maintains.
        pipes.sort(key=lambda pair: pair[1])
        next_pipe = pipes[0][0]
        next_next_pipe = pipes[1][0]

        state = {
            "player_y": self.player.pos_y,
            "player_vel": self.player.vel,

            "next_pipe_dist_to_player": next_pipe.x - self.player.pos_x,
            "next_pipe_top_y": next_pipe.gap_start,
            "next_pipe_bottom_y": next_pipe.gap_start+self.pipe_gap,

            "next_next_pipe_dist_to_player": next_next_pipe.x - self.player.pos_x,
            "next_next_pipe_top_y": next_next_pipe.gap_start,
            "next_next_pipe_bottom_y": next_next_pipe.gap_start+self.pipe_gap
        }

        return state

    def getScore(self):
        return self.score

    def _generatePipes(self, offset=0, pipe=None):
        """Create a new Pipe, or re-initialize *pipe* in place, with a
        random gap position."""
        start_gap = self.rng.random_integers(
            self.pipe_min,
            self.pipe_max
        )

        if pipe is None:
            pipe = Pipe(
                self.width,
                self.height,
                start_gap,
                self.pipe_gap,
                self.images["pipes"],
                self.scale,
                color=self.pipe_color,
                offset=offset
            )

            return pipe
        else:
            pipe.init(start_gap, self.pipe_gap, offset, self.pipe_color)

    def _handle_player_events(self):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()

            if event.type == pygame.KEYDOWN:
                key = event.key
                if key == self.actions['up']:
                    self.player.flap()

    def game_over(self):
        return self.lives <= 0

    def step(self, dt):
        self.tick += 1
        dt = dt / 1000.0

        self.score += self.rewards["tick"]

        # handle player movement
        self._handle_player_events()

        for p in self.pipe_group:
            # NOTE(review): spritecollide is run once per pipe against the
            # whole group, so a collision can cost several lives per frame;
            # kept as-is because lives starts at 1 and the episode ends.
            hit = pygame.sprite.spritecollide(self.player, self.pipe_group, False)
            for h in hit:    # do check to see if its within the gap.
                top_pipe_check = ((self.player.pos_y - self.player.height/2) <= h.gap_start)
                bot_pipe_check = ((self.player.pos_y + self.player.height) > h.gap_start+self.pipe_gap)

                if top_pipe_check:
                    self.lives -= 1

                if bot_pipe_check:
                    self.lives -= 1

            # is it past the player?
            if (p.x - p.width/2) <= self.player.pos_x < (p.x - p.width/2 + 4):
                self.score += self.rewards["positive"]

            # is out out of the screen?
            if p.x < -p.width:
                self._generatePipes(offset=self.width*0.2, pipe=p)

        # fell on the ground
        if self.player.pos_y >= 0.79*self.height - self.player.height:
            self.lives -= 1

        # went above the screen
        if self.player.pos_y < -self.player.height:
            self.lives -= 1

        self.player.update(dt)
        self.pipe_group.update(dt)

        if self.lives <= 0:
            self.score += self.rewards["loss"]

        self.backdrop.draw_background(self.screen)
        self.pipe_group.draw(self.screen)
        self.backdrop.update_draw_base(self.screen, dt)
        self.player.draw(self.screen)
| |
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function
from chainer.functions.activation import log_softmax
from chainer.utils import type_check
from chainer import variable
def _broadcast_to(array, shape):
if hasattr(numpy, 'broadcast_to'):
return numpy.broadcast_to(array, shape)
dummy = numpy.empty(shape, array.dtype)
return numpy.broadcast_arrays(array, dummy)[0]
def _check_class_weight_option(class_weight):
if class_weight is not None:
if class_weight.ndim != 1:
raise ValueError('class_weight.ndim should be 1')
if class_weight.dtype.kind != 'f':
raise ValueError('The dtype of class_weight should be \'f\'')
if isinstance(class_weight, variable.Variable):
raise ValueError('class_weight should be a numpy.ndarray or '
'cupy.ndarray, not a chainer.Variable')
def _check_reduce_option(reduce):
if reduce not in ('mean', 'no'):
raise ValueError(
"only 'mean' and 'no' are valid for 'reduce', but '%s' is "
'given' % reduce)
def _check_input_values(x, t, ignore_label):
    """Raise ValueError if any label in *t* is neither a valid class index
    for *x* (0 <= t < x.shape[1]) nor equal to *ignore_label*."""
    # Extract the raw ndarray as Variable.__ge__ is not implemented.
    # We assume that t is already an ndarray.
    if isinstance(x, variable.Variable):
        x = x.data
    in_range = (0 <= t) & (t < x.shape[1])
    valid = in_range | (t == ignore_label)
    if not valid.all():
        msg = ('Each label `t` need to satisfy '
               '`0 <= t < x.shape[1] or t == %d`' % ignore_label)
        raise ValueError(msg)
class SoftmaxCrossEntropy(function.Function):

    """Softmax activation followed by a cross entropy loss."""

    # Class-level defaults; `y` caches the softmax output when
    # `cache_score` is enabled.
    normalize = True
    y = None

    def __init__(self, normalize=True, cache_score=True, class_weight=None,
                 ignore_label=-1, reduce='mean'):
        self.normalize = normalize
        self.cache_score = cache_score
        _check_class_weight_option(class_weight)
        self.class_weight = class_weight
        self.ignore_label = ignore_label
        _check_reduce_option(reduce)
        self.reduce = reduce

    def check_type_forward(self, in_types):
        type_check.argname(in_types, ('x', 't'))
        x_type, t_type = in_types

        # t has one fewer dimension than x: x's axis 1 is the class axis.
        type_check.expect(
            x_type.dtype.kind == 'f',
            t_type.dtype.kind == 'i',
            t_type.ndim == x_type.ndim - 1,

            x_type.shape[0] == t_type.shape[0],
            x_type.shape[2:] == t_type.shape[1:],
        )

    def forward_cpu(self, inputs):
        """Compute the loss on CPU; returns a scalar for reduce='mean',
        else per-sample losses shaped like t."""
        x, t = inputs
        if chainer.is_debug():
            _check_input_values(x, t, self.ignore_label)

        log_y = log_softmax._log_softmax(x)
        if self.cache_score:
            self.y = numpy.exp(log_y)
        if self.class_weight is not None:
            # Reshape class_weight to broadcast along axis 1 (class axis):
            # d iterates axis indices, so -1 lands at axis 1.
            shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
            log_y *= _broadcast_to(self.class_weight.reshape(shape), x.shape)
        # Move the class axis to the front and flatten the rest so each
        # sample's log-probability can be picked by fancy indexing.
        log_yd = numpy.rollaxis(log_y, 1)
        log_yd = log_yd.reshape(len(log_yd), -1)
        # maximum(t, 0) maps ignored labels (negative) to a safe index;
        # their contribution is zeroed out on the next line.
        log_p = log_yd[numpy.maximum(t.ravel(), 0), numpy.arange(t.size)]

        log_p *= (t.ravel() != self.ignore_label)
        if self.reduce == 'mean':
            # deal with the case where the SoftmaxCrossEntropy is
            # unpickled from the old version
            if self.normalize:
                count = (t != self.ignore_label).sum()
            else:
                count = len(x)
            self._coeff = 1.0 / max(count, 1)

            y = log_p.sum(keepdims=True) * (-self._coeff)
            return y.reshape(()),
        else:
            return -log_p.reshape(t.shape),

    def forward_gpu(self, inputs):
        """GPU counterpart of forward_cpu, using fused reduction/elementwise
        kernels instead of fancy indexing."""
        cupy = cuda.cupy
        x, t = inputs
        if chainer.is_debug():
            _check_input_values(x, t, self.ignore_label)

        # Empty input: nothing to reduce, return zeros of the right shape.
        if x.size == 0:
            y = cupy.zeros(t.shape, dtype=x.dtype)
            if self.cache_score:
                self.y = y
            if self.reduce == 'mean':
                return y.sum(),
            else:
                return y,
        log_y = log_softmax._log_softmax(x)
        if self.cache_score:
            self.y = cupy.exp(log_y)
        if self.class_weight is not None:
            # Same axis-1 broadcast trick as in forward_cpu.
            shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
            log_y *= cupy.broadcast_to(
                self.class_weight.reshape(shape), x.shape)
        if self.normalize:
            coeff = cupy.maximum(1, (t != self.ignore_label).sum())
        else:
            coeff = max(1, len(t))
        self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

        # Move the class axis to the end so the kernels index contiguously.
        log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
        if self.reduce == 'mean':
            ret = cuda.reduce(
                'S t, raw T log_y, int32 n_channel, raw T coeff, '
                'S ignore_label',
                'T out',
                't == ignore_label ? T(0) : log_y[_j * n_channel + t]',
                'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
            )(t, log_y.reduced_view(), log_y.shape[-1],
              self._coeff, self.ignore_label)
        else:
            ret = cuda.elementwise(
                'S t, raw T log_y, int32 n_channel, T ignore', 'T out',
                '''
                if (t == ignore) {
                  out = 0;
                } else {
                  out = -log_y[i * n_channel + t];
                }
                ''',
                'softmax_crossent_no_reduce_fwd'
            )(t, log_y.reduced_view(), log_y.shape[-1], self.ignore_label)
            ret = ret.reshape(t.shape)
        return ret,

    def backward_cpu(self, inputs, grad_outputs):
        """Gradient on CPU: softmax(x) minus the one-hot target, masked at
        ignored labels and scaled by the incoming gradient."""
        x, t = inputs
        gloss = grad_outputs[0]
        if x.size == 0:
            return numpy.zeros(x.shape, dtype=x.dtype), None
        if self.y is not None:
            y = self.y.copy()
        else:
            y = log_softmax._log_softmax(x)
            numpy.exp(y, out=y)
        if y.ndim == 2:
            gx = y
            gx[numpy.arange(len(t)), numpy.maximum(t, 0)] -= 1
            if self.class_weight is not None:
                shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
                c = _broadcast_to(self.class_weight.reshape(shape), x.shape)
                c = c[numpy.arange(len(t)), numpy.maximum(t, 0)]
                gx *= _broadcast_to(numpy.expand_dims(c, 1), gx.shape)
            # Zero the gradient rows of ignored samples.
            gx *= (t != self.ignore_label).reshape((len(t), 1))
        else:
            # in the case where y.ndim is higher than 2,
            # we think that a current implementation is inefficient
            # because it yields two provisional arrays for indexing.
            n_unit = t.size // len(t)
            gx = y.reshape(y.shape[0], y.shape[1], -1)
            fst_index = numpy.arange(t.size) // n_unit
            trd_index = numpy.arange(t.size) % n_unit
            gx[fst_index, numpy.maximum(t.ravel(), 0), trd_index] -= 1
            if self.class_weight is not None:
                shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
                c = _broadcast_to(self.class_weight.reshape(shape), x.shape)
                c = c.reshape(gx.shape)
                c = c[fst_index, numpy.maximum(t.ravel(), 0), trd_index]
                c = c.reshape(y.shape[0], 1, -1)
                gx *= _broadcast_to(c, gx.shape)
            gx *= (t != self.ignore_label).reshape((len(t), 1, -1))
            gx = gx.reshape(y.shape)
        if self.reduce == 'mean':
            gx *= gloss * self._coeff
        else:
            gx *= gloss[:, None]
        return gx, None

    def backward_gpu(self, inputs, grad_outputs):
        """GPU counterpart of backward_cpu, using elementwise kernels."""
        cupy = cuda.cupy
        x, t = inputs
        if x.size == 0:
            return cupy.zeros(x.shape, dtype=x.dtype), None
        if self.y is not None:
            y = self.y
        else:
            y = log_softmax._log_softmax(x)
            cupy.exp(y, out=y)
        gloss = grad_outputs[0]
        n_unit = t.size // len(t)
        if self.reduce == 'mean':
            coeff = gloss * self._coeff
        else:
            coeff = gloss[:, None, ...]

        if self.class_weight is None:
            gx = cuda.elementwise(
                'T y, S t, T coeff, S n_channel, S n_unit, S ignore_label',
                'T gx',
                '''
                    const int c = (i / n_unit % n_channel);
                    gx = t == ignore_label ? 0 : coeff * (y - (c == t));
                ''',
                'softmax_crossent_bwd')(
                    y, cupy.expand_dims(t, 1), coeff, x.shape[1],
                    n_unit, self.ignore_label)
        else:
            gx = cuda.elementwise(
                'T y, raw T w, S t, T coeff, S n_channel, S n_unit, '
                'S ignore_label',
                'T gx',
                '''
                    const int c = (i / n_unit % n_channel);
                    gx = t == ignore_label ? 0 : coeff * (y - (c == t)) * w[t];
                ''',
                'softmax_crossent_weight_bwd')(
                    y, self.class_weight, cupy.expand_dims(t, 1), coeff,
                    x.shape[1], n_unit, self.ignore_label)

        return gx, None
def _double_backward_softmax_cross_entropy(x, t, normalize, class_weight,
                                           ignore_label, reduce):
    """Differentiable (double-backprop capable) softmax cross entropy built
    entirely from chainer functions instead of a hand-written backward."""
    if isinstance(t, variable.Variable):
        t = t.data

    _check_class_weight_option(class_weight)
    _check_reduce_option(reduce)
    if chainer.is_debug():
        _check_input_values(x, t, ignore_label)

    loss = -chainer.functions.log_softmax(x)

    if class_weight is not None:
        # Broadcast class_weight along axis 1 (the class axis).
        shape = [1 if d != 1 else -1 for d in six.moves.range(x.ndim)]
        class_weight = chainer.functions.broadcast_to(
            class_weight.reshape(shape), x.shape)
        loss = loss * class_weight

    # Mask of labels that actually contribute to the loss.
    in_use = (t != ignore_label).astype(x.dtype)

    # Move the class axis last, then flatten so select_item can pick each
    # sample's label column.
    loss = chainer.functions.rollaxis(loss, 1, loss.ndim)
    loss = chainer.functions.reshape(loss, (-1, loss.shape[-1]))

    # Replace ignore_label value with one valid for F.select_item below.
    t = t.clip(0, loss.shape[1] - 1)

    loss = chainer.functions.select_item(loss, t.ravel())
    loss = chainer.functions.reshape(loss, t.shape)

    # Zero out the losses of ignored samples.
    loss = loss * in_use

    if reduce == 'mean':
        if normalize:
            count = in_use.sum()
        else:
            count = len(x)
        count = max(count, 1.)
        loss = loss / count
        return chainer.functions.sum(loss)
    else:
        return loss
def softmax_cross_entropy(
        x, t, normalize=True, cache_score=True, class_weight=None,
        ignore_label=-1, reduce='mean', enable_double_backprop=False):
    """Computes cross entropy loss for pre-softmax activations.

    Args:
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`):
            Unnormalized log probabilities. The first axis indexes samples
            and the second axis indexes classes; with more than two
            dimensions a replicated softmax cross entropy is computed.
        t (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`):
            Signed integer ground-truth labels. Entries equal to
            ``ignore_label`` are excluded from the loss.
        normalize (bool): If ``True``, normalize the loss across all
            instances; if ``False``, normalize only by the batch size.
        cache_score (bool): Store the softmax output during forward to
            reuse on backward (faster, more memory). Ignored — effectively
            off — when ``enable_double_backprop`` is ``True``.
        class_weight (:class:`~chainer.Variable` or \
        :class:`numpy.ndarray` or :class:`cupy.ndarray`):
            Optional per-class weights of shape ``(x.shape[1],)``,
            multiplied into the log-softmax output before the loss.
        ignore_label (int): Label value to ignore (default ``-1``).
        reduce (str): ``'mean'`` returns the normalized sum of the
            per-instance losses; ``'no'`` returns per-instance losses
            (ignored instances get loss ``0``), and ``normalize`` has no
            effect.
        enable_double_backprop (bool): If ``True``, use an implementation
            that supports higher-order differentiation; the default
            single-backprop path is faster.

    Returns:
        ~chainer.Variable: The cross entropy loss — a scalar array when
        ``reduce`` is ``'mean'``, otherwise shaped like ``t``.

    .. note::

       This function is differentiable only by ``x``.

    .. admonition:: Example

        >>> x = np.array([[-1, 0, 1, 2], [2, 0, 1, -1]]).astype(np.float32)
        >>> t = np.array([3, 0]).astype(np.int32)
        >>> y = F.softmax_cross_entropy(x, t)
        >>> y
        variable(0.44018972)

    """
    if not enable_double_backprop:
        return SoftmaxCrossEntropy(
            normalize, cache_score, class_weight, ignore_label, reduce)(x, t)
    return _double_backward_softmax_cross_entropy(
        x, t, normalize, class_weight, ignore_label, reduce)
| |
#!/usr/bin/env python
# encoding: utf-8
"""
Author(s): Matthew Loper
See LICENCE.txt for licensing and contact information.
"""
import time
from numpy import *
import unittest
from . import ch
from .optimization import minimize
from .ch import Ch
import numpy as np
from scipy.optimize import rosen, rosen_der
from .utils import row, col
visualize = False
def Rosen():
    """Build the two Rosenbrock residual terms as Ch expressions and return
    (residuals, free_variables)."""
    x1 = Ch(-120.)
    x2 = Ch(-100.)
    args = {'x1': x1, 'x2': x2}
    r1 = Ch(lambda x1, x2 : (x2 - x1**2.) * 10., args)
    r2 = Ch(lambda x1 : x1 * -1. + 1, args)
    return [r1, r2], [x1, x2]
class Madsen(Ch):
    """Madsen's three-residual test function of two variables."""
    dterms = ('x',)

    def compute_r(self):
        """Residual vector (3,) at the current x."""
        x1, x2 = self.x.r[0], self.x.r[1]
        return np.array((
            x1**2 + x2**2 + x1 * x2,
            np.sin(x1),
            np.cos(x2)
        ))

    def compute_dr_wrt(self, wrt):
        """Analytic 3x2 Jacobian of the residuals w.r.t. x."""
        if wrt is not self.x:
            return None
        x1, x2 = self.x.r[0], self.x.r[1]
        jac = np.zeros((3, 2))
        jac[0, 0] = 2. * x1 + x2
        jac[0, 1] = 2. * x2 + x1
        jac[1, 0] = np.cos(x1)
        jac[2, 1] = -np.sin(x2)
        return jac

    def set_and_get_r(self, x_in):
        self.x = Ch(x_in)
        return col(self.r)

    def set_and_get_dr(self, x_in):
        self.x = Ch(x_in)
        return self.dr_wrt(self.x)
class RosenCh(Ch):
    """Rosenbrock function wrapped as a Ch node, with optional live
    plotting of the optimization trajectory (module-level `visualize`)."""
    dterms = ('x',)

    def compute_r(self):
        # Scalar Rosenbrock value at the current x.
        result = np.array((rosen(self.x.r) ))

        return result

    def set_and_get_r(self, x_in):
        self.x = Ch(x_in)
        return col(self.r)

    def set_and_get_dr(self, x_in):
        self.x = Ch(x_in)
        return self.dr_wrt(self.x).flatten()

    def compute_dr_wrt(self, wrt):
        if wrt is self.x:

            if visualize:
                import matplotlib.pyplot as plt
                residuals = np.sum(self.r**2)
                print('------> RESIDUALS %.2e' % (residuals,))
                print('------> CURRENT GUESS %s' % (str(self.x.r),))
                plt.figure(123)

                # Accumulate the residual / position history across calls.
                if not hasattr(self, 'vs'):
                    self.vs = []
                    self.xs = []
                    self.ys = []
                self.vs.append(residuals)
                self.xs.append(self.x.r[0])
                self.ys.append(self.x.r[1])

                plt.clf();
                plt.subplot(1,2,1)
                plt.plot(self.vs)
                plt.subplot(1,2,2)
                plt.plot(self.xs, self.ys)
                plt.draw()

            # Analytic gradient from scipy, as a row vector.
            return row(rosen_der(self.x.r))
class TestOptimization(unittest.TestCase):
    """End-to-end tests of chumpy's minimize() on small analytic problems."""

    def test_dogleg_rosen(self):
        # Dogleg should reach the exact Rosenbrock minimum at (1, 1).
        obj, freevars = Rosen()
        minimize(fun=obj, x0=freevars, method='dogleg', options={'maxiter': 337, 'disp': False})
        self.assertTrue(freevars[0].r[0]==1.)
        self.assertTrue(freevars[1].r[0]==1.)

    def test_dogleg_madsen(self):
        obj = Madsen(x = Ch(np.array((3.,1.))))
        minimize(fun=obj, x0=[obj.x], method='dogleg', options={'maxiter': 34, 'disp': False})
        # Reference half-sum-of-squares value this run must beat.
        self.assertTrue(np.sum(obj.r**2)/2 < 0.386599528247)

    @unittest.skip('negative sign in exponent screws with reverse mode')
    def test_bfgs_rosen(self):
        from .optimization import minimize_bfgs_lsq
        obj, freevars = Rosen()
        minimize_bfgs_lsq(obj=obj, niters=421, verbose=False, free_variables=freevars)
        self.assertTrue(freevars[0].r[0]==1.)
        self.assertTrue(freevars[1].r[0]==1.)

    def test_bfgs_madsen(self):
        from .ch import SumOfSquares

        import scipy.optimize
        obj = Ch(lambda x : SumOfSquares(Madsen(x = x)) )

        # Assigning obj.x triggers recomputation of obj.r / gradients.
        def errfunc(x):
            obj.x = Ch(x)
            return obj.r

        def gradfunc(x):
            obj.x = Ch(x)
            return obj.dr_wrt(obj.x).ravel()

        x0 = np.array((3., 1.))

        # Optimize with built-in bfgs.
        # Note: with 8 iters, this actually requires 14 gradient evaluations.
        # This can be verified by setting "disp" to 1.
        #tm = time.time()
        x1 = scipy.optimize.fmin_bfgs(errfunc, x0, fprime=gradfunc, maxiter=8, disp=0)
        #print 'forward: took %.es' % (time.time() - tm,)
        self.assertLess(obj.r/2., 0.4)

        # Optimize with chumpy's minimize (which uses scipy's bfgs).
        obj.x = x0
        minimize(fun=obj, x0=[obj.x], method='bfgs', options={'maxiter': 8, 'disp': False})
        self.assertLess(obj.r/2., 0.4)

    def test_nested_select(self):
        # Beale's function residuals, elementwise over vectors x and y.
        def beales(x, y):
            e1 = 1.5 - x + x*y
            e2 = 2.25 - x + x*(y**2)
            e3 = 2.625 - x + x*(y**3)
            return {'e1': e1, 'e2': e2, 'e3': e3}

        x1 = ch.zeros(10)
        y1 = ch.zeros(10)
        # With a single select this worked
        minimize(beales(x1, y1), x0=[x1[1:4], y1], method='dogleg', options={'disp': False})

        x2 = ch.zeros(10)
        y2 = ch.zeros(10)
        # But this used to raise `AttributeError: 'Select' object has no attribute 'x'`
        minimize(beales(x2, y2), x0=[x2[1:8][:3], y2], method='dogleg', options={'disp': False})

        # Optimizing a nested select over the same subrange must give the
        # same result as the single select.
        np.testing.assert_array_equal(x1, x2)
        np.testing.assert_array_equal(y1, y2)
# Test suite for external runners that import this module.
suite = unittest.TestLoader().loadTestsFromTestCase(TestOptimization)

if __name__ == '__main__':
    if False: # show rosen
        # Flip the gate above to True to watch the Rosenbrock trajectory.
        import matplotlib.pyplot as plt
        visualize = True
        plt.ion()
        unittest.main()
        # NOTE(review): unittest.main() normally calls sys.exit(), so this
        # trace point is unlikely to ever be reached.
        import pdb; pdb.set_trace()
    else:
        unittest.main()
| |
# -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class flowbtc (Exchange):
    def describe(self):
        """Return the static exchange description (endpoints, credentials,
        API method names) merged over the base Exchange description."""
        return self.deep_extend(super(flowbtc, self).describe(), {
            'id': 'flowbtc',
            'name': 'flowBTC',
            'countries': 'BR',  # Brazil
            'version': 'v1',
            'rateLimit': 1000,
            'hasCORS': True,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/28162465-cd815d4c-67cf-11e7-8e57-438bea0523a2.jpg',
                'api': 'https://api.flowbtc.com:8400/ajax',
                'www': 'https://trader.flowbtc.com',
                'doc': 'http://www.flowbtc.com.br/api/',
            },
            # This exchange additionally requires a user id besides the
            # usual key/secret pair.
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            # All endpoints are invoked via POST, including public ones.
            'api': {
                'public': {
                    'post': [
                        'GetTicker',
                        'GetTrades',
                        'GetTradesByDate',
                        'GetOrderBook',
                        'GetProductPairs',
                        'GetProducts',
                    ],
                },
                'private': {
                    'post': [
                        'CreateAccount',
                        'GetUserInfo',
                        'SetUserInfo',
                        'GetAccountInfo',
                        'GetAccountTrades',
                        'GetDepositAddresses',
                        'Withdraw',
                        'CreateOrder',
                        'ModifyOrder',
                        'CancelOrder',
                        'CancelAllOrders',
                        'GetAccountOpenOrders',
                        'GetOrderFee',
                    ],
                },
            },
        })
def fetch_markets(self):
response = self.publicPostGetProductPairs()
markets = response['productPairs']
result = []
for p in range(0, len(markets)):
market = markets[p]
id = market['name']
base = market['product1Label']
quote = market['product2Label']
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostGetAccountInfo()
balances = response['currencies']
result = {'info': response}
for b in range(0, len(balances)):
balance = balances[b]
currency = balance['name']
account = {
'free': balance['balance'],
'used': balance['hold'],
'total': 0.0,
}
account['total'] = self.sum(account['free'], account['used'])
result[currency] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
orderbook = self.publicPostGetOrderBook(self.extend({
'productPair': market['id'],
}, params))
return self.parse_order_book(orderbook, None, 'bids', 'asks', 'px', 'qty')
def fetch_ticker(self, symbol, params={}):
    """Fetch a 24h price/volume snapshot for *symbol*.

    The exchange does not supply a timestamp, so the local clock is used.
    """
    self.load_markets()
    request = {'productPair': self.market(symbol)['id']}
    ticker = self.publicPostGetTicker(self.extend(request, params))
    timestamp = self.milliseconds()
    result = {
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'info': ticker,
    }
    # Fields flowBTC does not report.
    for absent in ('vwap', 'open', 'close', 'first', 'change', 'percentage', 'average'):
        result[absent] = None
    # Fields reported directly, parsed to float (same order as the raw keys).
    for unified, raw in (('high', 'high'), ('low', 'low'), ('bid', 'bid'),
                         ('ask', 'ask'), ('last', 'last'),
                         ('baseVolume', 'volume24hr'),
                         ('quoteVolume', 'volume24hrProduct2')):
        result[unified] = float(ticker[raw])
    return result
def parse_trade(self, trade, market):
    """Convert a raw flowBTC trade record into the unified trade structure.

    'unixtime' is in seconds; incomingOrderSide 0 means a buy.
    """
    timestamp = trade['unixtime'] * 1000  # seconds -> milliseconds
    if trade['incomingOrderSide'] == 0:
        side = 'buy'
    else:
        side = 'sell'
    return {
        'info': trade,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': market['symbol'],
        'id': str(trade['tid']),
        'order': None,
        'type': None,
        'side': side,
        'price': trade['px'],
        'amount': trade['qty'],
    }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """Fetch public trades for *symbol* (startIndex=-1 requests the latest)."""
    self.load_markets()
    market = self.market(symbol)
    request = {'ins': market['id'], 'startIndex': -1}
    raw = self.publicPostGetTrades(self.extend(request, params))
    return self.parse_trades(raw['trades'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """Place an order.

    flowBTC encodes the order type numerically: 1 = market, 0 = limit.
    Returns the raw response under 'info' plus the server order id.
    """
    self.load_markets()
    if type == 'market':
        orderType = 1
    else:
        orderType = 0
    request = {
        'ins': self.market_id(symbol),
        'side': side,
        'orderType': orderType,
        'qty': amount,
        'px': price,
    }
    response = self.privatePostCreateOrder(self.extend(request, params))
    return {'info': response, 'id': response['serverOrderId']}
def cancel_order(self, id, symbol=None, params={}):
    """Cancel order *id*.

    flowBTC additionally requires the market id in params['ins'];
    raises ExchangeError when it is missing.
    """
    self.load_markets()
    # Guard clause: fail fast when the mandatory 'ins' param is absent.
    if 'ins' not in params:
        raise ExchangeError(self.id + ' requires `ins` symbol parameter for cancelling an order')
    request = {'serverOrderId': id}
    return self.privatePostCancelOrder(self.extend(request, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    # Build the URL, JSON body and headers for one API request.
    url = self.urls['api'] + '/' + self.version + '/' + path
    if api == 'public':
        # Public endpoints: params (if any) are sent as a JSON body.
        # NOTE(review): no Content-Type header is set on this branch,
        # unlike the private one — presumably the server tolerates it;
        # confirm against the flowBTC API docs.
        if params:
            body = self.json(params)
    else:
        # Private endpoints: sign with HMAC(secret, nonce + uid + apiKey).
        self.check_required_credentials()
        nonce = self.nonce()
        auth = str(nonce) + self.uid + self.apiKey
        signature = self.hmac(self.encode(auth), self.encode(self.secret))
        body = self.json(self.extend({
            'apiKey': self.apiKey,
            'apiNonce': nonce,
            'apiSig': signature.upper(),  # server expects the digest upper-cased
        }, params))
        headers = {
            'Content-Type': 'application/json',
        }
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Issue a request and raise ExchangeError unless the server accepted it.

    A response is considered successful only when it carries a truthy
    'isAccepted' field; anything else is surfaced as an ExchangeError.
    """
    response = self.fetch2(path, api, method, params, headers, body)
    if 'isAccepted' in response and response['isAccepted']:
        return response
    raise ExchangeError(self.id + ' ' + self.json(response))
| |
#!/usr/bin/env python
"""
Copyright 2015 ARC Centre of Excellence for Climate Systems Science
author: Aidan Heerdegen <aidan.heerdegen@anu.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import pytest
import sys
import os
import shutil
import subprocess
import shlex
import datetime, time
from fnmatch import fnmatch
from mdssdiff.mdssdiff import diffdir, parse_args, main
import pdb #; pdb.set_trace()
# Nested test-directory components: the local tree on disk is "1/2/3".
dirs = ["1","2","3"]
dirtree = os.path.join(*dirs)
print(dirtree)
# Test files, each given as path components relative to the working dir.
paths = [ ["1","lala"], ["1","po"], ["1","2","Mickey"], ["1","2","Minny"], ["1","2","Pluto"], ["1","2","3","Ren"], ["1","2","3","Stimpy"] ]
# Remote (mdss) location: <user>/test_mdss mirrors the local tree.
user=os.environ.get('USER','username')
prefix = os.path.join(user,"test_mdss")
dirtreeroot = dirs[0]  # top of the local tree ("1")
verbose=0  # set non-zero for progress messages from the tests
project=os.environ.get('PROJECT','a12')  # NCI project code passed to mdss -P
def touch(fname, times=None):
    """Create *fname* if needed and set its access/modification times.

    ``times`` is a single timestamp applied to both atime and mtime;
    ``None`` means "now" (os.utime semantics).
    Based on http://stackoverflow.com/a/1160227/4727812
    """
    stamp = None if times is None else (times, times)
    with open(fname, 'a'):
        os.utime(fname, stamp)
def runcmd(cmd):
    """Run *cmd* (a shell-style command string); raise CalledProcessError on failure."""
    argv = shlex.split(cmd)
    subprocess.check_call(argv, stderr=subprocess.STDOUT)
def remove_local_files():
    """Delete the local test directory tree, ignoring a missing tree.

    Uses shutil.rmtree(ignore_errors=True) instead of the original
    bare ``except: pass``, which silently swallowed *every* exception
    (including KeyboardInterrupt).
    """
    shutil.rmtree(dirtreeroot, ignore_errors=True)
def remove_remote_files():
    """Best-effort removal of the remote (mdss) copies of the test files.

    mdss failures are swallowed deliberately: the remote files may
    legitimately not exist yet (e.g. on the first run) and cleanup must
    not abort the test session.
    """
    try:
        runcmd('mdss rm -rf {}'.format(os.path.join(prefix,dirtreeroot)))
        # Remove all the files. The remote copies live under `prefix`
        # (see setup_files), so the prefix must be part of the path —
        # the original omitted it and targeted the wrong location.
        for p in paths:
            runcmd('mdss rm {}'.format(os.path.join(prefix,*p)))
        # Now try and remove the (now empty) directories.
        # NOTE(review): the original built this set with
        # os.path.join(*p[-1]), which splats the final path *component*
        # character by character ("Ren" -> "R/e/n"); the directory part
        # p[:-1] is what was intended.
        dirs = set([os.path.join(prefix,*p[:-1]) for p in paths])
        for d in dirs:
            runcmd('mdss rmdir {}'.format(d))
    except (OSError, subprocess.CalledProcessError):
        # mdss missing or the remote paths already gone — both are fine.
        pass
def remove_files():
    """Clean up both the local and the remote (mdss) test artefacts."""
    remove_local_files()
    remove_remote_files()
def setup_files():
    """Create the local test files and mirror the tree to mdss under `prefix`."""
    for p in paths:
        touch(os.path.join(*p))
    # shutil.copytree(dirtreeroot, os.path.join(remote,dirtreeroot))
    runcmd('mdss mkdir {}'.format(prefix))
    runcmd('mdss put -r {} {}'.format(dirs[0],prefix))
def setup_module(module):
    """Pytest module-level setup: start from a clean slate, then build the tree."""
    if verbose: print ("setup_module module:%s" % module.__name__)
    remove_files()
    os.makedirs(dirtree)
    setup_files()
def teardown_module(module):
    """Pytest module-level teardown: remove local and remote test files."""
    # The original verbose message said "setup_module" — a copy/paste slip.
    if verbose: print ("teardown_module module:%s" % module.__name__)
    remove_files()
def test_diffdir():
    """Exercise diffdir() through local/remote deletions and a size change.

    Walks through: clean tree -> remove a local file -> remove the same
    remote file -> remove a remote file -> remove the same local file ->
    rewrite a local file with 3 bytes and an older mtime.
    """
    # Baseline: local and remote trees are identical after setup_module.
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True)
    assert(len(missinglocal) == 0)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Remove a local file
    file = os.path.join(*paths[5])
    os.remove(file)
    if verbose: print('removing {}'.format(file))
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(missinglocal == [file])
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Remove same remote file
    remotefile = os.path.join(prefix,file)
    if verbose: print('removing {}'.format(remotefile))
    runcmd('mdss -P {} rm {}'.format(project,remotefile))
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(len(missinglocal) == 0)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Remove a remote file
    file = os.path.join(*paths[3])
    remotefile = os.path.join(prefix,file)
    if verbose: print('removing {}'.format(remotefile))
    runcmd('mdss -P {} rm {}'.format(project,remotefile))
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(missingremote == [file])
    assert(len(missinglocal) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Remove same local file
    os.remove(file)
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(len(missinglocal) == 0)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Write 3 bytes into a local file
    file = os.path.join(*paths[2])
    fh = open(file,"wb")
    fh.write(b"\x5F\x9D\x3E")
    fh.close()
    # Back-date the mtime by one day so a time mismatch is also reported.
    dt = datetime.datetime.now() - datetime.timedelta(days=1)
    touch(file,time.mktime(dt.timetuple()))
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(len(missinglocal) == 0)
    assert(len(missingremote) == 0)
    assert(mismatchedsizes == {file : (3,0)})
    # The time may vary, but we can't know for sure, so we won't check this
    assert(mismatchedtimes)
def test_sync():
    """Exercise the main() sync path: copy-to-remote resolves every diff.

    Each main(...) call runs mdssdiff with '-cr -f' (copy remote, forced)
    and should leave local and remote trees identical again.
    """
    # Syncing different sized file from previous test
    main(parse_args(shlex.split("-r -P {} -cr -f -p {} {}".format(project,prefix,dirs[0]))))
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(len(missinglocal) == 0)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # (re)Make a local file
    file = os.path.join(*paths[5])
    touch(file)
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(missingremote == [ file ])
    assert(len(missinglocal) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Copy to remote
    main(parse_args(shlex.split("-r -P {} -cr -f -p {} {}".format(project,prefix,dirs[0]))))
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(len(missinglocal) == 0)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Remove same remote file
    remotefile = os.path.join(prefix,file)
    if verbose: print('removing {}'.format(remotefile))
    runcmd('mdss -P {} rm {}'.format(project,remotefile))
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(missingremote == [ file ])
    assert(len(missinglocal) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Copy to remote
    main(parse_args(shlex.split("-r -P {} -cr -f -p {} {}".format(project,prefix,dirs[0]))))
    # Change the time
    dt = datetime.datetime.fromtimestamp(os.path.getmtime(file)) - datetime.timedelta(days=1)
    touch(file,time.mktime(dt.timetuple()))
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(len(missinglocal) == 0)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    # The back-dated local mtime should be reported as exactly one day older.
    assert((mismatchedtimes[file][1] - mismatchedtimes[file][0]) == datetime.timedelta(days=1))
    # Copy to remote
    main(parse_args(shlex.split("-r -P {} -cr -f -p {} {}".format(project,prefix,dirs[0]))))
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(len(missinglocal) == 0)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
def test_match():
    """Exercise diffdir()'s `match` glob filtering.

    Removes files matching '*M*' (locally, then remotely) and checks that
    a `match` pattern restricts the reported differences to the
    intersection of the pattern and the removed files.
    """
    setup_files()
    pattern = '*M*'
    other_pattern = '*i*'
    files = []
    other_files = []
    for p in paths:
        file = os.path.join(*p)
        if fnmatch(file,pattern):
            files.append(file)
            print("Removing {}".format(file))
            os.remove(file)
        if fnmatch(file,other_pattern):
            other_files.append(file)
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(sorted(missinglocal) == sorted(files))
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Should be no overlap between this match and files removed above
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose, match="*la*")
    assert(len(missinglocal) == 0)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose, match=other_pattern)
    assert(missinglocal == sorted(list(set(files).intersection(other_files))))
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Repeat the exercise on the remote side: delete matching mdss copies.
    setup_files()
    files = []
    other_files = []
    for p in paths:
        file = os.path.join(*p)
        if fnmatch(file,pattern):
            files.append(file)
            # Remove same remote file
            remotefile = os.path.join(prefix,file)
            if verbose: print('removing {}'.format(remotefile))
            runcmd('mdss -P {} rm {}'.format(project,remotefile))
        if fnmatch(file,other_pattern):
            other_files.append(file)
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(sorted(missingremote) == sorted(files))
    assert(len(missinglocal) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Should be no overlap between this match and files removed above
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose, match="*la*")
    assert(len(missinglocal) == 0)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose, match=other_pattern)
    assert(sorted(missingremote) == sorted(list(set(files).intersection(other_files))))
    assert(len(missinglocal) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    # Finally: wipe the whole local tree and check everything is missing.
    setup_files()
    remove_local_files()
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    assert(sorted(missinglocal) == sorted([os.path.join(*p) for p in paths]))
    missinglocal, missingremote, mismatchedsizes, mismatchedtimes = diffdir(prefix, dirtreeroot, project, recursive=True, verbose=verbose, match=other_pattern)
    assert(len(missingremote) == 0)
    assert(len(mismatchedsizes) == 0)
    assert(len(mismatchedtimes) == 0)
    assert(sorted(missinglocal) == sorted(other_files))
| |
from unittest import TestCase
import errno
from mock import ANY, Mock, call, mock_open, patch
from os import path
try:
import __builtin__ as builtins
except ImportError:
import builtins
from ceres import CeresNode, CeresSlice, CeresTree
from ceres import DATAPOINT_SIZE, DEFAULT_NODE_CACHING_BEHAVIOR, DEFAULT_SLICE_CACHING_BEHAVIOR,\
DEFAULT_TIMESTEP, DIR_PERMS, MAX_SLICE_GAP
from ceres import getTree, CorruptNode, NoData, NodeDeleted, NodeNotFound, SliceDeleted,\
SliceGapTooLarge, TimeSeriesData, InvalidAggregationMethod
def fetch_mock_open_writes(open_mock):
    """Concatenate everything written through a mock_open handle.

    Tries a bytes join first and falls back to str: under Python 3 the
    writes may be either type, and b''.join raises TypeError on str items.
    """
    written = [c[0][0] for c in open_mock().write.call_args_list]
    try:
        return b''.join(written)
    except TypeError:
        return ''.join(written)
def make_slice_mock(start, end, step):
    """Build a CeresSlice mock covering [start, end) at *step* resolution.

    Its read(startTime, endTime) clips the requested window to the slice
    bounds and returns a TimeSeriesData of sequential float values.
    """
    mocked = Mock(spec=CeresSlice)
    mocked.startTime = start
    mocked.endTime = end
    mocked.timeStep = step

    def fake_read(*args, **kwargs):
        requested_start, requested_end = args
        clipped_start = max(requested_start, start)
        clipped_end = min(requested_end, end)
        count = (clipped_end - clipped_start) // step
        values = [float(i) for i in range(count)]
        return TimeSeriesData(clipped_start, clipped_end, step, values)

    mocked.read.side_effect = fake_read
    return mocked
class ModuleFunctionsTest(TestCase):
  """Tests for ceres module-level helpers (currently getTree)."""
  @patch('ceres.isdir', new=Mock(return_value=False))
  @patch('ceres.CeresTree', new=Mock(spec=CeresTree))
  def test_get_tree_with_no_tree(self):
    # No .ceres-tree directory anywhere up the path -> None.
    tree = getTree('/graphite/storage/ceres/foo/bar')
    self.assertEqual(None, tree)

  @patch('ceres.CeresTree', spec=CeresTree)
  @patch('ceres.isdir')
  def test_get_tree_with_tree_samedir(self, isdir_mock, ceres_tree_mock):
    # The queried directory itself contains .ceres-tree.
    isdir_mock.return_value = True
    tree = getTree('/graphite/storage/ceres')
    self.assertNotEqual(None, tree)
    isdir_mock.assert_called_once_with('/graphite/storage/ceres/.ceres-tree')
    ceres_tree_mock.assert_called_once_with('/graphite/storage/ceres')
class TimeSeriesDataTest(TestCase):
  """Tests for TimeSeriesData: timestamps, iteration and merge semantics."""
  def setUp(self):
    # 10 points covering [0, 50) at step 5, values 0.0 .. 9.0.
    self.time_series = TimeSeriesData(0, 50, 5, [float(x) for x in range(0, 10)])

  def test_timestamps_property(self):
    self.assertEqual(10, len(self.time_series.timestamps))
    self.assertEqual(0, self.time_series.timestamps[0])
    self.assertEqual(45, self.time_series.timestamps[-1])

  def test_iter_values(self):
    # Iteration yields (timestamp, value) pairs.
    values = list(self.time_series)
    self.assertEqual(10, len(values))
    self.assertEqual((0, 0.0), values[0])
    self.assertEqual((45, 9.0), values[-1])

  def test_merge_no_missing(self):
    # merge only has effect if time series has no gaps
    other_series = TimeSeriesData(0, 25, 5, [float(x * x) for x in range(1, 6)])
    original_values = list(self.time_series)
    self.time_series.merge(other_series)
    self.assertEqual(original_values, list(self.time_series))

  def test_merge_with_empty(self):
    # An all-None series takes every value from the merged-in series.
    new_series = TimeSeriesData(0, 50, 5, [None] * 10)
    new_series.merge(self.time_series)
    self.assertEqual(list(self.time_series), list(new_series))

  def test_merge_with_holes(self):
    # Only the None slots are filled from the merged-in series.
    values = []
    for x in range(0, 10):
      if x % 2 == 0:
        values.append(x)
      else:
        values.append(None)
    new_series = TimeSeriesData(0, 50, 5, values)
    new_series.merge(self.time_series)
    self.assertEqual(list(self.time_series), list(new_series))
class CeresTreeTest(TestCase):
  """Tests for CeresTree: construction, node lookup/find, store/fetch."""
  def setUp(self):
    # isdir is patched so the tree root is accepted without touching disk.
    with patch('ceres.isdir', new=Mock(return_value=True)):
      self.ceres_tree = CeresTree('/graphite/storage/ceres')

  @patch('ceres.isdir', new=Mock(return_value=False))
  def test_init_invalid(self):
    self.assertRaises(ValueError, CeresTree, '/nonexistent_path')

  @patch('ceres.isdir', new=Mock(return_value=True))
  @patch('ceres.abspath')
  def test_init_valid(self, abspath_mock):
    # The tree root is normalised through abspath().
    abspath_mock.return_value = '/var/graphite/storage/ceres'
    tree = CeresTree('/graphite/storage/ceres')
    abspath_mock.assert_called_once_with('/graphite/storage/ceres')
    self.assertEqual('/var/graphite/storage/ceres', tree.root)

  @patch('ceres.isdir', new=Mock(return_value=True))
  def test_init_sets_default_cache_behavior(self):
    tree = CeresTree('/graphite/storage/ceres')
    self.assertEqual(DEFAULT_NODE_CACHING_BEHAVIOR, tree.nodeCachingBehavior)

  @patch('ceres.isdir', new=Mock(return_value=False))
  @patch.object(CeresTree, '__init__')
  @patch('os.makedirs')
  def test_create_tree_new_dir(self, makedirs_mock, ceres_tree_init_mock):
    # Missing root: createTree makes .ceres-tree but writes no properties.
    ceres_tree_init_mock.return_value = None
    with patch.object(builtins, 'open', mock_open()) as open_mock:
      CeresTree.createTree('/graphite/storage/ceres')
    makedirs_mock.assert_called_once_with('/graphite/storage/ceres/.ceres-tree', DIR_PERMS)
    self.assertFalse(open_mock.called)
    ceres_tree_init_mock.assert_called_once_with('/graphite/storage/ceres')

  @patch('ceres.isdir', new=Mock(return_value=True))
  @patch.object(CeresTree, '__init__')
  @patch('os.makedirs')
  def test_create_tree_existing_dir(self, makedirs_mock, ceres_tree_init_mock):
    # Existing root: no mkdir, no property files.
    ceres_tree_init_mock.return_value = None
    with patch.object(builtins, 'open', mock_open()) as open_mock:
      CeresTree.createTree('/graphite/storage/ceres')
    self.assertFalse(makedirs_mock.called)
    self.assertFalse(open_mock.called)
    ceres_tree_init_mock.assert_called_once_with('/graphite/storage/ceres')

  @patch('ceres.isdir', new=Mock(return_value=True))
  @patch.object(CeresTree, '__init__', new=Mock(return_value=None))
  @patch('os.makedirs', new=Mock())
  def test_create_tree_write_props(self):
    # Keyword properties become one file per property under .ceres-tree.
    props = {
      "foo_prop": "foo_value",
      "bar_prop": "bar_value"}
    with patch.object(builtins, 'open', mock_open()) as open_mock:
      CeresTree.createTree('/graphite/storage/ceres', **props)
    for (prop, value) in props.items():
      open_mock.assert_any_call(path.join('/graphite/storage/ceres', '.ceres-tree', prop), 'w')
      open_mock.return_value.write.assert_any_call(value)

  @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
  def test_get_node_path_clean(self):
    # Filesystem path relative to the root maps to a dotted metric path.
    result = self.ceres_tree.getNodePath('/graphite/storage/ceres/metric/foo')
    self.assertEqual('metric.foo', result)

  @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
  def test_get_node_path_trailing_slash(self):
    result = self.ceres_tree.getNodePath('/graphite/storage/ceres/metric/foo/')
    self.assertEqual('metric.foo', result)

  @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
  def test_get_node_path_outside_tree(self):
    self.assertRaises(ValueError, self.ceres_tree.getNodePath, '/metric/foo')

  @patch('ceres.CeresNode', spec=CeresNode)
  def test_get_node_uncached(self, ceres_node_mock):
    ceres_node_mock.isNodeDir.return_value = True
    result = self.ceres_tree.getNode('metrics.foo')
    ceres_node_mock.assert_called_once_with(
      self.ceres_tree,
      'metrics.foo',
      '/graphite/storage/ceres/metrics/foo')
    self.assertEqual(result, ceres_node_mock())

  @patch('ceres.CeresNode', spec=CeresNode)
  @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
  @patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
  def test_find_explicit_metric(self, ceres_node_mock):
    # glob is patched to echo its pattern -> exactly one "match".
    ceres_node_mock.isNodeDir.return_value = True
    result = list(self.ceres_tree.find('metrics.foo'))
    self.assertEqual(1, len(result))
    self.assertEqual(result[0], ceres_node_mock())

  @patch('ceres.CeresNode', spec=CeresNode)
  @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
  @patch('ceres.glob')
  def test_find_wildcard(self, glob_mock, ceres_node_mock):
    # The '*' expands to three fake directories, each yielding a node.
    matches = ['foo', 'bar', 'baz']
    glob_mock.side_effect = lambda x: [x.replace('*', m) for m in matches]
    ceres_node_mock.isNodeDir.return_value = True
    result = list(self.ceres_tree.find('metrics.*'))
    self.assertEqual(3, len(result))
    ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.foo', ANY)
    ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.bar', ANY)
    ceres_node_mock.assert_any_call(self.ceres_tree, 'metrics.baz', ANY)

  @patch('ceres.CeresNode', spec=CeresNode)
  @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
  @patch('ceres.glob', new=Mock(return_value=[]))
  def test_find_wildcard_no_matches(self, ceres_node_mock):
    ceres_node_mock.isNodeDir.return_value = False
    result = list(self.ceres_tree.find('metrics.*'))
    self.assertEqual(0, len(result))
    self.assertFalse(ceres_node_mock.called)

  @patch('ceres.CeresNode', spec=CeresNode)
  @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
  @patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
  def test_find_metric_with_interval(self, ceres_node_mock):
    # A node with no data for the interval is filtered out of find().
    ceres_node_mock.isNodeDir.return_value = True
    ceres_node_mock.return_value.hasDataForInterval.return_value = False
    result = list(self.ceres_tree.find('metrics.foo', 0, 1000))
    self.assertEqual(0, len(result))
    ceres_node_mock.return_value.hasDataForInterval.assert_called_once_with(0, 1000)

  @patch('ceres.CeresNode', spec=CeresNode)
  @patch('ceres.abspath', new=Mock(side_effect=lambda x: x))
  @patch('ceres.glob', new=Mock(side_effect=lambda x: [x]))
  def test_find_metric_with_interval_not_found(self, ceres_node_mock):
    ceres_node_mock.isNodeDir.return_value = True
    ceres_node_mock.return_value.hasDataForInterval.return_value = True
    result = list(self.ceres_tree.find('metrics.foo', 0, 1000))
    self.assertEqual(result[0], ceres_node_mock())
    ceres_node_mock.return_value.hasDataForInterval.assert_called_once_with(0, 1000)

  def test_store_invalid_node(self):
    with patch.object(self.ceres_tree, 'getNode', new=Mock(return_value=None)):
      datapoints = [(100, 1.0)]
      self.assertRaises(NodeNotFound, self.ceres_tree.store, 'metrics.foo', datapoints)

  @patch('ceres.CeresNode', spec=CeresNode)
  def test_store_valid_node(self, ceres_node_mock):
    datapoints = [(100, 1.0)]
    self.ceres_tree.store('metrics.foo', datapoints)
    ceres_node_mock.assert_called_once_with(self.ceres_tree, 'metrics.foo', ANY)
    ceres_node_mock.return_value.write.assert_called_once_with(datapoints)

  # NOTE(review): missing the 'test_' prefix, so this never runs under
  # unittest/pytest discovery — presumably unintentional; confirm before
  # renaming, since enabling it may surface failures.
  def fetch_invalid_node(self):
    with patch.object(self.ceres_tree, 'getNode', new=Mock(return_value=None)):
      self.assertRaises(NodeNotFound, self.ceres_tree.fetch, 'metrics.foo')

  # NOTE(review): also missing the 'test_' prefix (never runs); its final
  # assertion compares against a *fresh* Mock and looks unlikely to pass
  # as written — verify before enabling.
  @patch('ceres.CeresNode', spec=CeresNode)
  def fetch_metric(self, ceres_node_mock):
    read_mock = ceres_node_mock.return_value.read
    read_mock.return_value = Mock(spec=TimeSeriesData)
    result = self.ceres_tree.fetch('metrics.foo', 0, 1000)
    ceres_node_mock.assert_called_once_with(self.ceres_tree, 'metrics.foo', ANY)
    read_mock.assert_called_once_with(0, 1000)
    self.assertEqual(Mock(spec=TimeSeriesData), result)

  def test_set_node_caching_behavior_validates_names(self):
    # Only 'none' and 'all' are accepted; invalid names raise and leave
    # the current setting untouched.
    self.ceres_tree.setNodeCachingBehavior('none')
    self.assertEquals('none', self.ceres_tree.nodeCachingBehavior)
    self.ceres_tree.setNodeCachingBehavior('all')
    self.assertEquals('all', self.ceres_tree.nodeCachingBehavior)
    self.assertRaises(ValueError, self.ceres_tree.setNodeCachingBehavior, 'foo')
    # Assert unchanged
    self.assertEquals('all', self.ceres_tree.nodeCachingBehavior)
class CeresNodeTest(TestCase):
def setUp(self):
with patch('ceres.isdir', new=Mock(return_value=True)):
with patch('ceres.exists', new=Mock(return_value=True)):
self.ceres_tree = CeresTree('/graphite/storage/ceres')
self.ceres_node = CeresNode(
self.ceres_tree,
'sample_metric',
'/graphite/storage/ceres/sample_metric')
self.ceres_node.timeStep = 60
slice_configs = [
(1200, 1800, 60),
(600, 1200, 60)]
self.ceres_slices = []
for start, end, step in slice_configs:
slice_mock = make_slice_mock(start, end, step)
self.ceres_slices.append(slice_mock)
def test_init_sets_default_cache_behavior(self):
ceres_node = CeresNode(
self.ceres_tree,
'sample_metric',
'/graphite/storage/ceres/sample_metric')
self.assertEqual(DEFAULT_SLICE_CACHING_BEHAVIOR, ceres_node.sliceCachingBehavior)
@patch('ceres.os.makedirs', new=Mock())
@patch('ceres.CeresNode.writeMetadata')
def test_create_sets_a_default_timestep(self, write_metadata_mock):
CeresNode.create(self.ceres_tree, 'sample_metric')
write_metadata_mock.assert_called_with(dict(timeStep=DEFAULT_TIMESTEP))
@patch('ceres.os.makedirs', new=Mock())
@patch('ceres.CeresNode.writeMetadata', new=Mock())
def test_create_returns_new_ceres_node(self):
ceres_node = CeresNode.create(self.ceres_tree, 'sample_metric')
self.assertTrue(isinstance(ceres_node, CeresNode))
def test_write_metadata(self):
import json
open_mock = mock_open()
metadata = dict(timeStep=60, aggregationMethod='avg')
with patch.object(builtins, 'open', open_mock):
self.ceres_node.writeMetadata(metadata)
self.assertEquals(json.dumps(metadata), fetch_mock_open_writes(open_mock))
def test_read_metadata_sets_timestep(self):
import json
metadata = dict(timeStep=60, aggregationMethod='avg')
json_metadata = json.dumps(metadata)
open_mock = mock_open(read_data=json_metadata)
with patch.object(builtins, 'open', open_mock):
self.ceres_node.readMetadata()
open_mock().read.assert_called_once()
self.assertEqual(60, self.ceres_node.timeStep)
def test_read_metadata_returns_corrupt_if_json_error(self):
with patch.object(builtins, 'open', mock_open()):
self.assertRaises(CorruptNode, self.ceres_node.readMetadata)
def test_set_slice_caching_behavior_validates_names(self):
self.ceres_node.setSliceCachingBehavior('none')
self.assertEquals('none', self.ceres_node.sliceCachingBehavior)
self.ceres_node.setSliceCachingBehavior('all')
self.assertEquals('all', self.ceres_node.sliceCachingBehavior)
self.ceres_node.setSliceCachingBehavior('latest')
self.assertEquals('latest', self.ceres_node.sliceCachingBehavior)
self.assertRaises(ValueError, self.ceres_node.setSliceCachingBehavior, 'foo')
# Assert unchanged
self.assertEquals('latest', self.ceres_node.sliceCachingBehavior)
def test_slices_is_a_generator(self):
from types import GeneratorType
self.assertTrue(isinstance(self.ceres_node.slices, GeneratorType))
def test_slices_returns_cached_set_when_behavior_is_all(self):
def mock_slice():
return Mock(spec=CeresSlice)
self.ceres_node.setSliceCachingBehavior('all')
cached_contents = [mock_slice for c in range(4)]
self.ceres_node.sliceCache = cached_contents
with patch('ceres.CeresNode.readSlices') as read_slices_mock:
slice_list = list(self.ceres_node.slices)
self.assertFalse(read_slices_mock.called)
self.assertEquals(cached_contents, slice_list)
def test_slices_returns_first_cached_when_behavior_is_latest(self):
self.ceres_node.setSliceCachingBehavior('latest')
cached_contents = Mock(spec=CeresSlice)
self.ceres_node.sliceCache = cached_contents
read_slices_mock = Mock(return_value=[])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
self.assertEquals(cached_contents, next(slice_iter))
# We should be yielding cached before trying to read
self.assertFalse(read_slices_mock.called)
def test_slices_reads_remaining_when_behavior_is_latest(self):
self.ceres_node.setSliceCachingBehavior('latest')
cached_contents = Mock(spec=CeresSlice)
self.ceres_node.sliceCache = cached_contents
read_slices_mock = Mock(return_value=[(0, 60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
next(slice_iter)
# *now* we expect to read from disk
try:
while True:
next(slice_iter)
except StopIteration:
pass
read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_behavior_is_none(self):
self.ceres_node.setSliceCachingBehavior('none')
read_slices_mock = Mock(return_value=[(0, 60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
next(slice_iter)
read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_cache_empty_and_behavior_all(self):
self.ceres_node.setSliceCachingBehavior('all')
read_slices_mock = Mock(return_value=[(0, 60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
next(slice_iter)
read_slices_mock.assert_called_once_with()
def test_slices_reads_from_disk_when_cache_empty_and_behavior_latest(self):
self.ceres_node.setSliceCachingBehavior('all')
read_slices_mock = Mock(return_value=[(0, 60)])
with patch('ceres.CeresNode.readSlices', new=read_slices_mock):
slice_iter = self.ceres_node.slices
next(slice_iter)
read_slices_mock.assert_called_once_with()
@patch('ceres.exists', new=Mock(return_value=False))
def test_read_slices_raises_when_node_doesnt_exist(self):
self.assertRaises(NodeDeleted, self.ceres_node.readSlices)
@patch('ceres.exists', new=Mock(return_Value=True))
def test_read_slices_ignores_not_slices(self):
listdir_mock = Mock(return_value=['0@60.slice', '0@300.slice', 'foo'])
with patch('ceres.os.listdir', new=listdir_mock):
self.assertEquals(2, len(self.ceres_node.readSlices()))
@patch('ceres.exists', new=Mock(return_Value=True))
def test_read_slices_parses_slice_filenames(self):
listdir_mock = Mock(return_value=['0@60.slice', '0@300.slice'])
with patch('ceres.os.listdir', new=listdir_mock):
slice_infos = self.ceres_node.readSlices()
self.assertTrue((0, 60) in slice_infos)
self.assertTrue((0, 300) in slice_infos)
@patch('ceres.exists', new=Mock(return_Value=True))
def test_read_slices_reverse_sorts_by_time(self):
listdir_mock = Mock(return_value=[
'0@60.slice',
'320@300.slice',
'120@120.slice',
'0@120.slice',
'600@300.slice'])
with patch('ceres.os.listdir', new=listdir_mock):
slice_infos = self.ceres_node.readSlices()
slice_timestamps = [s[0] for s in slice_infos]
self.assertEqual([600, 320, 120, 0, 0], slice_timestamps)
  # --- CeresNode.hasDataForInterval() -------------------------------------
  def test_no_data_exists_if_no_slices_exist(self):
    with patch('ceres.CeresNode.readSlices', new=Mock(return_value=[])):
      self.assertFalse(self.ceres_node.hasDataForInterval(0, 60))
  def test_no_data_exists_if_no_slices_exist_and_no_time_specified(self):
    with patch('ceres.CeresNode.readSlices', new=Mock(return_value=[])):
      self.assertFalse(self.ceres_node.hasDataForInterval(None, None))
  def test_data_exists_if_slices_exist_and_no_time_specified(self):
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.assertTrue(self.ceres_node.hasDataForInterval(None, None))
  def test_data_exists_if_slice_covers_interval_completely(self):
    with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
      self.assertTrue(self.ceres_node.hasDataForInterval(1200, 1800))
  def test_data_exists_if_slice_covers_interval_end(self):
    with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
      self.assertTrue(self.ceres_node.hasDataForInterval(600, 1260))
  def test_data_exists_if_slice_covers_interval_start(self):
    with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
      self.assertTrue(self.ceres_node.hasDataForInterval(1740, 2100))
  # An interval that only touches a slice boundary contains no points.
  def test_no_data_exists_if_slice_touches_interval_end(self):
    with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
      self.assertFalse(self.ceres_node.hasDataForInterval(600, 1200))
  def test_no_data_exists_if_slice_touches_interval_start(self):
    with patch('ceres.CeresNode.slices', new=[self.ceres_slices[0]]):
      self.assertFalse(self.ceres_node.hasDataForInterval(1800, 2100))
  # --- CeresNode.compact() -------------------------------------------------
  def test_compact_returns_empty_if_passed_empty(self):
    self.assertEqual([], self.ceres_node.compact([]))
  def test_compact_filters_null_values(self):
    self.assertEqual([], self.ceres_node.compact([(60, None)]))
  def test_compact_rounds_timestamps_down_to_step(self):
    self.assertEqual([[(600, 0)]], self.ceres_node.compact([(605, 0)]))
  def test_compact_drops_duplicate_timestamps(self):
    datapoints = [(600, 0), (600, 0)]
    compacted = self.ceres_node.compact(datapoints)
    self.assertEqual([[(600, 0.0)]], compacted)
  def test_compact_keeps_last_seen_duplicate_timestamp(self):
    datapoints = [(600, 0), (600, 1), (660, 1), (660, 0)]
    compacted = self.ceres_node.compact(datapoints)
    self.assertEqual([[(600, 1.0), (660, 0.0)]], compacted)
  # Non-contiguous timestamps are split into separate groups.
  def test_compact_groups_contiguous_points(self):
    datapoints = [(600, 0), (660, 0), (840, 0)]
    compacted = self.ceres_node.compact(datapoints)
    self.assertEqual([[(600, 0), (660, 0)], [(840, 0)]], compacted)
  # --- CeresNode.write() ----------------------------------------------------
  # NOTE(review): self.ceres_slices comes from setUp (not visible in this
  # chunk); from the assertions below, slices[0] appears to be the most
  # recent slice (covering ~1200+) and slices[1] the older one -- confirm
  # against the fixture.
  def test_write_noops_if_no_datapoints(self):
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write([])
      self.assertFalse(self.ceres_slices[0].write.called)
  def test_write_within_first_slice(self):
    datapoints = [(1200, 0.0), (1260, 1.0), (1320, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write(datapoints)
      self.ceres_slices[0].write.assert_called_once_with(datapoints)
  @patch('ceres.CeresSlice.create')
  def test_write_within_first_slice_doesnt_create(self, slice_create_mock):
    datapoints = [(1200, 0.0), (1260, 1.0), (1320, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write(datapoints)
      self.assertFalse(slice_create_mock.called)
  @patch('ceres.CeresSlice.create', new=Mock())
  def test_write_within_first_slice_with_gaps(self):
    datapoints = [(1200, 0.0), (1320, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write(datapoints)
      # sorted most recent first
      calls = [call.write([datapoints[1]]), call.write([datapoints[0]])]
      self.ceres_slices[0].assert_has_calls(calls)
  @patch('ceres.CeresSlice.create', new=Mock())
  def test_write_within_previous_slice(self):
    datapoints = [(720, 0.0), (780, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write(datapoints)
      # 2nd slice has this range
      self.ceres_slices[1].write.assert_called_once_with(datapoints)
  @patch('ceres.CeresSlice.create')
  def test_write_within_previous_slice_doesnt_create(self, slice_create_mock):
    datapoints = [(720, 0.0), (780, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write(datapoints)
      self.assertFalse(slice_create_mock.called)
  @patch('ceres.CeresSlice.create', new=Mock())
  def test_write_within_previous_slice_with_gaps(self):
    datapoints = [(720, 0.0), (840, 2.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write(datapoints)
      calls = [call.write([datapoints[1]]), call.write([datapoints[0]])]
      self.ceres_slices[1].assert_has_calls(calls)
  @patch('ceres.CeresSlice.create', new=Mock())
  def test_write_across_slice_boundaries(self):
    datapoints = [(1080, 0.0), (1140, 1.0), (1200, 2.0), (1260, 3.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write(datapoints)
      self.ceres_slices[0].write.assert_called_once_with(datapoints[2:4])
      self.ceres_slices[1].write.assert_called_once_with(datapoints[0:2])
  @patch('ceres.CeresSlice.create')
  def test_write_before_earliest_slice_creates_new(self, slice_create_mock):
    datapoints = [(300, 0.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write(datapoints)
      slice_create_mock.assert_called_once_with(self.ceres_node, 300, 60)
  @patch('ceres.CeresSlice.create')
  def test_write_before_earliest_slice_writes_to_new_one(self, slice_create_mock):
    datapoints = [(300, 0.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write(datapoints)
      slice_create_mock.return_value.write.assert_called_once_with(datapoints)
  @patch('ceres.CeresSlice.create')
  def test_write_before_earliest_slice_writes_next_slice_too(self, slice_create_mock):
    # slice 0 starts at 600
    datapoints = [(540, 0.0), (600, 0.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.write(datapoints)
      self.ceres_slices[1].write.assert_called_once_with([datapoints[1]])
@patch('ceres.CeresSlice.create')
def test_create_during_write_clears_slice_cache(self, slice_create_mock):
self.ceres_node.setSliceCachingBehavior('all')
self.ceres_node.sliceCache = self.ceres_slices
datapoints = [(300, 0.0)]
with patch('ceres.CeresNode.slices', new=self.ceres_slices):
self.ceres_node.write(datapoints)
self.assertEquals(None, self.ceres_node.sliceCache)
  @patch('ceres.CeresSlice.create')
  def test_write_past_max_gap_size_creates(self, slice_create_mock):
    # When an existing slice rejects the write with SliceGapTooLarge,
    # write() must fall back to creating a new slice.
    datapoints = [(6000, 0.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      with patch.object(self.ceres_slices[0], 'write', side_effect=SliceGapTooLarge):
        self.ceres_node.write(datapoints)
  @patch('ceres.CeresSlice.create')
  def test_write_different_timestep_creates(self, slice_create_mock):
    # A node whose timeStep differs from every existing slice's step must
    # get a brand new slice at the node's own step.
    datapoints = [(600, 0.0)]
    with patch('ceres.CeresNode.slices', new=self.ceres_slices):
      self.ceres_node.timeStep = 10
      self.ceres_node.write(datapoints)
      slice_create_mock.assert_called_once_with(self.ceres_node, 600, 10)
class CeresNodeReadTest(TestCase):
  """Tests for CeresNode.read() over two adjacent 60s-step slices.

  make_slice_mock is defined earlier in this file; judging by the
  assertions below it returns a mock slice whose read() yields sequential
  values starting at 0 -- confirm against the helper if in doubt.
  """
  def setUp(self):
    with patch('ceres.isdir', new=Mock(return_value=True)):
      with patch('ceres.exists', new=Mock(return_value=True)):
        self.ceres_tree = CeresTree('/graphite/storage/ceres')
        self.ceres_node = CeresNode(
          self.ceres_tree,
          'sample_metric',
          '/graphite/storage/ceres/sample_metric')
        self.ceres_node.timeStep = 60
    # (start, end, step) for each mocked slice; newest slice first.
    slice_configs = [
        (1200, 1800, 60),
        (600, 1200, 60)]
    self.ceres_slices = []
    for start, end, step in slice_configs:
      slice_mock = make_slice_mock(start, end, step)
      self.ceres_slices.append(slice_mock)
    # NOTE: 'slices' is patched with a one-shot iterator.  setUp recreates
    # it for every test, but a single test must not iterate it twice.
    self.ceres_slices_patch = patch('ceres.CeresNode.slices', new=iter(self.ceres_slices))
    self.ceres_slices_patch.start()
  def tearDown(self):
    self.ceres_slices_patch.stop()
  def test_read_loads_metadata_if_timestep_unknown(self):
    with patch('ceres.CeresNode.readMetadata', new=Mock(side_effect=Exception))\
        as read_metadata_mock:
      self.ceres_node.timeStep = None
      try:  # Raise Exception as a cheap exit out of the function once we have the call we want
        self.ceres_node.read(600, 660)
      except Exception:
        pass
      read_metadata_mock.assert_called_once_with()
  def test_read_normalizes_from_time(self):
    self.ceres_node.read(1210, 1260)
    self.ceres_slices[0].read.assert_called_once_with(1200, 1260)
  def test_read_normalizes_until_time(self):
    self.ceres_node.read(1200, 1270)
    self.ceres_slices[0].read.assert_called_once_with(1200, 1260)
  def test_read_returns_empty_time_series_if_before_slices(self):
    result = self.ceres_node.read(0, 300)
    self.assertEqual([None] * 5, result.values)
  def test_read_returns_empty_time_series_if_slice_has_no_data(self):
    self.ceres_slices[0].read.side_effect = NoData
    result = self.ceres_node.read(1200, 1500)
    self.assertEqual([None] * 5, result.values)
  def test_read_pads_points_missing_before_series(self):
    result = self.ceres_node.read(540, 1200)
    self.assertEqual([None, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], result.values)
  def test_read_pads_points_missing_after_series(self):
    result = self.ceres_node.read(1200, 1860)
    self.assertEqual(None, result.values[-1])
  def test_read_goes_across_slices(self):
    self.ceres_node.read(900, 1500)
    self.ceres_slices[0].read.assert_called_once_with(1200, 1500)
    self.ceres_slices[1].read.assert_called_once_with(900, 1200)
  def test_read_across_slices_merges_results(self):
    result = self.ceres_node.read(900, 1500)
    self.assertEqual([0, 1, 2, 3, 4, 0, 1, 2, 3, 4], result.values)
  def test_read_pads_points_missing_after_series_across_slices(self):
    result = self.ceres_node.read(900, 1860)
    self.assertEqual(None, result.values[-1])
  def test_read_pads_points_missing_between_slices(self):
    # Shrink the older slice so a one-point hole opens at 1140-1200.
    self.ceres_slices[1] = make_slice_mock(600, 1140, 60)
    result = self.ceres_node.read(900, 1500)
    self.assertEqual([0, 1, 2, 3, None, 0, 1, 2, 3, 4], result.values)
class CeresSliceTest(TestCase):
  """Tests for CeresSlice construction, deleteBefore() and ordering."""
  def setUp(self):
    with patch('ceres.isdir', new=Mock(return_value=True)):
      with patch('ceres.exists', new=Mock(return_value=True)):
        self.ceres_tree = CeresTree('/graphite/storage/ceres')
        self.ceres_node = CeresNode(
          self.ceres_tree,
          'sample_metric',
          '/graphite/storage/ceres/sample_metric')
  def test_init_sets_fspath_name(self):
    ceres_slice = CeresSlice(self.ceres_node, 0, 60)
    self.assertTrue(ceres_slice.fsPath.endswith('0@60.slice'))
  @patch('ceres.getsize')
  def test_end_time_calculated_via_filesize(self, getsize_mock):
    getsize_mock.return_value = DATAPOINT_SIZE * 300
    ceres_slice = CeresSlice(self.ceres_node, 0, 60)
    # 300 points at 60 sec per point
    self.assertEqual(300 * 60, ceres_slice.endTime)
  @patch('ceres.exists')
  def test_delete_before_raises_if_deleted(self, exists_mock):
    exists_mock.return_value = False
    ceres_slice = CeresSlice(self.ceres_node, 0, 60)
    self.assertRaises(SliceDeleted, ceres_slice.deleteBefore, 60)
  @patch('ceres.exists', Mock(return_value=True))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_delete_before_returns_if_time_earlier_than_start(self, open_mock):
    ceres_slice = CeresSlice(self.ceres_node, 300, 60)
    # File starts at timestamp 300, delete points before timestamp 60
    ceres_slice.deleteBefore(60)
    open_mock.assert_has_calls([])  # no calls
  @patch('ceres.exists', Mock(return_value=True))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_delete_before_returns_if_time_less_than_step_earlier_than_start(self, open_mock):
    ceres_slice = CeresSlice(self.ceres_node, 300, 60)
    ceres_slice.deleteBefore(299)
    open_mock.assert_has_calls([])
  @patch('ceres.exists', Mock(return_value=True))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_delete_before_returns_if_time_same_as_start(self, open_mock):
    ceres_slice = CeresSlice(self.ceres_node, 300, 60)
    ceres_slice.deleteBefore(300)
    open_mock.assert_has_calls([])
  @patch('ceres.exists', Mock(return_value=True))
  @patch('ceres.os.rename', Mock(return_value=True))
  def test_delete_before_clears_slice_cache(self):
    ceres_slice = CeresSlice(self.ceres_node, 300, 60)
    open_mock = mock_open(read_data='foo')  # needs to be non-null for this test
    with patch.object(builtins, 'open', open_mock):
      with patch('ceres.CeresNode.clearSliceCache') as clear_slice_cache_mock:
        ceres_slice.deleteBefore(360)
        clear_slice_cache_mock.assert_called_once_with()
  @patch('ceres.exists', Mock(return_value=True))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_delete_before_deletes_file_if_no_more_data(self, open_mock):
    # Default mock_open reads back '' -> nothing left after the cut point.
    ceres_slice = CeresSlice(self.ceres_node, 300, 60)
    with patch('ceres.os.unlink') as unlink_mock:
      try:
        ceres_slice.deleteBefore(360)
      except Exception:
        pass
      self.assertTrue(unlink_mock.called)
  @patch('ceres.exists', Mock(return_value=True))
  @patch('ceres.os.unlink', Mock())
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_delete_before_raises_slice_deleted_if_no_more_data(self, open_mock):
    ceres_slice = CeresSlice(self.ceres_node, 300, 60)
    self.assertRaises(SliceDeleted, ceres_slice.deleteBefore, 360)
  @patch('ceres.exists', Mock(return_value=True))
  @patch('ceres.os.rename', Mock())
  def test_delete_before_seeks_to_time(self):
    ceres_slice = CeresSlice(self.ceres_node, 300, 60)
    open_mock = mock_open(read_data='foo')
    with patch.object(builtins, 'open', open_mock) as open_mock:
      ceres_slice.deleteBefore(360)
      # Seek from 300 (start of file) to 360 (1 datapointpoint)
      open_mock.return_value.seek.assert_any_call(1 * DATAPOINT_SIZE)
  @patch('ceres.exists', Mock(return_value=True))
  def test_slices_are_sortable(self):
    # Sorting relies on CeresSlice comparison dunders (startTime order).
    ceres_slices = [
        CeresSlice(self.ceres_node, 300, 60),
        CeresSlice(self.ceres_node, 600, 60),
        CeresSlice(self.ceres_node, 0, 60)]
    expected_order = [0, 300, 600]
    result_order = [slice.startTime for slice in sorted(ceres_slices)]
    self.assertEqual(expected_order, result_order)
class CeresSliceWriteTest(TestCase):
  """Tests for CeresSlice.write(): gap handling, seeking and sizes.

  The slice under test starts at timestamp 300 with a 60s step; file size
  (mocked via ceres.getsize) determines how many points already exist.
  """
  def setUp(self):
    with patch('ceres.isdir', new=Mock(return_value=True)):
      with patch('ceres.exists', new=Mock(return_value=True)):
        self.ceres_tree = CeresTree('/graphite/storage/ceres')
        self.ceres_node = CeresNode(
          self.ceres_tree,
          'sample_metric',
          '/graphite/storage/ceres/sample_metric')
        self.ceres_slice = CeresSlice(self.ceres_node, 300, 60)
  @patch('ceres.getsize', Mock(side_effect=OSError))
  def test_raises_os_error_if_not_enoent(self):
    self.assertRaises(OSError, self.ceres_slice.write, [(0, 0)])
  @patch('ceres.getsize', Mock(side_effect=OSError(errno.ENOENT, 'foo')))
  def test_raises_slice_deleted_oserror_enoent(self):
    # ENOENT specifically means the slice file vanished -> SliceDeleted.
    self.assertRaises(SliceDeleted, self.ceres_slice.write, [(0, 0)])
  @patch('ceres.getsize', Mock(return_value=0))
  @patch.object(builtins, 'open', mock_open())
  def test_raises_slice_gap_too_large_when_it_is(self):
    # one point over the max
    new_time = self.ceres_slice.startTime + self.ceres_slice.timeStep * (MAX_SLICE_GAP + 1)
    datapoint = (new_time, 0)
    self.assertRaises(SliceGapTooLarge, self.ceres_slice.write, [datapoint])
  @patch('ceres.getsize', Mock(return_value=0))
  @patch.object(builtins, 'open', mock_open())
  def test_doesnt_raise_slice_gap_too_large_when_it_isnt(self):
    new_time = self.ceres_slice.startTime + self.ceres_slice.timeStep * (MAX_SLICE_GAP - 1)
    datapoint = (new_time, 0)
    try:
      self.ceres_slice.write([datapoint])
    except SliceGapTooLarge:
      self.fail("SliceGapTooLarge raised")
  @patch('ceres.getsize', Mock(return_value=DATAPOINT_SIZE * 100))
  @patch.object(builtins, 'open', mock_open())
  def test_doesnt_raise_slice_gap_when_newer_points_exist(self):
    # Gap is measured from the end of existing data, not the slice start.
    new_time = self.ceres_slice.startTime + self.ceres_slice.timeStep * (MAX_SLICE_GAP + 1)
    datapoint = (new_time, 0)
    try:
      self.ceres_slice.write([datapoint])
    except SliceGapTooLarge:
      self.fail("SliceGapTooLarge raised")
  @patch('ceres.getsize', Mock(return_value=0))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_raises_ioerror_if_seek_hits_ioerror(self, open_mock):
    open_mock.return_value.seek.side_effect = IOError
    self.assertRaises(IOError, self.ceres_slice.write, [(300, 0)])
  @patch('ceres.getsize', Mock(return_value=0))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_opens_file_as_binary(self, open_mock):
    self.ceres_slice.write([(300, 0)])
    # call_args = (args, kwargs)
    self.assertTrue(open_mock.call_args[0][1].endswith('b'))
  @patch('ceres.getsize', Mock(return_value=0))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_seeks_to_the_correct_offset_first_point(self, open_mock):
    self.ceres_slice.write([(300, 0)])
    open_mock.return_value.seek.assert_called_once_with(0)
  @patch('ceres.getsize', Mock(return_value=1 * DATAPOINT_SIZE))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_seeks_to_the_correct_offset_next_point(self, open_mock):
    self.ceres_slice.write([(360, 0)])
    # 2nd point in the file
    open_mock.return_value.seek.assert_called_once_with(DATAPOINT_SIZE)
  @patch('ceres.getsize', Mock(return_value=1 * DATAPOINT_SIZE))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_seeks_to_the_next_empty_offset_one_point_gap(self, open_mock):
    # Gaps are written out as NaNs so the offset we expect is the beginning
    # of the gap, not the offset of the point itself
    self.ceres_slice.write([(420, 0)])
    open_mock.return_value.seek.assert_called_once_with(1 * DATAPOINT_SIZE)
  @patch('ceres.getsize', Mock(return_value=0))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_correct_size_written_first_point(self, open_mock):
    self.ceres_slice.write([(300, 0)])
    self.assertEqual(1 * DATAPOINT_SIZE, len(fetch_mock_open_writes(open_mock)))
  @patch('ceres.getsize', Mock(return_value=1 * DATAPOINT_SIZE))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_correct_size_written_next_point(self, open_mock):
    self.ceres_slice.write([(360, 0)])
    self.assertEqual(1 * DATAPOINT_SIZE, len(fetch_mock_open_writes(open_mock)))
  @patch('ceres.getsize', Mock(return_value=1 * DATAPOINT_SIZE))
  @patch.object(builtins, 'open', new_callable=mock_open)
  def test_correct_size_written_one_point_gap(self, open_mock):
    self.ceres_slice.write([(420, 0)])
    # one empty point, one real point = two points total written
    self.assertEqual(2 * DATAPOINT_SIZE, len(fetch_mock_open_writes(open_mock)))
class CeresArchiveNodeReadTest(TestCase):
  """Tests for CeresNode.read() when slices have different steps.

  The node step is 30s, the newer slice is 30s and the older one 60s, so
  reads spanning both must down/up-sample and aggregate (average by
  default) to a common resolution.
  """
  def setUp(self):
    with patch('ceres.isdir', new=Mock(return_value=True)):
      with patch('ceres.exists', new=Mock(return_value=True)):
        self.ceres_tree = CeresTree('/graphite/storage/ceres')
        self.ceres_node = CeresNode(
          self.ceres_tree,
          'sample_metric',
          '/graphite/storage/ceres/sample_metric')
        self.ceres_node.timeStep = 30
    # (start, end, step); the newer slice has the finer resolution.
    slice_configs = [
        (1200, 1800, 30),
        (600, 1200, 60)]
    self.ceres_slices = []
    for start, end, step in slice_configs:
      slice_mock = make_slice_mock(start, end, step)
      self.ceres_slices.append(slice_mock)
    # NOTE: one-shot iterator, recreated per test by setUp (see
    # CeresNodeReadTest for the same caveat).
    self.ceres_slices_patch = patch('ceres.CeresNode.slices', new=iter(self.ceres_slices))
    self.ceres_slices_patch.start()
  def tearDown(self):
    self.ceres_slices_patch.stop()
  def test_archives_read_loads_metadata_if_timestep_unknown(self):
    with patch('ceres.CeresNode.readMetadata', new=Mock(side_effect=Exception))\
        as read_metadata_mock:
      self.ceres_node.timeStep = None
      try:  # Raise Exception as a cheap exit out of the function once we have the call we want
        self.ceres_node.read(600, 660)
      except Exception:
        pass
      read_metadata_mock.assert_called_once_with()
  def test_archives_read_normalizes_from_time(self):
    self.ceres_node.read(1210, 1260)
    self.ceres_slices[0].read.assert_called_once_with(1200, 1260)
  def test_archives_read_normalizes_until_time(self):
    self.ceres_node.read(1200, 1270)
    self.ceres_slices[0].read.assert_called_once_with(1200, 1260)
  def test_archives_read_returns_empty_time_series_if_before_slices(self):
    result = self.ceres_node.read(0, 300)
    self.assertEqual([None] * 10, result.values)
  def test_archives_read_returns_empty_time_series_if_slice_has_no_data(self):
    self.ceres_slices[0].read.side_effect = NoData
    result = self.ceres_node.read(1200, 1500)
    self.assertEqual([None] * 10, result.values)
  def test_archives_read_pads_points_missing_before_series(self):
    result = self.ceres_node.read(300, 1200)
    self.assertEqual(None, result.values[0])
  def test_archives_read_pads_points_missing_after_series(self):
    result = self.ceres_node.read(1200, 1860)
    self.assertEqual(None, result.values[-1])
  def test_archives_read_goes_across_slices(self):
    self.ceres_node.read(900, 1500)
    self.ceres_slices[0].read.assert_called_once_with(1200, 1500)
    self.ceres_slices[1].read.assert_called_once_with(900, 1200)
  def test_archives_read_across_slices_merges_results_average(self):
    result = self.ceres_node.read(900, 1470)
    self.assertEqual([0, 1, 2, 3, 4, 0.5, 2.5, 4.5, 6.5, 8], result.values)
  def test_archives_read_across_slices_merges_results_sum(self):
    self.ceres_node.aggregationMethod = 'sum'
    result = self.ceres_node.read(900, 1470)
    self.assertEqual([0, 1, 2, 3, 4, 1, 5, 9, 13, 8], result.values)
  def test_archives_read_across_slices_merges_results_last(self):
    self.ceres_node.aggregationMethod = 'last'
    result = self.ceres_node.read(900, 1470)
    self.assertEqual([0, 1, 2, 3, 4, 1, 3, 5, 7, 8], result.values)
  def test_archives_read_across_slices_merges_results_max(self):
    self.ceres_node.aggregationMethod = 'max'
    result = self.ceres_node.read(900, 1470)
    self.assertEqual([0, 1, 2, 3, 4, 1, 3, 5, 7, 8], result.values)
  def test_archives_read_across_slices_merges_results_min(self):
    self.ceres_node.aggregationMethod = 'min'
    result = self.ceres_node.read(900, 1470)
    self.assertEqual([0, 1, 2, 3, 4, 0, 2, 4, 6, 8], result.values)
  def test_archives_invalid_aggregation_method(self):
    self.ceres_node.aggregationMethod = 'invalid'
    self.assertRaises(InvalidAggregationMethod, self.ceres_node.read, 900, 1500)
  def test_archives_read_pads_points_missing_after_series_across_slices(self):
    result = self.ceres_node.read(900, 1860)
    self.assertEqual(None, result.values[-1])
  def test_archives_read_pads_points_missing_between_slices(self):
    # Replace the older slice with a coarse 300s-step one ending at 900,
    # opening a hole between 900 and 1200.
    self.ceres_slices[1] = make_slice_mock(600, 900, 300)
    result = self.ceres_node.read(600, 1500)
    self.assertEqual([0, None, 4.5], result.values)
| |
import base64
from hashlib import sha1
import types
import socket
try:
from gevent.coros import Semaphore as Lock
except ImportError:
from threading import Lock
from ws4py import WS_KEY
from ws4py.exc import HandshakeError
from ws4py.streaming import Stream
WS_VERSION = 8
class WebSocket(object):
    """WebSocket API for handlers

    This provides a socket-like interface similar to the browser
    WebSocket API for managing a WebSocket connection.
    """
    def __init__(self, sock, protocols, extensions, environ):
        """
        @param sock: opened connection after the HTTP upgrade took place
        @param protocols: list of subprotocols negotiated during the handshake
        @param extensions: list of extensions negotiated during the handshake
        @param environ: WSGI environ of the upgrade request
        """
        self.stream = Stream()
        self.protocols = protocols
        self.extensions = extensions
        self.environ = environ
        self.sock = sock
        # Don't block forever on a silent peer.
        self.sock.settimeout(30.0)
        self.client_terminated = False
        self.server_terminated = False
        # Serializes access to the parsing stream state.
        self._lock = Lock()

    def close(self, code=1000, reason=''):
        """
        Call this method to initiate the websocket connection
        closing by sending a close frame to the connected peer.
        Once this method is called, the server_terminated
        attribute is set. Calling this method several times is
        safe as the closing frame will be sent only the first
        time.

        @param code: status code describing why the connection is closed
        @param reason: a human readable message describing why the connection is closed
        """
        if not self.server_terminated:
            self.server_terminated = True
            self.write_to_connection(self.stream.close(code=code, reason=reason))
            self.close_connection()

    @property
    def terminated(self):
        """
        Returns True if both the client and server have been
        marked as terminated.
        """
        return self.client_terminated is True and self.server_terminated is True

    def write_to_connection(self, bytes):
        """
        Writes the provided bytes to the underlying connection.

        @param bytes: data to send out
        """
        return self.sock.sendall(bytes)

    def read_from_connection(self, amount):
        """
        Reads bytes from the underlying connection.

        @param amount: quantity to read (if possible)
        """
        return self.sock.recv(amount)

    def close_connection(self):
        """
        Shutdowns then closes the underlying connection.
        """
        try:
            self.sock.shutdown(socket.SHUT_RDWR)
            self.sock.close()
        except:
            # Best effort: the peer may already have torn the socket down.
            pass

    def send(self, payload, binary=False):
        """
        Sends the given payload out.

        If payload is some bytes or a bytearray,
        then it is sent as a single message not fragmented.

        If payload is a generator, each chunk is sent as part of
        fragmented message.

        @param payload: string, bytes, bytearray or a generator
        @param binary: if set, handles the payload as a binary message
        """
        if isinstance(payload, basestring) or isinstance(payload, bytearray):
            if not binary:
                self.write_to_connection(self.stream.text_message(payload).single())
            else:
                self.write_to_connection(self.stream.binary_message(payload).single())
        elif type(payload) == types.GeneratorType:
            # Buffer one chunk ahead so the final chunk can be sent with
            # last=True once the generator is exhausted.
            bytes = payload.next()
            first = True
            for chunk in payload:
                if not binary:
                    self.write_to_connection(self.stream.text_message(bytes).fragment(first=first))
                else:
                    # BUG FIX: previously fragmented the generator object
                    # itself ('payload') instead of the buffered chunk.
                    self.write_to_connection(self.stream.binary_message(bytes).fragment(first=first))
                bytes = chunk
                first = False
            if not binary:
                self.write_to_connection(self.stream.text_message(bytes).fragment(last=True))
            else:
                # BUG FIX: the final fragment of a binary message was
                # previously sent via text_message.
                self.write_to_connection(self.stream.binary_message(bytes).fragment(last=True))

    def receive(self, msg_obj=False):
        """
        Performs the operation of reading from the underlying
        connection in order to feed the stream of bytes.

        We start with a small size of two bytes to be read
        from the connection so that we can quickly parse an
        incoming frame header. Then the stream indicates
        whatever size must be read from the connection since
        it knows the frame payload length.

        Note that we perform some automatic operations:

        * On a closing message, we respond with a closing
          message and finally close the connection
        * We respond to pings with pong messages.
        * Whenever an error is raised by the stream parsing,
          we initiate the closing of the connection with the
          appropriate error code.

        @param msg_obj: if set, returns the message object itself instead
          of its string representation
        """
        next_size = 2
        try:
            while not self.terminated:
                bytes = self.read_from_connection(next_size)
                if not bytes and next_size > 0:
                    raise IOError()
                message = None
                with self._lock:
                    s = self.stream
                    next_size = s.parser.send(bytes)
                    if s.closing is not None:
                        if not self.server_terminated:
                            next_size = 2
                            self.close(s.closing.code, s.closing.reason)
                        else:
                            self.client_terminated = True
                        raise IOError()
                    elif s.errors:
                        # BUG FIX: iterate over a snapshot; the original
                        # looped over s.errors while removing from it, which
                        # skips every other error.
                        errors = s.errors[:]
                        for error in errors:
                            self.close(error.code, error.reason)
                            s.errors.remove(error)
                        raise IOError()
                    elif s.has_message:
                        if msg_obj:
                            message = s.message
                            s.message = None
                        else:
                            message = str(s.message)
                            s.message.data = None
                            s.message = None
                    # Answer every pending ping before returning control.
                    for ping in s.pings:
                        self.write_to_connection(s.pong(str(ping.data)))
                    s.pings = []
                    s.pongs = []
                if message is not None:
                    return message
        except IOError:
            self.client_terminated = self.server_terminated = True
            self.close_connection()
            return None
class WebSocketUpgradeMiddleware(object):
    """WSGI middleware for handling WebSocket upgrades"""

    def __init__(self, handle, fallback_app=None, protocols=None, extensions=None,
                 websocket_class=WebSocket):
        """
        @param handle: callable invoked as handle(websocket, environ) once
          the handshake succeeded
        @param fallback_app: WSGI application used when the request is not a
          valid WebSocket handshake (otherwise a 400 response is returned)
        @param protocols: subprotocols the server is willing to accept
        @param extensions: extensions the server is willing to accept
        @param websocket_class: class wrapping the upgraded connection
        """
        self.handle = handle
        self.fallback_app = fallback_app
        self.protocols = protocols
        self.extensions = extensions
        self.websocket_class = websocket_class

    def __call__(self, environ, start_response):
        # Initial handshake validation
        try:
            # BUG FIX: environ.get('upgrade.protocol') may be None (key
            # absent); 'in None' raised TypeError instead of reaching the
            # HandshakeError/fallback path.
            if 'websocket' not in (environ.get('upgrade.protocol') or ''):
                raise HandshakeError("Upgrade protocol is not websocket")

            if environ.get('REQUEST_METHOD') != 'GET':
                raise HandshakeError('Method is not GET')

            key = environ.get('HTTP_SEC_WEBSOCKET_KEY')
            if key:
                # The HyBi key is 16 random bytes, base64-encoded.
                ws_key = base64.b64decode(key)
                if len(ws_key) != 16:
                    raise HandshakeError("WebSocket key's length is invalid")
            else:
                raise HandshakeError("Not a valid HyBi WebSocket request")

            version = environ.get('HTTP_SEC_WEBSOCKET_VERSION')
            if version:
                if version != str(WS_VERSION):
                    raise HandshakeError('Unsupported WebSocket version')
                environ['websocket.version'] = str(WS_VERSION)
            else:
                raise HandshakeError('WebSocket version required')
        except HandshakeError as e:
            if self.fallback_app:
                return self.fallback_app(environ, start_response)
            else:
                start_response("400 Bad Handshake", [])
                return [str(e)]

        # Collect supported subprotocols
        protocols = self.protocols or []
        subprotocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL')
        ws_protocols = []
        if subprotocols:
            for s in subprotocols.split(','):
                s = s.strip()
                if s in protocols:
                    ws_protocols.append(s)

        # Collect supported extensions
        exts = self.extensions or []
        ws_extensions = []
        extensions = environ.get('HTTP_SEC_WEBSOCKET_EXTENSIONS')
        if extensions:
            for ext in extensions.split(','):
                ext = ext.strip()
                if ext in exts:
                    ws_extensions.append(ext)

        # Build and start the HTTP response; the Accept token is
        # base64(sha1(client-key + WS_KEY)) per the HyBi handshake.
        headers = [
            ('Upgrade', 'websocket'),
            ('Connection', 'Upgrade'),
            ('Sec-WebSocket-Version', environ['websocket.version']),
            ('Sec-WebSocket-Accept', base64.b64encode(sha1(key + WS_KEY).digest())),
        ]
        if ws_protocols:
            headers.append(('Sec-WebSocket-Protocol', ', '.join(ws_protocols)))
        if ws_extensions:
            headers.append(('Sec-WebSocket-Extensions', ','.join(ws_extensions)))
        start_response("101 Web Socket Hybi Handshake", headers)

        # Build a websocket object and pass it to the handler.
        # NOTE(review): this implicitly returns None, which is not a valid
        # WSGI iterable; the connection is presumed hijacked by the handler.
        self.handle(
            self.websocket_class(
                environ.get('upgrade.socket'),
                ws_protocols,
                ws_extensions,
                environ),
            environ)
| |
#!/usr/bin/env python3
""" Script to generate Serial (UART) parameters and the ROMFS startup script """
from __future__ import print_function
import argparse
import os
import sys
try:
from jinja2 import Environment, FileSystemLoader
except ImportError as e:
print("Failed to import jinja2: " + str(e))
print("")
print("You may need to install it using:")
print(" pip3 install --user jinja2")
print("")
sys.exit(1)
try:
import yaml
except ImportError as e:
print("Failed to import yaml: " + str(e))
print("")
print("You may need to install it using:")
print(" pip3 install --user pyyaml")
print("")
sys.exit(1)
## Configuration

# All possible Serial ports
# Note: do not re-use or change indexes. When adding a port, always use an
# index that has never been used before. This is important for compatibility
# with QGC (parameter metadata)
serial_ports = {
    # index 0 means disabled
    # index 1000 means ethernet configuration

    # Generic
    # "URT1": {
    #     "label": "UART 1",
    #     "index": 1,
    #     "default_baudrate": 57600,
    # },
    # "URT2": {
    #     "label": "UART 2",
    #     "index": 2,
    #     "default_baudrate": 57600,
    # },
    # "URT3": {
    #     "label": "UART 3",
    #     "index": 3,
    #     "default_baudrate": 57600,
    # },
    # "URT4": {
    #     "label": "UART 4",
    #     "index": 4,
    #     "default_baudrate": 57600,
    # },
    # "URT5": {
    #     "label": "UART 5",
    #     "index": 5,
    #     "default_baudrate": 57600,
    # },
    "URT6": {
        "label": "UART 6",
        "index": 6,
        "default_baudrate": 57600,
    },
    # "URT7": {
    #     "label": "UART 7",
    #     "index": 7,
    #     "default_baudrate": 57600,
    # },
    # "URT8": {
    #     "label": "UART 8",
    #     "index": 8,
    #     "default_baudrate": 57600,
    # },
    # "URT9": {
    #     "label": "UART 9",
    #     "index": 9,
    #     "default_baudrate": 57600,
    # },

    # Telemetry Ports (indexes 10x)
    "TEL1": {  # telemetry link
        "label": "TELEM 1",
        "index": 101,
        "default_baudrate": 57600,
    },
    "TEL2": {  # companion port
        "label": "TELEM 2",
        "index": 102,
        "default_baudrate": 921600,
    },
    "TEL3": {
        "label": "TELEM 3",
        "index": 103,
        "default_baudrate": 57600,
    },
    "TEL4": {
        "label": "TELEM/SERIAL 4",
        "index": 104,
        "default_baudrate": 57600,
    },

    # GPS Ports (indexes 20x); baudrate 0 = auto-detect
    "GPS1": {
        "label": "GPS 1",
        "index": 201,
        "default_baudrate": 0,
    },
    "GPS2": {
        "label": "GPS 2",
        "index": 202,
        "default_baudrate": 0,
    },
    "GPS3": {
        "label": "GPS 3",
        "index": 203,
        "default_baudrate": 0,
    },

    # RC Port
    "RC": {
        "label": "Radio Controller",
        "index": 300,
        "default_baudrate": 0,
    },

    # WIFI Port (PixRacer)
    "WIFI": {
        "label": "Wifi Port",
        "index": 301,
        "default_baudrate": 1,  # set default to an unusable value to detect that this serial port has not been configured
    },
}
# ---- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(description='Generate Serial params & startup script')
parser.add_argument('--serial-ports', type=str, nargs='*', metavar="TAG:DEVICE",
                    default=[],
                    help='Serial ports: mappings from the tag name to the device (e.g. GPS1:/dev/ttyS1)')
parser.add_argument('--config-files', type=str, nargs='*', default=[],
                    help='YAML module config file(s)')
parser.add_argument('--all-ports', action='store_true',
                    help='Generate output for all known ports (params file only)')
parser.add_argument('--constrained-flash', action='store_true',
                    help='Reduce verbosity in ROMFS scripts to reduce flash size')
parser.add_argument('--rc-dir', type=str, action='store',
                    help='ROMFS output directory', default=None)
parser.add_argument('--params-file', type=str, action='store',
                    help='Parameter output file', default=None)
parser.add_argument('--ethernet', action='store_true',
                    help='Ethernet support')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                    help='Verbose Output')

args = parser.parse_args()

# Derived configuration from the parsed arguments.
arg_board_serial_ports = args.serial_ports
verbose = args.verbose
rc_serial_output_dir = args.rc_dir
rc_serial_template = 'rc.serial.jinja'
rc_serial_port_template = 'rc.serial_port.jinja'
serial_params_output_file = args.params_file
serial_params_template = 'serial_params.c.jinja'
generate_for_all_ports = args.all_ports
constrained_flash = args.constrained_flash
ethernet_supported = args.ethernet

if generate_for_all_ports:
    # No device mapping needed when only the params file is generated.
    board_ports = [(key, "") for key in serial_ports]
else:
    # convert arg_board_serial_ports list [ "TAG:DEVICE" ] into [ ("TAG", "DEVICE") ]
    board_ports = [tuple(port.split(":")) for port in arg_board_serial_ports]

if rc_serial_output_dir is None and serial_params_output_file is None:
    raise Exception("At least one of --rc-dir or --params-file "
                    "(e.g. serial_params.c) needs to be specified")

# parse the YAML files
serial_commands = []
ethernet_configuration = []
additional_params = ""
if ethernet_supported:
    # Ethernet is modeled as a pseudo-port with the reserved index 1000.
    ethernet_configuration.append({
        'tag': "ETH",
        'label': "Ethernet",
        'index': 1000
    })
def parse_yaml_serial_config(yaml_config):
    """Extract the 'serial_config' entries from a parsed module YAML config.

    Any entry without a 'label' key gets one, defaulting to the module's
    name. Entries are returned in file order; the dicts inside yaml_config
    are labeled in place.
    """
    if 'serial_config' not in yaml_config:
        return []
    default_label = yaml_config['module_name']
    entries = []
    for entry in yaml_config['serial_config']:
        entry.setdefault('label', default_label)
        entries.append(entry)
    return entries
def parse_yaml_parameters_config(yaml_config, ethernet_supported):
    """ parse the parameters section from the yaml config file

    Renders one C-style PARAM_DEFINE_* comment block per parameter instance.

    :param yaml_config: parsed YAML dict for one module config
    :param ethernet_supported: if False, parameters marked with
        'requires_ethernet' are skipped
    :return: generated C source as a string ('' if there are no parameters)
    """
    if 'parameters' not in yaml_config:
        return ''
    # Bug fix: 'ret' was previously reset inside the section loop (and a
    # section without 'definitions' returned early), so only the last
    # section's output survived when a config had several parameter sections.
    ret = ''
    for parameters_section in yaml_config['parameters']:
        if 'definitions' not in parameters_section:
            continue
        definitions = parameters_section['definitions']
        param_group = parameters_section.get('group', None)
        for param_name in definitions:
            param = definitions[param_name]
            if param.get('requires_ethernet', False) and not ethernet_supported:
                continue
            num_instances = param.get('num_instances', 1)
            instance_start = param.get('instance_start', 0)  # offset

            # get the type and extract all tags
            tags = '@group {:}'.format(param_group)
            if param['type'] == 'enum':
                param_type = 'INT32'
                for key in param['values']:
                    tags += '\n * @value {:} {:}'.format(key, param['values'][key])
            elif param['type'] == 'boolean':
                param_type = 'INT32'
                tags += '\n * @boolean'
            elif param['type'] == 'int32':
                param_type = 'INT32'
            elif param['type'] == 'float':
                param_type = 'FLOAT'
            else:
                raise Exception("unknown param type {:}".format(param['type']))

            for tag in ['decimal', 'increment', 'category', 'volatile', 'bit',
                        'min', 'max', 'unit', 'reboot_required']:
                if tag in param:
                    tags += '\n * @{:} {:}'.format(tag, param[tag])

            for i in range(num_instances):
                # default value: either one entry per instance or a single
                # shared value
                default_value = 0
                if 'default' in param:
                    if isinstance(param['default'], list):
                        assert len(param['default']) == num_instances
                        default_value = param['default'][i]
                    else:
                        default_value = param['default']
                if isinstance(default_value, bool):
                    # booleans are emitted as INT32 0/1
                    default_value = int(default_value)

                # output the existing C-style format; '${i}' placeholders are
                # substituted over the whole rendered block
                ret += '''
/**
 * {short_descr}
 *
 * {long_descr}
 *
 * {tags}
 */
PARAM_DEFINE_{param_type}({name}, {default_value});
'''.format(short_descr=param['description']['short'].replace("\n", "\n * "),
           long_descr=param['description']['long'].replace("\n", "\n * "),
           tags=tags,
           param_type=param_type,
           name=param_name,
           default_value=default_value,
           ).replace('${i}', str(i + instance_start))
    return ret
# load every module config and collect serial commands + parameter definitions
for yaml_file in args.config_files:
    with open(yaml_file, 'r') as stream:
        try:
            yaml_config = yaml.load(stream, Loader=yaml.Loader)
            serial_commands.extend(parse_yaml_serial_config(yaml_config))
            # TODO: additional params should be parsed in a separate script
            additional_params += parse_yaml_parameters_config(
                yaml_config, ethernet_supported)
        except yaml.YAMLError as exc:
            print(exc)
            raise

# sanity check: serial tags are embedded into parameter names, so a tag longer
# than 4 chars would push the generated param names past the 16-char limit
for key in serial_ports:
    if len(key) > 4:
        raise Exception("Serial tag {:} is too long (max length=4)".format(key))

# resolve the board's (tag, device) pairs against the known serial ports
serial_devices = []
for tag, device in board_ports:
    if tag not in serial_ports:
        raise Exception("Unknown serial port {:}. "
                        "You might have to add it to serial_ports in\n {:}".format(tag,
                                                                                   os.path.realpath(__file__)))
    serial_devices.append({
        'tag': tag,
        'device': device,
        'label': serial_ports[tag]["label"],
        'index': serial_ports[tag]["index"],
        'default_baudrate': serial_ports[tag]["default_baudrate"]
    })

# construct commands based on selected board
commands = []
for serial_command in serial_commands:
    num_instances = serial_command.get('num_instances', 1)
    # TODO: use a loop in the script instead of explicitly enumerating all instances
    for i in range(num_instances):
        port_config = serial_command['port_config_param']
        port_param_name = port_config['name'].replace('${i}', str(i))

        # check if a port dependency is specified; skip the command when the
        # board does not have the required port
        if 'depends_on_port' in port_config:
            depends_on_port = port_config['depends_on_port']
            if not any(p['tag'] == depends_on_port for p in serial_devices):
                if verbose:
                    print("Skipping {:} (missing dependent port)".format(port_param_name))
                continue

        default_port = 0  # disabled
        if 'default' in port_config:
            # 'default' is either one tag per instance or a single shared tag
            if type(port_config['default']) == list:
                assert len(port_config['default']) == num_instances
                default_port_str = port_config['default'][i]
            else:
                default_port_str = port_config['default']

            if default_port_str != "":
                if default_port_str not in serial_ports:
                    raise Exception("Default Port {:} not found for {:}".format(default_port_str, serial_command['label']))
                default_port = serial_ports[default_port_str]['index']

        commands.append({
            'command': serial_command['command'],
            'label': serial_command['label'],
            'instance': i,
            'multi_instance': num_instances > 1,
            'port_param_name': port_param_name,
            'default_port': default_port,
            'param_group': port_config['group'],
            'description_extended': port_config.get('description_extended', ''),
            'supports_networking': serial_command.get('supports_networking', False)
        })

if verbose:
    print("Serial Devices: {:}".format(serial_devices))
    #print("Commands: {:}".format(commands))

# templates live next to this script
jinja_env = Environment(loader=FileSystemLoader(
    os.path.dirname(os.path.realpath(__file__))))

# generate the ROMFS script using a jinja template
if rc_serial_output_dir is not None:
    if generate_for_all_ports:
        raise Exception("Cannot create rc file for --all-ports")

    rc_serial_output_file = os.path.join(rc_serial_output_dir, "rc.serial")
    rc_serial_port_output_file = os.path.join(rc_serial_output_dir, "rc.serial_port")

    if verbose: print("Generating {:}".format(rc_serial_output_file))
    if len(serial_devices) == 0:
        # if the board has no UARTs, create an empty rc file
        open(rc_serial_output_file, 'w').close()
    else:
        template = jinja_env.get_template(rc_serial_template)
        with open(rc_serial_output_file, 'w') as fid:
            fid.write(template.render(serial_devices=serial_devices,
                                      commands=commands,
                                      constrained_flash=constrained_flash))
        # NOTE(review): rc.serial_port is only generated when the board has
        # serial devices (it is only sourced from rc.serial) — confirm
        if verbose: print("Generating {:}".format(rc_serial_port_output_file))
        template = jinja_env.get_template(rc_serial_port_template)
        with open(rc_serial_port_output_file, 'w') as fid:
            fid.write(template.render(serial_devices=serial_devices,
                                      ethernet_configuration=ethernet_configuration,
                                      constrained_flash=constrained_flash))

# parameter definitions
if serial_params_output_file is not None:
    if verbose: print("Generating {:}".format(serial_params_output_file))
    template = jinja_env.get_template(serial_params_template)
    with open(serial_params_output_file, 'w') as fid:
        fid.write(template.render(serial_devices=serial_devices,
                                  ethernet_configuration=ethernet_configuration,
                                  commands=commands, serial_ports=serial_ports,
                                  additional_definitions=additional_params))
| |
#Calculate joint histogram and related metrics
from math import sin,cos,pi
import numpy as np
from scipy.ndimage import affine_transform, geometric_transform
from scipy.ndimage.interpolation import rotate,shift,zoom
from scipy.optimize import fmin as fmin_simplex, fmin_powell, fmin_cg
from scipy.optimize import leastsq
from dipy.core import geometry as gm
import pylab
def affine_transform2d(I,M):
    ''' Inspired by the work of Alexis Roche and the independent work of D. Kroon

    Applies the (inverse) affine transform M to the 2d image I using bilinear
    interpolation, with the image center as the origin of the transform.

    Parameters
    ----------
    I: array, shape(N,M), 2d image
    M: inverse transformation matrix 3x3, array, shape (3,3)
    mode:
        0: linear interpolation and outside pixels set to nearest pixel
    Returns
    -------
    Iout: array, shape(N,M), transformed image
    '''
    #the transpose is for contiguous C arrays (default)
    #I=I.T
    #create all x,y indices
    xy=np.array([(i,j) for (i,j) in np.ndindex(I.shape)])
    #image center is now our origin (0,0)
    mean=np.array(I.shape)/2.
    mean=mean.reshape(1,2)
    xyd=xy-mean
    #transformed coordinates: rotate/scale about the center, then translate
    lxy = mean.T + np.dot(M[:2,:2],xyd.T) + M[:2,2].reshape(2,1)
    lxy=lxy.T
    #neighborh pixels for linear interp
    bas0=np.floor(lxy)
    bas1=bas0+1
    #linear interp. constants (bilinear weights of the 4 neighbours)
    com=lxy-bas0
    perc0=(1-com[:,0])*(1-com[:,1])
    perc1=(1-com[:,0])*com[:,1]
    perc2=com[:,0]*(1-com[:,1])
    perc3=com[:,0]*com[:,1]
    #create final image
    Iout=np.zeros(I.shape)
    #zeroing indices outside boundaries: out-of-range neighbour coordinates
    #are clamped to index 0 rather than dropped (the disabled "kill
    #mirroring" block below would zero their contribution instead)
    check_xbas0=np.where(np.bitwise_or(bas0[:,0]<0,bas0[:,0]>=I.shape[0]))
    check_ybas0=np.where(np.bitwise_or(bas0[:,1]<0,bas0[:,1]>=I.shape[1]))
    bas0[check_xbas0,0]=0
    bas0[check_ybas0,1]=0
    check_xbas1=np.where(np.bitwise_or(bas1[:,0]<0,bas1[:,0]>=I.shape[0]))
    check_ybas1=np.where(np.bitwise_or(bas1[:,1]<0,bas1[:,1]>=I.shape[1]))
    bas1[check_xbas1,0]=0
    bas1[check_ybas1,1]=0
    #hold shape
    Ish=I.shape[0]
    #ravel image
    Ione=I.ravel()
    #new intensities of the 4 neighbours of each target pixel
    # NOTE(review): indices combine as x + y*rows (Fortran-order) although
    # ravel() is C-order; callers transpose the result afterwards — confirm
    # this pairing is intended.
    xyz0=Ione[(bas0[:,0]+bas0[:,1]*Ish).astype('int')]
    xyz1=Ione[(bas0[:,0]+bas1[:,1]*Ish).astype('int')]
    xyz2=Ione[(bas1[:,0]+bas0[:,1]*Ish).astype('int')]
    xyz3=Ione[(bas1[:,0]+bas1[:,1]*Ish).astype('int')]
    #kill mirroring
    #xyz0[np.bitwise_or(check_xbas0,check_ybas0)]=0
    #xyz1[np.bitwise_or(check_xbas0,check_ybas1)]=0
    #xyz2[np.bitwise_or(check_xbas1,check_ybas0)]=0
    #xyz3[np.bitwise_or(check_xbas1,check_ybas1)]=0
    #apply recalculated intensities (bilinear blend)
    Iout=xyz0*perc0+xyz1*perc1+xyz2*perc2+xyz3*perc3
    return Iout.reshape(I.shape)
def joint_histogram(A,B,binA,binB):
    ''' Calculate joint histogram and individual histograms for A and B
    ndarrays

    Bins are right-closed intervals (binX[i-1], binX[i]]; the bin edge
    arrays are expected to cover the full data range (e.g. -inf/+inf
    sentinels as in the example below).

    Parameters
    ----------
    A, B: ndarrays
    binA, binB: 1d arrays with the bins
    Returns
    -------
    JH: joint histogram
    HA: histogram for A
    HB: histogram for B
    Example
    -------
    >>> A=np.array([[1,.5,.2,0,0],[.5,1,.5,0,0],[.2,.5,1,0,0],[0,0,0,0,0],[0,0,0,0,0]])
    >>> B=np.array([[0,0,0,0,0],[0,1,.5,.2,0],[0,.5,1,.5,0],[0,.2,.5,1,0],[0,0,0,0,0]])
    >>> bin_A=np.array([-np.inf,.1,.35,.75,np.inf])
    >>> bin_B=np.array([-np.inf,.1,.35,.75,np.inf])
    >>> JH,HA,HB=joint_histogram(A,B,bin_A,bin_B)
    '''
    A=A.ravel()
    B=B.ravel()
    # Bin index for every sample. digitize(..., right=True) implements
    # binX[i-1] < x <= binX[i]; subtract 1 so the first bin has index 0.
    # Bug fix: indices were previously stored in float copies of the data
    # and used directly as array indices, which is invalid on modern NumPy
    # and left out-of-range samples holding their raw values.
    A2=np.digitize(A,binA,right=True)-1
    B2=np.digitize(B,binB,right=True)-1
    # joint histogram: count co-occurring (binA, binB) pairs;
    # np.add.at handles repeated index pairs correctly
    JH=np.zeros((len(binA)-1,len(binB)-1))
    np.add.at(JH,(A2,B2),1)
    # marginal histograms (cast to float to keep the original dtype)
    HA=np.bincount(A2,minlength=len(binA)-1).astype(float)
    HB=np.bincount(B2,minlength=len(binB)-1).astype(float)
    return JH,HA,HB
def mutual_information(A,B,binA,binB):
    ''' Calculate mutual information for A and B

    MI = sum_ij P(i,j) * log2(P(i,j) / (P(i) * P(j))), where the
    probabilities come from the joint and marginal histograms. Empty-bin
    terms evaluate to nan and are treated as zero contributions.
    '''
    JH, HA, HB = joint_histogram(A, B, binA, binB)
    n_samples = float(len(A.ravel()))
    contrib = np.zeros(JH.shape)
    for i, j in np.ndindex(JH.shape):
        p_joint = JH[i, j] / n_samples
        p_a = HA[i] / n_samples
        p_b = HB[j] / n_samples
        contrib[i, j] = p_joint * np.log2(p_joint / (p_a * p_b))
    # 0 * log(0) and divisions by empty marginals produce nan -> zero them
    contrib[np.isnan(contrib)] = 0
    return contrib.sum()
def apply_mapping(A,T,order=0,map_type='affine2d'):
    ''' Apply mapping

    T packs the 2d affine parameters; supported lengths:
      3: [tx, ty, rot]
      4: [tx, ty, rot, scale]             (isotropic scale)
      5: [tx, ty, sx, sy, rot]
      7: [tx, ty, sx, sy, rot, shear_x, shear_y]
    '''
    if map_type=='affine2d':
        # unpack translation / scale / rotation / shear components from T
        n_params = len(T)
        if n_params == 7:
            tx, ty, sx, sy, rot, shx, shy = T
        if n_params == 5:
            tx, ty, sx, sy, rot = T
            shx, shy = (0, 0)
        if n_params == 4:
            tx, ty, rot, s = T
            sx, sy, shx, shy = (s, s, 1, 1)
        if n_params == 3:
            tx, ty, rot = T
            sx, sy, shx, shy = (1, 1, 0, 0)
        # translation
        translation = np.matrix([[1, 0, tx],
                                 [0, 1, ty],
                                 [0, 0, 1]])
        # scaling
        scaling = np.matrix([[sx, 0, 0],
                             [0, sy, 0],
                             [0, 0, 1]])
        # rotation
        rotation = np.matrix([[cos(rot), sin(rot), 0],
                              [-sin(rot), cos(rot), 0],
                              [0, 0, 1]])
        # shear
        shear = np.matrix([[1, shx, 0],
                           [shy, 1, 0],
                           [0, 0, 1]])
        # compose only the components that T actually parameterizes
        if n_params == 3:
            M = translation * rotation
        elif n_params == 4:
            M = translation * scaling * rotation
        elif n_params == 5:
            M = translation * scaling * rotation
        elif n_params == 7:
            M = translation * scaling * rotation * shear
        M = np.array(M)
        AT = affine_transform2d(A, M)
    return AT
def objective_mi(T,A,B,binA,binB,order=0,map_type='affine2d'):
    ''' Objective function for mutual information

    Returns the NEGATIVE mutual information between the transformed source
    A and the target B, so that a minimizer maximizes MI.
    '''
    # NOTE(review): the 'order' parameter is accepted but a literal 0 is
    # forwarded to apply_mapping — confirm whether this is intentional.
    AT=apply_mapping(A,T,order=0,map_type=map_type)
    #AT=np.round(AT)
    # NOTE(review): transpose appears to compensate for the Fortran-order
    # indexing inside affine_transform2d — confirm.
    AT=AT.T
    NegMI= -mutual_information(AT,B,binA,binB)
    # trace the optimizer's progress (Python 2 print statement)
    print '====',T,'====> - MI : ',NegMI
    #pylab.imshow(AT)
    #raw_input('Press Enter...')
    #pylab.imshow(np.hstack((A,B,AT)))
    #raw_input('Press Enter...')
    return NegMI
def objective_sd(T,A,B,order=0,map_type='affine2d'):
    ''' Objective function: mean squared difference between the transformed
    source A and the target B (lower is better).
    '''
    # NOTE(review): 'order' is accepted but a literal 0 is forwarded — confirm.
    AT=apply_mapping(A,T,order=0,map_type=map_type)
    AT=AT.T
    if AT.sum()==0:
        # transform moved the image completely out of the frame: return a
        # huge penalty so the optimizer backs away from this region
        SD=10**15
    else:
        SD= np.sum((AT-B)**2)/np.prod(AT.shape)
    # trace the optimizer's progress (Python 2 print statement)
    print '====',T,'====> SD : ',SD
    #pylab.imshow(np.hstack((A,B,AT)))
    #raw_input('Press Enter...')
    return SD
def register(A,B,guess,metric='sd',binA=None,binB=None,xtol=0.1,ftol=0.01,order=0,map_type='affine2d'):
    ''' Register source A to target B using modified powell's method
    Powell's method tries to minimize the objective function

    metric selects the objective: 'mi' (negative mutual information, needs
    binA/binB) or 'sd' (mean squared difference).
    '''
    if metric=='mi':
        finalT = fmin_powell(
            objective_mi,
            x0=guess,
            args=(A, B, binA, binB, order, map_type),
            xtol=xtol,
            ftol=ftol)
        #finalT=leastsq(func=objective_mi,x0=np.array(guess),args=(A,B,binA,binB,order,map_type))
    if metric=='sd':
        finalT = fmin_powell(
            objective_sd,
            x0=guess,
            args=(A, B, order, map_type),
            xtol=xtol,
            ftol=ftol)
        #finalT=leastsq(func=objective_sd,x0=np.array(guess),args=(A,B,order,map_type))
    return finalT
def evaluate(A,B,guess,metric='sd',binA=None,binB=None,xtol=0.1,ftol=0.01,order=0,map_type='affine2d'):
    '''Exhaustive grid search over x/y translation only.

    Scale and rotation are taken from guess[2:5]; the scale/rotation grids
    below are kept for reference but not searched (a commented-out full
    5-parameter search existed previously). Returns the best
    [tx, ty, sx, sy, rot] found under the squared-difference objective.
    '''
    #tc1,tc2,sc1,sc2,rc=T
    shifts_x = np.linspace(-50, 50, 20)
    shifts_y = np.linspace(-50, 50, 20)
    scales_x = np.linspace(-1.2, 1.2, 10)  # currently unused
    scales_y = np.linspace(-1.2, 1.2, 10)  # currently unused
    rots = np.linspace(0, np.pi, 8)        # currently unused
    best_score = np.inf
    best_T = []
    for cx in shifts_x:
        for cy in shifts_y:
            candidate = [cx, cy, guess[2], guess[3], guess[4]]
            score = objective_sd(candidate, A, B, order=0, map_type='affine2d')
            if score < best_score:
                best_score = score
                best_T = candidate
    return best_T
def test(map_type='affine2d',xtol=0.0001,ftol=0.0001):
    # Manual smoke test: loads two landmark images, zero-pads them and runs
    # the SD-based Powell registration.
    # NOTE(review): Python 2 era script — old PIL 'Image' module, print
    # statements and raw_input.
    import Image
    #pic='/home/eg01/Desktop/brain.jpg'
    #pic='/home/eg01/Desktop/alpha.png'
    pic='/tmp/landmarks1.png'
    #pic='/tmp/lenag2.png'
    imgA=Image.open(pic)
    #imgA=imgA.resize((100,100))
    imgA=imgA.rotate(25)
    A=np.array(imgA).astype('float32')
    # collapse RGB to a single grayscale channel
    A=(A[:,:,0]+A[:,:,1]+A[:,:,2])/3.
    #A=A.sum(axis=-1)/3.
    imgB=imgA.copy()
    pic2='/tmp/landmarks2.png'
    #pic2='/tmp/lenag2.png'
    imgB=Image.open(pic2)
    #imgB=imgB.resize(
    #B=np.array(imgB.rotate(90)).astype('float32')
    B=np.array(imgB).astype('float32')
    B=(B[:,:,0]+B[:,:,1]+B[:,:,2])/3.
    #zero padding: 25-pixel border so transforms don't clip the content
    Z=np.zeros((A.shape[0]+50,A.shape[1]+50))
    Z[25:25+A.shape[0],25:25+A.shape[1]]=A
    A=Z
    Z2=np.zeros((B.shape[0]+50,B.shape[1]+50))
    Z2[25:25+B.shape[0],25:25+B.shape[1]]=B
    B=Z2
    # histogram bins spanning each image's intensity range (for the MI metric)
    binA=np.r_[-np.inf,np.linspace(A.min(),A.max(),30),np.inf]
    binB=np.r_[-np.inf,np.linspace(B.min(),B.max(),30),np.inf]
    if A.ndim==2:
        #guess=np.array([0.,0.,0])
        # initial guess: [tx, ty, sx, sy, rot]
        guess=np.array([0.,0.,1.,1.,0])
        #translation[2], scale[2], rotation[1], shear[2]
        #guess=np.array([0,0,1,1,0,0,0])
    print A.shape
    print B.shape
    #res=register(A,B,guess=guess,metric='sd',xtol=xtol,ftol=ftol,order=0,map_type=map_type)
    #res=register(A,B,guess=guess,metric='mi',binA=binA,binB=binB,xtol=xtol,ftol=ftol,order=0,map_type=map_type)
    #res=evaluate(A,B,guess=guess,metric='sd')
    # hard-coded starting point (previously found with evaluate())
    res=[-44.736842105263158, 44.736842105263165, 1.0,1.]#, 1.0, 0.0]
    #res=guess
    res=register(A,B,guess=res,metric='sd',xtol=xtol,ftol=ftol,order=0,map_type=map_type)
    print res
    #return A,B,
    AR=apply_mapping(A,res,order=0,map_type=map_type)
    pylab.imshow(np.hstack((A,B,AR.T)))
    raw_input('Press Enter...')
    return A,B,AR.T
if __name__ == '__main__':
    # small fixtures matching the joint_histogram docstring example
    A=np.array([[1,.5,.2,0,0],[.5,1,.5,0,0],[.2,.5,1,0,0],[0,0,0,0,0],[0,0,0,0,0]])
    B=np.array([[0,0,0,0,0],[0,1,.5,.2,0],[0,.5,1,.5,0],[0,.2,.5,1,0],[0,0,0,0,0]])
    binA=np.array([-np.Inf,.1,.35,.75,np.Inf])
    binB=np.array([-np.Inf,.1,.35,.75,np.Inf])
| |
# coding: utf-8
# # Test pyIAST for match with competitive Langmuir model
# In the case that the pure-component isotherms $N_{i,pure}(P)$ follow the Langmuir model with the same saturation loading $M$:
#
# $N_{i,pure} = M \frac{K_iP}{1+K_iP},$
#
# The mixed gas adsorption isotherm follows the competitive Langmuir isotherm:
#
# $N_i = M \frac{K_i p_i}{1 + \sum_j K_jp_j},$
#
# where $p_i$ is the partial pressure of component $i$. Here, we generate synthetic pure-component adsorption isotherm data and confirm that pyIAST agrees with the competitive Langmuir isotherm for 3 components.
# In[1]:
from __future__ import absolute_import
import numpy as np
import pyiast
import pandas as pd
import matplotlib.pyplot as plt
from six.moves import range

plt.style.use('fivethirtyeight')

colors = ['b', 'g', 'r']  # for representing each component
component_names = {0: 'A', 1: 'B', 2: 'C'}

# ## Generate synthetic pure-component isotherm data, fit Langmuir models to them.

# Model parameters ($M$, $\{K_i\}$)

# In[2]:

M = 1.0  # saturation loading, shared by all components
langmuirKs = [2.0, 10.0, 20.0]  # K_i

# Generate data according to Langmuir model, store in list of Pandas DataFrames

# In[3]:

pressure = np.logspace(-3, np.log10(10), 20)
dfs = [
    pd.DataFrame({
        'P':
        pressure,
        'L':
        M * langmuirKs[i] * pressure / (1.0 + langmuirKs[i] * pressure)
    }) for i in range(3)
]

# Use pyIAST to fit Langmuir models to the data, then plot fits

# In[4]:

isotherms = [
    pyiast.ModelIsotherm(
        dfs[i], pressure_key='P', loading_key='L', model='Langmuir')
    for i in range(3)
]

for i in range(len(isotherms)):
    isotherms[i].print_params()
    pyiast.plot_isotherm(isotherms[i])

# Plot synthetic data all in one plot for paper

# In[5]:

p_plot = np.logspace(-3, np.log10(11))  # for plotting

fig = plt.figure(facecolor='w')
for i in range(len(isotherms)):
    plt.scatter(dfs[i]['P'], dfs[i]['L'], color=colors[i], s=50, label=None)
    plt.plot(
        p_plot,
        M * langmuirKs[i] * p_plot / (1.0 + langmuirKs[i] * p_plot),
        color=colors[i],
        linewidth=2,
        label=r'$N_%s(P) = \frac{%d P}{1+%dP}$' %
        (component_names[i], langmuirKs[i], langmuirKs[i]))
plt.xlim([-.05 * 10, 1.05 * 10])
plt.ylim([-.05 * M, M * 1.05])
plt.xlabel('Pressure (bar)')
plt.ylabel('Gas uptake (mmol/g)')
plt.legend(loc='lower right')
plt.tight_layout()
plt.savefig(
    'pure_component_Langmuir.png',
    format='png',
    dpi=300,
    facecolor=fig.get_facecolor())
plt.show()

# ## Compare pyIAST predicted component loadings to that of competitive Langmuir
# Let us consider a tertiary mixture of components 0, 1, and 2 above at a total pressure of `total_pressure` bar.

# In[6]:

total_pressure = 1.0

# We will explore gas phase composition space ($\{y_i\}$) by generating random compositions and checking that they are within the triangle. We do not want to get too close to a pure phase boundary because of numerical instability, so we keep a distance `dx` away from pure phases. We will perform `num_tests` tests.

# In[15]:

dx = 0.0001
num_tests = 100

# Generate the compositions and store in list `compositions`

# In[16]:

compositions = []
test_no = 0
while test_no < num_tests:
    # generate random compositions
    y1 = np.random.uniform(dx, 1.0 - dx)
    y2 = np.random.uniform(dx, 1.0 - dx)
    y3 = 1.0 - y2 - y1
    # check that composition is within the triangle (all fractions >= dx);
    # rejected draws are simply retried
    if y3 < dx:
        continue
    # viable composition
    compositions.append([y1, y2, y3])
    # keep generating until we have num_tests
    test_no += 1

# Next, we assert that pyIAST gives the same result as the competitive Langmuir isotherm for each of these compositions.

# Function to compute loading according to competitive Langmuir

# In[17]:
def competitive_langmuir_loading(partial_pressures, i):
    """
    Calculate loading of component i according to competitive Langmuir,
    using the module-level saturation loading M and constants langmuirKs.
    """
    numerator = M * langmuirKs[i] * partial_pressures[i]
    denominator = 1.0 + np.dot(langmuirKs, partial_pressures)
    return numerator / denominator
# Function to compute loading according to pyIAST
# In[10]:
def iast_loading(partial_pressures, i):
    """
    Calculate loading of component i according to IAST

    partial_pressures: Array, partial pressures of each component
    i: component in the mixture
    """
    # pyiast.iast returns the loadings of all components at once
    return pyiast.iast(partial_pressures, isotherms)[i]
# Loop over compositions, assert pyIAST agrees with competitive Langmuir for each component. If this runs, then there is agreement!

# In[14]:

for i in range(num_tests):
    partial_pressure = np.array(compositions[i]) * total_pressure
    # for each component...
    for c in range(len(langmuirKs)):
        np.testing.assert_almost_equal(
            competitive_langmuir_loading(partial_pressure, c),
            iast_loading(partial_pressure, c),
            decimal=4)

# ### This is using a custom library to plot the phase diagrams for the paper.
# Use ternary to plot phase diagram
# https://github.com/marcharper/python-ternary

# In[19]:

import ternary

scale = 10  # resolution in triangle
axis_colors = {'l': colors[1], 'r': colors[0], 'b': colors[2]}
cmaps = ["Blues", "Greens", "Reds"]
iast_or_lang = 'lang'  # which model to visualize: 'lang' or 'iast'

for c in range(3):
    if iast_or_lang == 'lang':
        # NOTE: the lambda binds c late, but it is consumed within this same
        # loop iteration (heatmapf below), so late binding is harmless here
        f = lambda p: competitive_langmuir_loading(p, c)
    else:
        f = lambda p: iast_loading(p, c)

    # loop over component
    fig, ax = plt.subplots(facecolor='w')
    ax.axis("off")
    figure, tax = ternary.figure(ax=ax, scale=scale)
    tax.heatmapf(
        f,
        boundary=False,
        style="hexagonal",
        cmap=plt.cm.get_cmap(cmaps[c]),
        vmax=M,
        vmin=0.0,
        cbarlabel="%s uptake (mmol/g)" % component_names[c])
    tax.boundary(linewidth=2.0, color_dict=axis_colors)
    tax.left_axis_label("$p_1$ (bar)", color=axis_colors['l'], offset=0.16)
    tax.right_axis_label("$p_0$ (bar)", color=axis_colors['r'], offset=0.16)
    tax.bottom_axis_label("$p_2$ (bar)", color=axis_colors['b'], offset=-0.06)
    tax.gridlines(
        color="blue",
        multiple=1,
        linewidth=2,
        horizontal_kwargs={'color': axis_colors['b']},
        left_kwargs={'color': axis_colors['l']},
        right_kwargs={'color': axis_colors['r']},
        alpha=0.7)  # Every 5th gridline, can be a float
    tax.ticks(
        axis='rlb',
        linewidth=1,
        locations=np.arange(scale + 1),
        clockwise=True,
        color_dict=axis_colors,
        ticks=["%.1f" % (1.0 - 1.0 * i / scale) for i in range(scale + 1)],
        offset=0.03)
    tax.clear_matplotlib_ticks()
    tax._redraw_labels()

    # if iast_or_lang == 'iast':
    #     tax.set_title("IAST uptake, component %d" % c, y=1.08, fontsize=14)
    # if iast_or_lang == 'lang':
    #     tax.set_title("Competitive Langmuir uptake, component %d" % c, y=1.08, fontsize=14)
    plt.tight_layout()
    if iast_or_lang == 'iast':
        plt.savefig(
            "Tertiary_diagram_IAST_component_%d.png" % c,
            format='png',
            dpi=300,
            facecolor=fig.get_facecolor())
    if iast_or_lang == 'lang':
        plt.savefig(
            "Tertiary_diagram_Langmuir_component_%d.png" % c,
            format='png',
            dpi=300,
            facecolor=fig.get_facecolor())
    tax.show()

# In[ ]:
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dispatches tests, either sharding or replicating them.
Performs the following steps:
* Create a test collection factory, using the given tests
- If sharding: test collection factory returns the same shared test collection
to all test runners
- If replicating: test collection factory returns a unique test collection to
each test runner, with the same set of tests in each.
* Create a test runner for each device.
* Run each test runner in its own thread, grabbing tests from the test
collection until there are no tests left.
"""
# TODO(jbudorick) Deprecate and remove this class after any relevant parts have
# been ported to the new environment / test instance model.
import logging
import threading
from devil.android import device_errors
from devil.utils import reraiser_thread
from devil.utils import watchdog_timer
from pylib import constants
from pylib.base import base_test_result
from pylib.base import test_collection
DEFAULT_TIMEOUT = 7 * 60 # seven minutes
class _ThreadSafeCounter(object):
"""A threadsafe counter."""
def __init__(self):
self._lock = threading.Lock()
self._value = 0
def GetAndIncrement(self):
"""Get the current value and increment it atomically.
Returns:
The value before incrementing.
"""
with self._lock:
pre_increment = self._value
self._value += 1
return pre_increment
class _Test(object):
"""Holds a test with additional metadata."""
def __init__(self, test, tries=0):
"""Initializes the _Test object.
Args:
test: The test.
tries: Number of tries so far.
"""
self.test = test
self.tries = tries
def _RunTestsFromQueue(runner, collection, out_results, watcher,
                       num_retries, tag_results_with_device=False):
  """Runs tests from the collection until empty using the given runner.

  Adds TestRunResults objects to the out_results list and may add tests to the
  out_retry list.

  Args:
    runner: A TestRunner object used to run the tests.
    collection: A TestCollection from which to get _Test objects to run.
    out_results: A list to add TestRunResults to.
    watcher: A watchdog_timer.WatchdogTimer object, used as a shared timeout.
    num_retries: Number of retries for a test.
    tag_results_with_device: If True, appends the name of the device on which
      the test was run to the test name. Used when replicating to identify
      which device ran each copy of the test, and to ensure each copy of the
      test is recorded separately.
  """

  def TagTestRunResults(test_run_results):
    """Tags all results with the last 4 digits of the device id.

    Used when replicating tests to distinguish the same tests run on different
    devices. We use a set to store test results, so the hash (generated from
    name and tag) must be unique to be considered different results.
    """
    new_test_run_results = base_test_result.TestRunResults()
    for test_result in test_run_results.GetAll():
      test_result.SetName('%s_%s' % (runner.device_serial[-4:],
                                     test_result.GetName()))
      new_test_run_results.AddResult(test_result)
    return new_test_run_results

  for test in collection:
    # Reset the shared watchdog before each test so it only fires when a
    # single test hangs, not when the whole queue takes a long time.
    watcher.Reset()
    try:
      if not runner.device.IsOnline():
        # Device is unresponsive, stop handling tests on this device.
        msg = 'Device %s is unresponsive.' % runner.device_serial
        logging.warning(msg)
        raise device_errors.DeviceUnreachableError(msg)
      result, retry = runner.RunTest(test.test)
      if tag_results_with_device:
        result = TagTestRunResults(result)
      test.tries += 1
      if retry and test.tries <= num_retries:
        # Retry non-passing results, only record passing results.
        pass_results = base_test_result.TestRunResults()
        pass_results.AddResults(result.GetPass())
        out_results.append(pass_results)
        logging.warning('Will retry test %s, try #%s.', retry, test.tries)
        # Re-queue the failing subset so any runner can pick it up.
        collection.add(_Test(test=retry, tries=test.tries))
      else:
        # All tests passed or retry limit reached. Either way, record results.
        out_results.append(result)
    except:
      # An unhandleable exception, ensure tests get run by another device and
      # reraise this exception on the main thread.
      collection.add(test)
      raise
    finally:
      # Retries count as separate tasks so always mark the popped test as done.
      collection.test_completed()
def _SetUp(runner_factory, device, out_runners, threadsafe_counter):
"""Creates a test runner for each device and calls SetUp() in parallel.
Note: if a device is unresponsive the corresponding TestRunner will not be
added to out_runners.
Args:
runner_factory: Callable that takes a device and index and returns a
TestRunner object.
device: The device serial number to set up.
out_runners: List to add the successfully set up TestRunner object.
threadsafe_counter: A _ThreadSafeCounter object used to get shard indices.
"""
try:
index = threadsafe_counter.GetAndIncrement()
logging.warning('Creating shard %s for device %s.', index, device)
runner = runner_factory(device, index)
runner.SetUp()
out_runners.append(runner)
except device_errors.DeviceUnreachableError as e:
logging.warning('Failed to create shard for %s: [%s]', device, e)
def _RunAllTests(runners, test_collection_factory, num_retries, timeout=None,
                 tag_results_with_device=False):
  """Run all tests using the given TestRunners.

  Args:
    runners: A list of TestRunner objects.
    test_collection_factory: A callable to generate a TestCollection object for
      each test runner.
    num_retries: Number of retries for a test.
    timeout: Watchdog timeout in seconds.
    tag_results_with_device: If True, appends the name of the device on which
      the test was run to the test name. Used when replicating to identify
      which device ran each copy of the test, and to ensure each copy of the
      test is recorded separately.

  Returns:
    A tuple of (TestRunResults object, exit code)
  """
  logging.warning('Running tests with %s test %s.',
                  len(runners), 'runners' if len(runners) != 1 else 'runner')
  results = []
  exit_code = 0
  run_results = base_test_result.TestRunResults()
  watcher = watchdog_timer.WatchdogTimer(timeout)
  test_collections = [test_collection_factory() for _ in runners]

  # One worker thread per runner; each drains its (possibly shared) collection.
  threads = [
      reraiser_thread.ReraiserThread(
          _RunTestsFromQueue,
          [r, tc, results, watcher, num_retries, tag_results_with_device],
          name=r.device_serial[-4:])
      for r, tc in zip(runners, test_collections)]

  workers = reraiser_thread.ReraiserThreadGroup(threads)
  workers.StartAll()

  try:
    workers.JoinAll(watcher)
  except device_errors.CommandFailedError:
    logging.exception('Command failed on device.')
  except device_errors.CommandTimeoutError:
    # Bug fix: this handler previously duplicated CommandFailedError, making
    # it unreachable and letting timeouts propagate out of the test run.
    logging.exception('Command timed out on device.')
  except device_errors.DeviceUnreachableError:
    logging.exception('Device became unreachable.')

  if not all((len(tc) == 0 for tc in test_collections)):
    # Some tests were never picked up; record them as UNKNOWN results.
    logging.error('Only ran %d tests (all devices are likely offline).',
                  len(results))
    for tc in test_collections:
      run_results.AddResults(base_test_result.BaseTestResult(
          t, base_test_result.ResultType.UNKNOWN) for t in tc.test_names())

  for r in results:
    run_results.AddTestRunResults(r)
  if not run_results.DidRunPass():
    exit_code = constants.ERROR_EXIT_CODE
  return (run_results, exit_code)
def _CreateRunners(runner_factory, devices, timeout=None):
  """Creates a test runner for each device and calls SetUp() in parallel.

  Note: if a device is unresponsive the corresponding TestRunner will not be
  included in the returned list.

  Args:
    runner_factory: Callable that takes a device and index and returns a
      TestRunner object.
    devices: List of device serial numbers as strings.
    timeout: Watchdog timeout in seconds, defaults to the default timeout.

  Returns:
    A list of TestRunner objects.
  """
  logging.warning('Creating %s test %s.', len(devices),
                  'runners' if len(devices) != 1 else 'runner')
  runners = []
  counter = _ThreadSafeCounter()
  setup_threads = []
  for device in devices:
    setup_threads.append(
        reraiser_thread.ReraiserThread(
            _SetUp,
            [runner_factory, device, runners, counter],
            name=str(device)[-4:]))
  thread_group = reraiser_thread.ReraiserThreadGroup(setup_threads)
  thread_group.StartAll()
  thread_group.JoinAll(watchdog_timer.WatchdogTimer(timeout))
  return runners
def _TearDownRunners(runners, timeout=None):
  """Calls TearDown() for each test runner in parallel.

  Args:
    runners: A list of TestRunner objects.
    timeout: Watchdog timeout in seconds, defaults to the default timeout.
  """
  teardown_threads = [
      reraiser_thread.ReraiserThread(runner.TearDown,
                                     name=runner.device_serial[-4:])
      for runner in runners]
  group = reraiser_thread.ReraiserThreadGroup(teardown_threads)
  group.StartAll()
  group.JoinAll(watchdog_timer.WatchdogTimer(timeout))
def ApplyMaxPerRun(tests, max_per_run):
  """Rearrange the tests so that no group contains more than max_per_run tests.

  Args:
    tests: List of tests, where each entry is either a string of
      colon-separated test names (a group) or an opaque test object.
    max_per_run: Maximum number of tests allowed in one group.

  Returns:
    A list of tests with no more than max_per_run per run.
  """
  tests_expanded = []
  for test_group in tests:
    # isinstance (rather than a type() comparison) so str subclasses are
    # split like plain strings.
    if not isinstance(test_group, str):
      # Do not split test objects which are not strings.
      tests_expanded.append(test_group)
    else:
      test_split = test_group.split(':')
      for i in range(0, len(test_split), max_per_run):
        tests_expanded.append(':'.join(test_split[i:i + max_per_run]))
  return tests_expanded
def RunTests(tests, runner_factory, devices, shard=True,
             test_timeout=DEFAULT_TIMEOUT, setup_timeout=DEFAULT_TIMEOUT,
             num_retries=2, max_per_run=256):
  """Run all tests on attached devices, retrying tests that don't pass.

  Args:
    tests: List of tests to run.
    runner_factory: Callable that takes a device and index and returns a
      TestRunner object.
    devices: List of attached devices.
    shard: True if we should shard, False if we should replicate tests.
      - Sharding tests will distribute tests across all test runners through a
        shared test collection.
      - Replicating tests will copy all tests to each test runner through a
        unique test collection for each test runner.
    test_timeout: Watchdog timeout in seconds for running tests.
    setup_timeout: Watchdog timeout in seconds for creating and cleaning up
      test runners.
    num_retries: Number of retries for a test.
    max_per_run: Maximum number of tests to run in any group.

  Returns:
    A tuple of (base_test_result.TestRunResults object, exit code).
  """
  if not tests:
    logging.critical('No tests to run.')
    return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE)

  tests_expanded = ApplyMaxPerRun(tests, max_per_run)
  if shard:
    # Generate a shared TestCollection object for all test runners, so they
    # draw from a common pool of tests.
    shared_test_collection = test_collection.TestCollection(
        [_Test(t) for t in tests_expanded])
    test_collection_factory = lambda: shared_test_collection
    tag_results_with_device = False
    log_string = 'sharded across devices'
  else:
    # Generate a unique TestCollection object for each test runner, but use
    # the same set of tests.
    test_collection_factory = lambda: test_collection.TestCollection(
        [_Test(t) for t in tests_expanded])
    tag_results_with_device = True
    log_string = 'replicated on each device'

  logging.info('Will run %d tests (%s): %s',
               len(tests_expanded), log_string, str(tests_expanded))
  runners = _CreateRunners(runner_factory, devices, setup_timeout)
  try:
    return _RunAllTests(runners, test_collection_factory,
                        num_retries, test_timeout, tag_results_with_device)
  finally:
    # Always attempt cleanup, even when the run itself raised.
    try:
      _TearDownRunners(runners, setup_timeout)
    except device_errors.DeviceUnreachableError as e:
      logging.warning('Device unresponsive during TearDown: [%s]', e)
    except Exception:  # pylint: disable=broad-except
      logging.exception('Unexpected exception caught during TearDown')
| |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class RegulationList(ListResource):

    def __init__(self, version):
        """
        Initialize the RegulationList

        :param Version version: Version that contains the resource

        :returns: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationList
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationList
        """
        super(RegulationList, self).__init__(version)

        # This list endpoint has no path placeholders, so the solution is empty.
        self._solution = {}
        self._uri = '/RegulatoryCompliance/Regulations'.format(**self._solution)

    def stream(self, end_user_type=values.unset, iso_country=values.unset,
               number_type=values.unset, limit=None, page_size=None):
        """
        Stream RegulationInstance records from the API as a generator.

        Records are fetched lazily, page by page, until the limit is reached,
        which keeps memory use low even for large result sets.

        :param RegulationInstance.EndUserType end_user_type: The type of End User of the Regulation resource
        :param unicode iso_country: The ISO country code of the phone number's country
        :param unicode number_type: The type of phone number being regulated
        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationInstance]
        """
        read_limits = self._version.read_limits(limit, page_size)

        first_page = self.page(
            end_user_type=end_user_type,
            iso_country=iso_country,
            number_type=number_type,
            page_size=read_limits['page_size'],
        )

        return self._version.stream(first_page, read_limits['limit'])

    def list(self, end_user_type=values.unset, iso_country=values.unset,
             number_type=values.unset, limit=None, page_size=None):
        """
        List RegulationInstance records eagerly.

        Unlike stream(), this loads up to ``limit`` records into memory before
        returning them.

        :param RegulationInstance.EndUserType end_user_type: The type of End User of the Regulation resource
        :param unicode iso_country: The ISO country code of the phone number's country
        :param unicode number_type: The type of phone number being regulated
        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit. Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records. If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationInstance]
        """
        record_stream = self.stream(
            end_user_type=end_user_type,
            iso_country=iso_country,
            number_type=number_type,
            limit=limit,
            page_size=page_size,
        )
        return list(record_stream)

    def page(self, end_user_type=values.unset, iso_country=values.unset,
             number_type=values.unset, page_token=values.unset,
             page_number=values.unset, page_size=values.unset):
        """
        Retrieve a single page of RegulationInstance records from the API.
        The request is executed immediately.

        :param RegulationInstance.EndUserType end_user_type: The type of End User of the Regulation resource
        :param unicode iso_country: The ISO country code of the phone number's country
        :param unicode number_type: The type of phone number being regulated
        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of RegulationInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationPage
        """
        data = values.of({
            'EndUserType': end_user_type,
            'IsoCountry': iso_country,
            'NumberType': number_type,
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page(method='GET', uri=self._uri, params=data, )

        return RegulationPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of RegulationInstance records from the API
        using a full URL (as returned in page metadata). The request is
        executed immediately.

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of RegulationInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationPage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )

        return RegulationPage(self._version, response, self._solution)

    def get(self, sid):
        """
        Construct a RegulationContext for a specific resource.

        :param sid: The unique string that identifies the Regulation resource

        :returns: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationContext
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationContext
        """
        return RegulationContext(self._version, sid=sid, )

    def __call__(self, sid):
        """
        Construct a RegulationContext; calling the list is shorthand for get().

        :param sid: The unique string that identifies the Regulation resource

        :returns: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationContext
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationContext
        """
        return self.get(sid)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Numbers.V2.RegulationList>'
class RegulationPage(Page):

    def __init__(self, version, response, solution):
        """
        Initialize the RegulationPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API

        :returns: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationPage
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationPage
        """
        super(RegulationPage, self).__init__(version, response)

        # Carry forward the path solution from the owning list resource.
        self._solution = solution

    def get_instance(self, payload):
        """
        Turn a raw API payload dict into a RegulationInstance.

        :param dict payload: Payload response from the API

        :returns: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationInstance
        """
        return RegulationInstance(self._version, payload, )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Numbers.V2.RegulationPage>'
class RegulationContext(InstanceContext):

    def __init__(self, version, sid):
        """
        Initialize the RegulationContext

        :param Version version: Version that contains the resource
        :param sid: The unique string that identifies the Regulation resource

        :returns: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationContext
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationContext
        """
        super(RegulationContext, self).__init__(version)

        # The sid is the only path placeholder for this resource.
        self._solution = {'sid': sid, }
        self._uri = '/RegulatoryCompliance/Regulations/{sid}'.format(**self._solution)

    def fetch(self):
        """
        Fetch the RegulationInstance

        :returns: The fetched RegulationInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri, )

        return RegulationInstance(self._version, payload, sid=self._solution['sid'], )

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ['{}={}'.format(key, val) for key, val in self._solution.items()]
        return '<Twilio.Numbers.V2.RegulationContext {}>'.format(' '.join(details))
class RegulationInstance(InstanceResource):

    class EndUserType(object):
        # Closed set of end-user types recognized by the API.
        INDIVIDUAL = "individual"
        BUSINESS = "business"

    def __init__(self, version, payload, sid=None):
        """
        Initialize the RegulationInstance

        :returns: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationInstance
        """
        super(RegulationInstance, self).__init__(version)

        # Marshal the payload fields we expose; absent keys become None.
        field_names = ('sid', 'friendly_name', 'iso_country', 'number_type',
                       'end_user_type', 'requirements', 'url')
        self._properties = {name: payload.get(name) for name in field_names}

        # Context is created lazily by the _proxy property.
        self._context = None
        self._solution = {'sid': sid or self._properties['sid'], }

    @property
    def _proxy(self):
        """
        Generate an instance context for the instance. The context is capable
        of performing various actions; all instance actions are proxied to it.

        :returns: RegulationContext for this RegulationInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationContext
        """
        if self._context is None:
            self._context = RegulationContext(self._version, sid=self._solution['sid'], )
        return self._context

    @property
    def sid(self):
        """
        :returns: The unique string that identifies the Regulation resource
        :rtype: unicode
        """
        return self._properties['sid']

    @property
    def friendly_name(self):
        """
        :returns: A human-readable description of the Regulation resource
        :rtype: unicode
        """
        return self._properties['friendly_name']

    @property
    def iso_country(self):
        """
        :returns: The ISO country code of the phone number's country
        :rtype: unicode
        """
        return self._properties['iso_country']

    @property
    def number_type(self):
        """
        :returns: The type of phone number restricted by the regulatory requirement
        :rtype: unicode
        """
        return self._properties['number_type']

    @property
    def end_user_type(self):
        """
        :returns: The type of End User of the Regulation resource
        :rtype: RegulationInstance.EndUserType
        """
        return self._properties['end_user_type']

    @property
    def requirements(self):
        """
        :returns: The sid of a regulation object that dictates requirements
        :rtype: dict
        """
        return self._properties['requirements']

    @property
    def url(self):
        """
        :returns: The absolute URL of the Regulation resource
        :rtype: unicode
        """
        return self._properties['url']

    def fetch(self):
        """
        Fetch the RegulationInstance

        :returns: The fetched RegulationInstance
        :rtype: twilio.rest.numbers.v2.regulatory_compliance.regulation.RegulationInstance
        """
        return self._proxy.fetch()

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ['{}={}'.format(key, val) for key, val in self._solution.items()]
        return '<Twilio.Numbers.V2.RegulationInstance {}>'.format(' '.join(details))
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals

import abc
import os
import warnings
from collections import defaultdict

import six
from six.moves import filter, map

from monty.design_patterns import cached_class
from monty.json import MSONable
from monty.serialization import loadfn

from pymatgen.core.periodic_table import Element
from pymatgen.io.vasp.sets import MITRelaxSet, MPRelaxSet
# Merge conflict resolved: keep sulfide_type alongside oxide_type, since
# AnionCorrection.get_correction needs both helpers.
from pymatgen.analysis.structure_analyzer import oxide_type, sulfide_type
"""
This module implements Compatibility corrections for mixing runs of different
functionals.
"""
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
__author__ = "Shyue Ping Ong, Anubhav Jain, Stephen Dacek, Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 19, 2012"
class CompatibilityError(Exception):
    """
    Signals that an entry cannot be handled by a Compatibility scheme.
    Raised when a correction is attempted on an incompatible calculation.
    """

    def __init__(self, msg):
        # Keep the message on the instance so __str__ can surface it.
        self.msg = msg

    def __str__(self):
        return self.msg
class Correction(six.with_metaclass(abc.ABCMeta, object)):
    """
    Abstract base for correction schemes. A Correction encapsulates a
    pre-defined rule for adjusting a computed entry's energy based on the
    structure's chemistry and the calculation parameters. Subclasses must
    implement get_correction.
    """

    @abc.abstractmethod
    def get_correction(self, entry):
        """
        Compute the energy correction for a single entry.

        Args:
            entry: A ComputedEntry object.

        Returns:
            The energy correction to be applied.

        Raises:
            CompatibilityError if entry is not compatible.
        """
        return

    def correct_entry(self, entry):
        """
        Apply this correction to an entry, mutating its correction field.

        Args:
            entry: A ComputedEntry object.

        Returns:
            The processed entry.

        Raises:
            CompatibilityError if entry is not compatible.
        """
        entry.correction = entry.correction + self.get_correction(entry)
        return entry
class PotcarCorrection(Correction):
    """
    Checks that POTCARs are valid within a pre-defined input set. This
    ensures that calculations performed using different InputSets are not
    compared against each other.

    Entry.parameters must contain a "potcar_symbols" key that is a list of
    all POTCARs used in the run. Again, using the example of an Fe2O3 run
    using Materials Project parameters, this would look like
    entry.parameters["potcar_symbols"] = ['PAW_PBE Fe_pv 06Sep2000',
    'PAW_PBE O 08Apr2002'].

    Args:
        input_set: InputSet class used to generate the runs (used to check
            for correct potcar symbols), e.g. MPRelaxSet.
        check_hash (bool): If true, uses the potcar hash to check for valid
            potcars. If false, uses the potcar symbol (less reliable).
            Defaults to False.

    Raises:
        ValueError if entry does not contain "potcar_symbols" key.
        CompatibilityError if wrong potcar symbols are found.
    """

    def __init__(self, input_set, check_hash=False):
        potcar_settings = input_set.CONFIG["POTCAR"]
        # Newer config formats store a dict per element (with "hash" and
        # "symbol" keys); older ones store the bare symbol string.
        if isinstance(list(potcar_settings.values())[-1],
                      dict):
            if check_hash:
                self.valid_potcars = {k: d["hash"] for k, d in
                                      potcar_settings.items()}
            else:
                self.valid_potcars = {k: d["symbol"] for k, d in
                                      potcar_settings.items()}
        else:
            if check_hash:
                raise ValueError('Cannot check hashes of potcars,'
                                 ' hashes are not set')
            else:
                self.valid_potcars = {k: d for k, d in
                                      potcar_settings.items()}

        self.input_set = input_set
        self.check_hash = check_hash

    def get_correction(self, entry):
        """
        Return 0 if the entry's POTCARs match the input set; otherwise raise.

        Raises:
            CompatibilityError if the POTCAR symbols/hashes do not match.
            ValueError if hash checking is requested without potcar_spec data.
        """
        if self.check_hash:
            if entry.parameters.get("potcar_spec"):
                psp_settings = set([d.get("hash")
                                    for d in entry.parameters[
                                        "potcar_spec"] if d])
            else:
                raise ValueError('Cannot check hash '
                                 'without potcar_spec field')
        else:
            if entry.parameters.get("potcar_spec"):
                psp_settings = set([d.get("titel").split()[1]
                                    for d in entry.parameters[
                                        "potcar_spec"] if d])
            else:
                psp_settings = set([sym.split()[1]
                                    for sym in entry.parameters[
                                        "potcar_symbols"] if sym])

        if {self.valid_potcars[str(el)] for el in
                entry.composition.elements} != psp_settings:
            raise CompatibilityError('Incompatible potcar')
        return 0

    def __str__(self):
        # Merge conflict resolved: input_set is the InputSet *class*
        # (e.g. MPRelaxSet), so its display name is __name__, not .name.
        return "{} Potcar Correction".format(self.input_set.__name__)
@cached_class
class GasCorrection(Correction):
    """
    Correct gas energies to obtain the right formation energies. Note that
    this depends on calculations being run within the same input set.

    Args:
        config_file: Path to the selected compatibility.yaml config file.
    """

    def __init__(self, config_file):
        cfg = loadfn(config_file)
        self.name = cfg['Name']
        self.cpd_energies = cfg['Advanced']['CompoundEnergies']

    def get_correction(self, entry):
        """
        Replace the entry's energy with the tabulated compound energy when
        its reduced formula has one; otherwise apply no correction.
        """
        composition = entry.composition
        formula = composition.reduced_formula
        if formula not in self.cpd_energies:
            return 0
        # Correction shifts the entry so its total energy equals the
        # tabulated per-atom energy times the number of atoms.
        target_energy = self.cpd_energies[formula] * composition.num_atoms
        return target_energy - entry.uncorrected_energy

    def __str__(self):
        return "{} Gas Correction".format(self.name)
@cached_class
class AnionCorrection(Correction):
    """
    Correct anion energies to obtain the right formation energies. Note that
    this depends on calculations being run within the same input set.

    Args:
        config_file: Path to the selected compatibility.yaml config file.
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
    """

    def __init__(self, config_file, correct_peroxide=True):
        c = loadfn(config_file)
        self.oxide_correction = c['OxideCorrections']
        # Older config files may lack sulfide data; default every sulfide
        # type to a zero correction in that case.
        self.sulfide_correction = c.get('SulfideCorrections', defaultdict(
            float))
        self.name = c['Name']
        self.correct_peroxide = correct_peroxide

    def get_correction(self, entry):
        """
        Compute the anion (sulfide/oxide family) correction for an entry.

        Merge conflict resolved in favor of the branch that classifies
        sulfides via sulfide_type() and oxides via oxide_type(), falling
        back to formula matching when no structure is available.
        """
        comp = entry.composition
        if len(comp) == 1:  # Skip element entry
            return 0

        correction = 0
        # Check for sulfide corrections
        if Element("S") in comp:
            sf_type = "sulfide"
            if entry.data.get("sulfide_type"):
                sf_type = entry.data["sulfide_type"]
            elif hasattr(entry, "structure"):
                sf_type = sulfide_type(entry.structure)
            if sf_type in self.sulfide_correction:
                correction += self.sulfide_correction[sf_type] * comp["S"]

        # Check for oxide, peroxide, superoxide, and ozonide corrections.
        if Element("O") in comp:
            if self.correct_peroxide:
                if entry.data.get("oxide_type"):
                    if entry.data["oxide_type"] in self.oxide_correction:
                        ox_corr = self.oxide_correction[
                            entry.data["oxide_type"]]
                        correction += ox_corr * comp["O"]
                    if entry.data["oxide_type"] == "hydroxide":
                        # Hydroxides get the plain oxide correction.
                        ox_corr = self.oxide_correction["oxide"]
                        correction += ox_corr * comp["O"]
                elif hasattr(entry, "structure"):
                    ox_type, nbonds = oxide_type(entry.structure, 1.05,
                                                 return_nbonds=True)
                    if ox_type in self.oxide_correction:
                        correction += self.oxide_correction[ox_type] * \
                            nbonds
                    elif ox_type == "hydroxide":
                        correction += self.oxide_correction["oxide"] * \
                            comp["O"]
                else:
                    warnings.warn(
                        "No structure or oxide_type parameter present. Note"
                        "that peroxide/superoxide corrections are not as "
                        "reliable and relies only on detection of special"
                        "formulas, e.g., Li2O2.")
                    # Fall back to matching against known special formulas.
                    rform = entry.composition.reduced_formula
                    if rform in UCorrection.common_peroxides:
                        correction += self.oxide_correction["peroxide"] * \
                            comp["O"]
                    elif rform in UCorrection.common_superoxides:
                        correction += self.oxide_correction["superoxide"] * \
                            comp["O"]
                    elif rform in UCorrection.ozonides:
                        correction += self.oxide_correction["ozonide"] * \
                            comp["O"]
                    elif Element("O") in comp.elements and len(comp.elements)\
                            > 1:
                        correction += self.oxide_correction['oxide'] * \
                            comp["O"]
            else:
                correction += self.oxide_correction['oxide'] * comp["O"]

        return correction

    def __str__(self):
        return "{} Anion Correction".format(self.name)
@cached_class
class AqueousCorrection(Correction):
    """
    Implements aqueous-phase compound corrections for elements and H2O.

    Args:
        config_file: Path to the selected compatibility.yaml config file.
    """

    def __init__(self, config_file):
        cfg = loadfn(config_file)
        self.cpd_energies = cfg['AqueousCompoundEnergies']
        self.name = cfg["Name"]

    def get_correction(self, entry):
        """
        Compute the aqueous correction for an entry from the tabulated
        compound energies, plus a hydrogen-bonding term for non-H2O phases.
        """
        composition = entry.composition
        formula = composition.reduced_formula
        energies = self.cpd_energies

        correction = 0
        if formula in energies:
            if formula in ("H2", "H2O"):
                # Pin these compounds to their tabulated total energy.
                correction = energies[formula] * composition.num_atoms \
                    - entry.uncorrected_energy - entry.correction
            else:
                correction += energies[formula] * composition.num_atoms
        if formula != "H2O":
            correction += 0.5 * 2.46 * min(composition["H"] / 2.0,
                                           composition["O"])
        return correction

    def __str__(self):
        return "{} Aqueous Correction".format(self.name)
@cached_class
class UCorrection(Correction):
    """
    Implements the GGA/GGA+U mixing scheme, which allows mixing of entries.
    Entry.parameters must contain a "hubbards" key which is a dict of all
    non-zero Hubbard U values used in the calculation. For example, a Fe2O3
    calculation with Materials Project parameters would have
    entry.parameters["hubbards"] = {"Fe": 5.3}. A missing "hubbards" key is
    treated as a GGA run.

    ComputedEntries assimilated via pymatgen.apps.borg, or obtained from the
    Materials Project REST interface, automatically have these fields
    populated.

    Args:
        config_file: Path to the selected compatibility.yaml config file.
        input_set: InputSet object (to check for the +U settings)
        compat_type: Two options, GGA or Advanced. GGA means all GGA+U
            entries are excluded. Advanced means the mixing scheme is
            applied so entries are compatible with each other, but entries
            which should have been run in GGA+U have the equivalent GGA
            entries excluded (e.g. a GGA Fe oxide run is excluded because
            Fe oxides require a U value under the Advanced scheme).
    """
    common_peroxides = ["Li2O2", "Na2O2", "K2O2", "Cs2O2", "Rb2O2", "BeO2",
                        "MgO2", "CaO2", "SrO2", "BaO2"]
    common_superoxides = ["LiO2", "NaO2", "KO2", "RbO2", "CsO2"]
    ozonides = ["LiO3", "NaO3", "KO3", "NaO5"]

    def __init__(self, config_file, input_set, compat_type):
        if compat_type not in ['GGA', 'Advanced']:
            raise CompatibilityError("Invalid compat_type {}"
                                     .format(compat_type))

        cfg = loadfn(config_file)

        self.input_set = input_set
        if compat_type == 'Advanced':
            self.u_settings = self.input_set.CONFIG["INCAR"]["LDAUU"]
            self.u_corrections = cfg["Advanced"]["UCorrections"]
        else:
            # Pure GGA: no U expectations and no corrections.
            self.u_settings = {}
            self.u_corrections = {}

        self.name = cfg["Name"]
        self.compat_type = compat_type

    def get_correction(self, entry):
        """
        Validate the entry's U values against the input set and return the
        sum of per-element U corrections.

        Raises:
            CompatibilityError for HF runs or mismatched U values.
        """
        if entry.parameters.get("run_type", "GGA") == "HF":
            raise CompatibilityError('Invalid run type')

        calc_u = entry.parameters.get("hubbards", None)
        if calc_u is None:
            calc_u = defaultdict(int)

        comp = entry.composition
        # Corrections are keyed by the most electronegative element present.
        ordered = sorted((el for el in comp.elements if comp[el] > 0),
                         key=lambda el: el.X)
        anion_sym = ordered[-1].symbol

        ucorr = self.u_corrections.get(anion_sym, {})
        usettings = self.u_settings.get(anion_sym, {})

        total = 0
        for el in comp.elements:
            sym = el.symbol
            # Reject entries whose U values disagree with the input set.
            if calc_u.get(sym, 0) != usettings.get(sym, 0):
                raise CompatibilityError('Invalid U value of %s on %s' %
                                         (calc_u.get(sym, 0), sym))
            if sym in ucorr:
                total += float(ucorr[sym]) * comp[el]
        return total

    def __str__(self):
        return "{} {} Correction".format(self.name, self.compat_type)
class Compatibility(MSONable):
    """
    Combines a list of corrections to be applied to an entry or a set of
    entries. Note that some Corrections have interdependencies: for example,
    PotcarCorrection must always run before any other compatibility, and
    GasCorrection("MP") must be paired with PotcarCorrection("MP")
    (similarly for "MIT"). Typically you should use the specific
    MaterialsProjectCompatibility and MITCompatibility subclasses instead.

    Args:
        corrections: List of corrections to apply.
    """

    def __init__(self, corrections):
        self.corrections = corrections

    def process_entry(self, entry):
        """
        Process a single entry with the chosen Corrections.

        Args:
            entry: A ComputedEntry object.

        Returns:
            The adjusted entry if it is compatible, otherwise None.
        """
        try:
            applied = self.get_corrections_dict(entry)
        except CompatibilityError:
            # Incompatible entries are signalled by returning None.
            return None
        entry.correction = sum(applied.values())
        return entry

    def get_corrections_dict(self, entry):
        """
        Map each applicable correction's name to its value for an entry.

        Args:
            entry: A ComputedEntry object.

        Returns:
            ({correction_name: value}) with zero-valued corrections omitted.
        """
        applied = {}
        for corr in self.corrections:
            value = corr.get_correction(entry)
            if value != 0:
                applied[str(corr)] = value
        return applied

    def process_entries(self, entries):
        """
        Process a sequence of entries with the chosen Compatibility scheme.

        Args:
            entries: A sequence of entries.

        Returns:
            A list of adjusted entries; incompatible entries are dropped.
        """
        return [e for e in map(self.process_entry, entries) if e]

    def get_explanation_dict(self, entry):
        """
        Provide an explanation dict of the corrections applied for a given
        compatibility scheme, inspired by the "explain" methods in many
        database methodologies.

        Args:
            entry: A ComputedEntry.

        Returns:
            (dict) of the form
            {"Compatibility": "string",
            "Uncorrected_energy": float,
            "Corrected_energy": float,
            "Corrections": [{"Name of Correction": {
            "Value": float, "Explanation": "string"}]}
        """
        processed = self.process_entry(entry)
        if processed is None:
            # Entry was rejected; no corrected energy exists.
            uncorrected = entry.uncorrected_energy
            corrected = None
        else:
            uncorrected = processed.uncorrected_energy
            corrected = processed.energy

        applied = self.get_corrections_dict(entry)
        correction_list = [
            {"name": str(corr),
             # Use the docstring text above the Args section as description.
             "description": corr.__doc__.split("Args")[0].strip(),
             "value": applied.get(str(corr), 0)}
            for corr in self.corrections
        ]
        return {"compatibility": self.__class__.__name__,
                "uncorrected_energy": uncorrected,
                "corrected_energy": corrected,
                "corrections": correction_list}

    def explain(self, entry):
        """
        Print an explanation of the corrections applied for a given
        compatibility scheme, inspired by the "explain" methods in many
        database methodologies.

        Args:
            entry: A ComputedEntry.
        """
        info = self.get_explanation_dict(entry)
        print("The uncorrected value of the energy of %s is %f eV" %
              (entry.composition, info["uncorrected_energy"]))
        print("The following corrections / screening are applied for %s:\n" %
              info["compatibility"])
        for corr in info["corrections"]:
            print("%s correction: %s\n" % (corr["name"],
                                           corr["description"]))
            print("For the entry, this correction has the value %f eV." % corr[
                "value"])
            print("-" * 30)
        print("The final energy after corrections is %f" % info[
            "corrected_energy"])
class MaterialsProjectCompatibility(Compatibility):
    """
    Implements the GGA/GGA+U mixing scheme for VASP runs performed with
    MaterialsProject parameters (see pymatgen.io.vaspio_set.MPVaspInputSet).
    Using this compatibility scheme on runs with different parameters is not
    valid.

    Args:
        compat_type: Two options, GGA or Advanced. GGA means all GGA+U
            entries are excluded. Advanced means the mixing scheme is applied
            so entries are compatible with each other, but entries which
            should have been run in GGA+U have the equivalent GGA entries
            excluded (e.g. a GGA Fe oxide run is excluded).
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
        check_potcar_hash (bool): Use potcar hash to verify potcars are
            correct.
    """

    def __init__(self, compat_type="Advanced", correct_peroxide=True,
                 check_potcar_hash=False):
        self.compat_type = compat_type
        self.correct_peroxide = correct_peroxide
        self.check_potcar_hash = check_potcar_hash
        config_path = os.path.join(MODULE_DIR, "MPCompatibility.yaml")
        # PotcarCorrection must come first (see Compatibility docstring).
        correction_chain = [
            PotcarCorrection(MPRelaxSet, check_hash=check_potcar_hash),
            GasCorrection(config_path),
            AnionCorrection(config_path, correct_peroxide=correct_peroxide),
            UCorrection(config_path, MPRelaxSet, compat_type),
        ]
        super(MaterialsProjectCompatibility, self).__init__(correction_chain)
class MITCompatibility(Compatibility):
    """
    Implements the GGA/GGA+U mixing scheme for VASP runs performed with MIT
    parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using this
    compatibility scheme on runs with different parameters is not valid.

    Args:
        compat_type: Two options, GGA or Advanced. GGA means all GGA+U
            entries are excluded. Advanced means the mixing scheme is applied
            so entries are compatible with each other, but entries which
            should have been run in GGA+U have the equivalent GGA entries
            excluded (e.g. a GGA Fe oxide run is excluded).
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
        check_potcar_hash (bool): Use potcar hash to verify potcars are
            correct.
    """

    def __init__(self, compat_type="Advanced", correct_peroxide=True,
                 check_potcar_hash=False):
        self.compat_type = compat_type
        self.correct_peroxide = correct_peroxide
        self.check_potcar_hash = check_potcar_hash
        config_path = os.path.join(MODULE_DIR, "MITCompatibility.yaml")
        # PotcarCorrection must come first (see Compatibility docstring).
        correction_chain = [
            PotcarCorrection(MITRelaxSet, check_hash=check_potcar_hash),
            GasCorrection(config_path),
            AnionCorrection(config_path, correct_peroxide=correct_peroxide),
            UCorrection(config_path, MITRelaxSet, compat_type),
        ]
        super(MITCompatibility, self).__init__(correction_chain)
class MITAqueousCompatibility(Compatibility):
    """
    Implements the GGA/GGA+U mixing scheme with aqueous-phase corrections for
    VASP runs performed with MIT parameters (see pymatgen.io.vaspio_set
    MITVaspInputSet). Using this compatibility scheme on runs with different
    parameters is not valid.

    Args:
        compat_type: Two options, GGA or Advanced. GGA means all GGA+U
            entries are excluded. Advanced means the mixing scheme is applied
            so entries are compatible with each other, but entries which
            should have been run in GGA+U have the equivalent GGA entries
            excluded (e.g. a GGA Fe oxide run is excluded).
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
        check_potcar_hash (bool): Use potcar hash to verify potcars are
            correct.
    """

    def __init__(self, compat_type="Advanced", correct_peroxide=True,
                 check_potcar_hash=False):
        self.compat_type = compat_type
        self.correct_peroxide = correct_peroxide
        self.check_potcar_hash = check_potcar_hash
        config_path = os.path.join(MODULE_DIR, "MITCompatibility.yaml")
        # Same chain as MITCompatibility plus the aqueous correction.
        correction_chain = [
            PotcarCorrection(MITRelaxSet, check_hash=check_potcar_hash),
            GasCorrection(config_path),
            AnionCorrection(config_path, correct_peroxide=correct_peroxide),
            UCorrection(config_path, MITRelaxSet, compat_type),
            AqueousCorrection(config_path),
        ]
        super(MITAqueousCompatibility, self).__init__(correction_chain)
class MaterialsProjectAqueousCompatibility(Compatibility):
    """
    Implements the GGA/GGA+U mixing scheme with aqueous-phase corrections for
    VASP runs performed with MaterialsProject parameters (see
    pymatgen.io.vaspio_set.MPVaspInputSet). Using this compatibility scheme
    on runs with different parameters is not valid.

    Args:
        compat_type: Two options, GGA or Advanced. GGA means all GGA+U
            entries are excluded. Advanced means the mixing scheme is applied
            so entries are compatible with each other, but entries which
            should have been run in GGA+U have the equivalent GGA entries
            excluded (e.g. a GGA Fe oxide run is excluded).
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
        check_potcar_hash (bool): Use potcar hash to verify potcars are
            correct.
    """

    def __init__(self, compat_type="Advanced", correct_peroxide=True,
                 check_potcar_hash=False):
        self.compat_type = compat_type
        self.correct_peroxide = correct_peroxide
        self.check_potcar_hash = check_potcar_hash
        config_path = os.path.join(MODULE_DIR, "MPCompatibility.yaml")
        # Same chain as MaterialsProjectCompatibility plus the aqueous
        # correction.
        correction_chain = [
            PotcarCorrection(MPRelaxSet, check_hash=check_potcar_hash),
            GasCorrection(config_path),
            AnionCorrection(config_path, correct_peroxide=correct_peroxide),
            UCorrection(config_path, MPRelaxSet, compat_type),
            AqueousCorrection(config_path),
        ]
        super(MaterialsProjectAqueousCompatibility, self).__init__(
            correction_chain)
| |
"""
This module implements the recktype() factory function and DefaultFactory
class for creating lightweight record classes with mutable fields and optional
per-field defaults.
:copyright: (c) 2015 by Mark Richards.
:license: BSD 3-Clause, see LICENSE.txt for more details.
"""
import collections
import keyword
import operator
import sys
__license__ = 'BSD 3-clause'
__version__ = '0.0.0'
__author__ = 'Mark Richards'
__email__ = 'mark.l.a.richardsREMOVETHIS@gmail.com'
def recktype(typename, fieldnames, rename=False):
    """
    Create a new record class with fields accessible by named attributes.
    The new type is a subclass of ``collections.Sequence`` named *typename*.
    The new subclass is used to create record objects that have fields
    accessible by attribute lookup as well as being indexable and
    iterable. Per-field default values can be set. These are assigned
    to fields that are not supplied a value during instantiation.
    Basic example::
        >>> from reck import recktype
        >>> Point3D = recktype('Point3D', 'x y z')  # Create new record type
        >>> p = Point3D(x=1, y=2, z=3)
        >>> # Make a new Point3D class in which 'z' defaults to zero
        >>> Point3D = recktype('Point3D', ['x', 'y', ('z', 0)])
        >>> p = Point3D(x=1, y=2)
        >>> p  # z has been assigned its default value
        Point3D(x=1, y=2, z=0)
    :param typename: Name of the subclass to create, e.g. ``'MyRecord'``.
    :param fieldnames: Specifies the fieldnames and optional per-field
        default values of the record. It can be a single string with each
        fieldname separated by whitespace and/or commas such as ``'x, y'``;
        a sequence of strings such as ``['x', 'y']`` and/or 2-tuples of the
        form ``(fieldname, default_value)`` such as
        ``[('x', None), ('y', None)]``; a mapping of fieldname-default_value
        pairs such as ``collections.OrderedDict([('x', None), ('y', None)])``.
        Note, it only makes sense to use an ordered mapping (e.g.
        ``OrderedDict``) since access by index or iteration is affected by the
        order of the fieldnames.
        A fieldname may be any valid Python identifier except for names
        starting with an underscore.
    :param rename: If set to ``True``, invalid fieldnames are automatically
        replaced with positional names. For example,
        ('abc', 'def', 'ghi', 'abc') is converted to
        ('abc', '_1', 'ghi', '_3'), eliminating the keyword 'def' and the
        duplicate fieldname 'abc'.
    :returns: A subclass of collections.Sequence named *typename*.
    :raises ValueError: if *typename* is invalid; *fieldnames* contains
        an invalid fieldname and rename is ``False``; *fieldnames*
        contains a sequence that is not length 2.
    :raises TypeError: if a fieldname is neither a string or a sequence.
    """
    _validate_typename(typename)
    # NOTE(review): collections.Mapping/Sequence moved to collections.abc in
    # Python 3.3 and the top-level aliases were removed in 3.10 — confirm the
    # supported Python versions before upgrading.
    if isinstance(fieldnames, collections.Mapping):
        # Convert mapping to a sequence of (fieldname, value) tuples
        fieldnames = list(fieldnames.items())
    elif isinstance(fieldnames, str):
        fieldnames = fieldnames.replace(',', ' ').split()
    fieldnames, defaults = _parse_fieldnames(fieldnames, rename)
    default_factory_fields = _get_default_factory_fields(defaults)
    # Create the __dict__ of the new record type:
    # The new type is composed from module-level functions rather than
    # by subclassing a predefined Record base class because this approach
    # offers greater flexibility for constructing different types in the
    # future. The referenced functions (__init__, _update, ...) are defined
    # later in this module; they are resolved when recktype() is called, by
    # which time the module is fully loaded.
    # _fieldnames_set is used to provide fast membership testing
    type_dct = dict(
        # API methods and attributes:
        __init__=__init__,
        _fieldnames=tuple(fieldnames),
        _update=_update,
        _get_defaults=_get_defaults,
        _replace_defaults=_replace_defaults,
        _asdict=_asdict,
        _asitems=_asitems,
        # Need to set _count and _index to the baseclass implementation in case
        # a fieldname attribute overwrites count or index
        _count=collections.Sequence.count,
        _index=collections.Sequence.index,
        # Internal methods and attributes:
        __slots__=tuple(fieldnames),
        _fieldnames_set=frozenset(fieldnames),  # For fast membership testing
        # isinstance() testing is slow so store names of fields with default
        # factories in a set for fast membership testing.
        _default_factory_fields=frozenset(default_factory_fields),
        _nfields=len(fieldnames),  # For speed
        # An operator.attrgetter is stored for each field because it offers
        # a slight speedup over getattr(). TODO: test that this holds true
        # across platforms and python versions
        _attr_getters=tuple(
            [operator.attrgetter(field) for field in fieldnames]),
        _defaults=defaults,
        _check_args=_check_args,
        # Special methods
        __dict__=property(_asdict),
        __eq__=__eq__,
        __ne__=__ne__,
        __getstate__=__getstate__,
        __setstate__=__setstate__,
        __repr__=__repr__,
        __str__=__str__,
        # Sequence-like methods:
        __getitem__=__getitem__,
        __setitem__=__setitem__,
        __len__=__len__,
    )
    rectype = type(typename, (collections.Sequence,), type_dct)
    # Explanation from collections.namedtuple:
    # For pickling to work, the __module__ variable needs to be set to the
    # frame where the record type is created. Bypass this step in
    # environments where sys._getframe is not defined (Jython for example)
    # or sys._getframe is not defined for arguments greater than 0
    # (e.g. IronPython).
    try:
        rectype.__module__ = sys._getframe(1).f_globals.get(
            '__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    return rectype
def __init__(self, *values_by_field_order, **values_by_fieldname):
    """
    Return a new record object.
    Field values can be passed by field order, fieldname, or both.
    The following examples all return a record equivalent to
    ``Rec(a=1, b=2, c=3)``::
        >>> Rec = recktype('Rec', 'a b c')
        >>> rec = Rec(1, 2, 3)                  # using positional args
        >>> rec = Rec(a=1, b=2, c=3)            # using keyword args
        >>> rec = Rec(*[1, 2, 3])               # using an unpacked sequence
        >>> rec = Rec(**dict(a=1, b=2, c=3))    # using an unpacked mapping
        >>> rec = Rec(*[1, 2], c=3)  # unpacked sequence and a keyword arg
        >>> rec
        Rec(a=1, b=2, c=3)
    Since record objects are iterable they can be used to initialise
    other objects of the same type by unpacking them::
        >>> rec2 = Rec(*rec)
        >>> rec2 == rec
        True
    Fields that receive no argument fall back to their default value,
    if one has been defined.
    :param *values_by_field_order: Field values passed by field order.
    :param **kwargs: Field values passed by fieldname.
    :raises TypeError: if the number of positional arguments exceeds the
        number of fields, a keyword argument does not match a fieldname,
        or a keyword argument redefines a positional argument.
    :raises ValueError: if a field has not been defined by the positional
        or keyword arguments and has no default value set.
    """
    self._check_args(values_by_field_order, values_by_fieldname)
    # Positional values bind to fields in declaration order.
    for name, value in zip(self._fieldnames, values_by_field_order):
        setattr(self, name, value)
    # Keyword values; clashes with positionals were rejected above.
    for name, value in values_by_fieldname.items():
        setattr(self, name, value)
    # Any still-unset field falls back to its default (calling the factory
    # for factory-style defaults); a field with no default is an error.
    for name in self._fieldnames:
        if hasattr(self, name):
            continue
        if name not in self._defaults:
            raise ValueError('field {0!r} is not defined'.format(name))
        value = self._defaults[name]
        if name in self._default_factory_fields:
            value = value()  # invoke the default factory
        setattr(self, name, value)
def _update(self, *values_by_field_order, **values_by_fieldname):
    """
    Update field values.
    Update field values with values passed by field order, fieldname, or both.
    Example::
        >>> Rec = recktype('Rec', 'a b c')
        >>> r = Rec(a=1, b=2, c=3)
        >>> r._update(b=5, c=6)  # using keyword arguments
        >>> r
        Rec(a=1, b=5, c=6)
        >>> r._update(2, 3, c=4)  # using positional and keyword arguments
        >>> r
        Rec(a=2, b=3, c=4)
    :param *values_by_field_order: Field values passed by field order.
    :param **values_by_fieldname: Field values passed by fieldname.
    :raises TypeError: if the number of positional arguments exceeds the
        number of fields, a keyword argument does not match a fieldname,
        or a keyword argument redefines a positional argument.
    """
    self._check_args(values_by_field_order, values_by_fieldname)
    # Positional values bind to fields in declaration order.
    for fieldname, value in zip(self._fieldnames, values_by_field_order):
        setattr(self, fieldname, value)
    # Keyword values; clashes with positionals were rejected by _check_args.
    for fieldname in values_by_fieldname:
        setattr(self, fieldname, values_by_fieldname[fieldname])
def _asdict(self):
    """
    Return a new ``collections.OrderedDict`` which maps fieldnames to their
    values.
    """
    pairs = ((name, getattr(self, name)) for name in self._fieldnames)
    return collections.OrderedDict(pairs)
def _asitems(self):
    """
    Return a list of ``(fieldname, value)`` 2-tuples.
    """
    return [(name, getattr(self, name)) for name in self._fieldnames]
@classmethod
def _get_defaults(cls):
    """
    Return a ``dict`` which maps fieldnames to their corresponding
    default value (if they have one). If no default values are set an empty
    ``dict`` is returned.
    ::
        >>> Point = recktype('Point', [('x', None), ('y', None)])
        >>> Point._get_defaults()
        {'x': None, 'y': None}
    """
    # Returns the live class-level mapping (not a copy); mutating it
    # affects the record type itself.
    return cls._defaults
@classmethod
def _replace_defaults(cls, *values_by_field_order, **values_by_fieldname):
    """
    Replace the existing per-field default values.
    The new default field values can be passed by field order, fieldname, or
    both.
    Changing the defaults can be useful if you wish to use the same record
    class in different contexts which require different default values.
    :param *values_by_field_order: Default field values passed by field order.
    :param **values_by_fieldname: Default field values passed by fieldname.
    :raises TypeError: if the number of positional arguments exceeds the
        number of fields, a keyword argument does not match a fieldname,
        or a keyword argument redefines a positional argument.
    """
    cls._check_args(values_by_field_order, values_by_fieldname)
    # Build the replacement mapping from positionals, then keywords.
    new_defaults = dict(zip(cls._fieldnames, values_by_field_order))
    new_defaults.update(values_by_fieldname)
    cls._defaults = new_defaults
    # Re-derive which defaults are factory functions.
    cls._default_factory_fields = frozenset(
        _get_default_factory_fields(new_defaults))
@classmethod
def _check_args(cls, values_by_field_order, values_by_fieldname):
    """
    Check validity of positional and keyword arguments.
    :raises TypeError: on too many positionals, an unknown keyword, or a
        keyword that redefines a positional.
    """
    nargs = len(values_by_field_order)
    if nargs > cls._nfields:
        raise TypeError(
            'takes up to {0} positional arguments but {1} were given'
            .format(cls._nfields, nargs))
    # Every keyword argument must name an actual field.
    for name in values_by_fieldname:
        if name not in cls._fieldnames_set:
            raise TypeError(
                'keyword argument {0!r} does not match a field'
                .format(name))
    # A keyword must not target a field already set positionally; the
    # first nargs fieldnames are the positionally-bound ones.
    for name in cls._fieldnames[:nargs]:
        if name in values_by_fieldname:
            raise TypeError(
                'got multiple values for argument {0!r}'.format(name))
def __eq__(self, other):
    """Records are equal iff they share a class and all field values."""
    if not isinstance(other, self.__class__):
        return False
    # __dict__ is a property returning an OrderedDict of field values.
    return self.__dict__ == other.__dict__
def __ne__(self, other):
    # Inverse of __eq__.
    return not self.__eq__(other)
def __getitem__(self, index):
    """
    Retrieve a field or slice of fields from the record using an index.
    Args:
        index: int or slice object
            Index can be an integer or slice object for normal sequence
            item access.
    Returns:
        If index is an integer the value of the field corresponding to
        the index is returned. If index is a slice a list of field values
        corresponding to the slice indices is returned.
    """
    getters = self._attr_getters
    if isinstance(index, int):
        return getters[index](self)
    # index is a slice: apply each selected attrgetter in turn.
    return [fetch(self) for fetch in getters[index]]
def __setitem__(self, index, value):
    """
    Set a field or slice of fields using an index.
    Note: if index is a slice and value is longer than the slice then
    the surplus values are discarded. This behaviour differs from that
    of list.__setitem__ which inserts the surplus values into the list.
    Similarly, if value contains too few values, the surplus fields are
    left unaffected. With a list, the surplus items are deleted.
    Args:
        index: int or slice object
            Index/slice to be set.
        value: any
            Value to set.
    """
    if isinstance(index, int):
        setattr(self, self._fieldnames[index], value)
        return
    # index is a slice: zip truncates to the shorter of fields/values,
    # which yields the discard/leave-unaffected semantics described above.
    for name, item in zip(self._fieldnames[index], value):
        setattr(self, name, item)
def __getstate__(self):
    """
    Return self as a tuple to allow the record to be pickled.
    """
    # Required because __slots__ types have no instance __dict__ to pickle.
    return tuple(self)
def __setstate__(self, state):
    """
    Re-initialise the record from the unpickled tuple representation.
    """
    # state is the tuple produced by __getstate__, in field order.
    for attr, value in zip(self._fieldnames, state):
        setattr(self, attr, value)
def __len__(self):
    # Length of a record is its (fixed) number of fields.
    return self._nfields
def __repr__(self):
    """Return 'TypeName(field1=..., ...)' using repr() of each value."""
    parts = ('{}={}'.format(name, repr(getattr(self, name)))
             for name in self._fieldnames)
    return '{}({})'.format(self.__class__.__name__, ', '.join(parts))
def __str__(self):
    """Like __repr__, but field values are formatted with str()."""
    parts = ('{}={}'.format(name, str(getattr(self, name)))
             for name in self._fieldnames)
    return '{}({})'.format(self.__class__.__name__, ', '.join(parts))
# ------------------------------------------------------------------------------
# Helper functions
def _get_default_factory_fields(defaults):
    """
    Return a list of fieldnames that have a factory function default.
    :param defaults: a fieldname/default_value mapping.
    """
    # A default is a factory iff it was wrapped in DefaultFactory.
    return [name for name, value in defaults.items()
            if isinstance(value, DefaultFactory)]
def _parse_fieldnames(fieldnames, rename):
    """
    Process a sequence of fieldname strings and/or (fieldname, default) tuples,
    creating a list of corrected fieldnames and a map of fieldname to
    default-values.
    :param fieldnames: sequence of strings and/or (fieldname, default_value)
        2-tuples.
    :param rename: if True, invalid fieldnames are replaced with positional
        names instead of raising.
    :returns: (validated_fieldnames, defaults) 2-tuple.
    :raises ValueError: if a non-string entry does not have length 2, or a
        fieldname is invalid/duplicated and *rename* is False.
    :raises TypeError: if an entry is neither a string nor a sequence.
    """
    defaults = {}
    validated_fieldnames = []
    used_names = set()
    for idx, fieldname in enumerate(fieldnames):
        if isinstance(fieldname, str):
            has_default = False
        else:
            try:
                if len(fieldname) != 2:
                    # Bug fix: the offending value was passed to .format()
                    # but the messages had no placeholder, so it was never
                    # shown to the user.
                    raise ValueError(
                        'fieldname should be a (fieldname, default_value) '
                        '2-tuple: {0!r}'.format(fieldname))
            except TypeError:
                # len() failed: the entry is not a sequence at all.
                raise TypeError(
                    'fieldname should be a string, or a '
                    '(fieldname, default_value) 2-tuple: {0!r}'
                    .format(fieldname))
            has_default = True
            default = fieldname[1]
            fieldname = fieldname[0]
        fieldname = _validate_fieldname(fieldname, used_names, rename, idx)
        validated_fieldnames.append(fieldname)
        used_names.add(fieldname)
        if has_default:
            defaults[fieldname] = default
    return validated_fieldnames, defaults
def _validate_fieldname(fieldname, used_names, rename, idx):
    """
    Return fieldname if it is valid, a renamed fieldname if it is invalid
    and *rename* is True, else raise a ValueError.
    :param fieldname: fieldname to validate.
    :param used_names: set of fieldnames that have already been used.
    :param rename: If True invalid fieldnames are replaced with a valid name.
    :param idx: integer index of fieldname in the class fieldnames sequence.
        Used in the renaming of invalid fieldnames.
    :returns: The fieldname, which may have been renamed if it was invalid and
        rename is True.
    :raises ValueError: if the fieldname is invalid and rename is False.
    """
    try:
        _validate_name(fieldname, 'field')
        # Extra rules that apply to fieldnames only:
        if fieldname.startswith('_'):
            raise ValueError(
                'fieldname cannot start with an underscore: {0!r}'
                .format(fieldname))
        if fieldname in used_names:
            raise ValueError(
                'encountered duplicate fieldname: {0!r}'.format(fieldname))
    except (ValueError, TypeError):
        if not rename:
            raise
        # Renaming requested: substitute a positional placeholder name.
        return '_{0}'.format(idx)
    return fieldname
def _validate_typename(typename):
    """
    Raise a ValueError if typename is invalid.
    """
    # Type names share the identifier/keyword rules but, unlike fieldnames,
    # may start with an underscore.
    _validate_name(typename, 'type')
def _validate_name(name, nametype):
    """
    Perform name validation common to both type names and fieldnames.
    :param name: the candidate name to validate.
    :param nametype: 'type' or 'field'; prefixes the error messages.
    :raises TypeError: if *name* is not a string.
    :raises ValueError: if *name* is not a valid identifier or is a keyword.
    """
    # Bug fix: non-string names previously raised AttributeError from
    # name.isidentifier(); the documented contract (see recktype) is
    # TypeError, which _validate_fieldname also expects to catch.
    if not isinstance(name, str):
        raise TypeError(
            '{0}name must be a string: {1!r}'.format(nametype, name))
    if not name.isidentifier():
        # Bug fix: the message used '{1:!r}', an invalid format spec (the
        # conversion flag must precede the colon), so reporting an invalid
        # identifier raised the wrong ValueError. Also 'identifiers' ->
        # 'identifier'.
        raise ValueError(
            '{0}name must be a valid identifier: {1!r}'
            .format(nametype, name))
    if keyword.iskeyword(name):
        raise ValueError(
            '{0}name cannot be a keyword: {1!r}'.format(nametype, name))
class DefaultFactory(object):
    """
    Wrap a default factory function.
    Default factory functions must be wrapped using this class so that they
    can be distinguished from non-factory callable default values. The *args*
    and *kwargs* arguments can be used to specify optional positional and
    keyword arguments to be passed to the factory function when it is called.
    Example of setting ``list`` as a default factory during record type
    creation::
        >>> from reck import DefaultFactory
        >>> Car = recktype('Car', [
        ...     'make',
        ...     'model',
        ...     ('colours', DefaultFactory(list))])
        >>> car = Car(make='Lotus', model='Exige')
        >>> car.colours.append('Orange')
        >>> car.colours.append('Green')
        >>> car
        Car(make='Lotus', model='Exige', colours=['Orange', 'Green'])
    An example using ``dict`` with positional and keyword arguments
    as a default factory::
        >>> Rec = recktype('Rec', [
        ...     ('a', DefaultFactory(dict, args=[[('b', 2)]], kwargs=dict(c=3)))])
        >>> rec = Rec()  # field 'a' will be set using the default factory
        >>> rec.a
        {'b': 2, 'c': 3}
    :param factory_func: the callable object to be invoked as a default
        factory function (with *args* and *kwargs* if provided).
    :param args: a tuple of arguments for the factory function invocation.
    :param kwargs: a dictionary of keyword arguments for the factory function
        invocation.
    """
    def __init__(self, factory_func, args=(), kwargs=None):
        self._factory_func = factory_func
        self._args = args
        # Bug fix: the default was the mutable literal {}, which is shared
        # between every instance created without an explicit kwargs; use a
        # None sentinel and allocate a fresh dict per instance instead.
        self._kwargs = {} if kwargs is None else kwargs

    def __call__(self):
        # Invoke the wrapped factory with the stored arguments.
        return self._factory_func(*self._args, **self._kwargs)

    def __repr__(self):
        return ('DefaultFactory({0!r}, args={1!r}, kwargs={2!r})'
                .format(self._factory_func, self._args, self._kwargs))
| |
import unittest
from django.conf import settings
from django.core.checks import Error
from django.core.checks.model_checks import _check_lazy_references
from django.core.exceptions import ImproperlyConfigured
from django.db import connections, models
from django.db.models.signals import post_init
from django.test import SimpleTestCase
from django.test.utils import isolate_apps, override_settings
def get_max_column_name_length():
    """
    Return ``(allowed_len, db_alias)``: the smallest column-name length
    limit across the configured databases that both report a limit and do
    not silently truncate names, together with the alias of the database
    imposing that limit. Both values are None if no database qualifies.
    """
    allowed_len = None
    db_alias = None
    for alias in settings.DATABASES.keys():
        connection = connections[alias]
        limit = connection.ops.max_name_length()
        # Skip backends with no limit or that truncate long names themselves.
        if limit is None or connection.features.truncates_names:
            continue
        # Keep the tightest limit seen so far.
        if allowed_len is None or limit < allowed_len:
            allowed_len = limit
            db_alias = alias
    return (allowed_len, db_alias)
@isolate_apps('invalid_models_tests')
class IndexTogetherTests(SimpleTestCase):
    """
    System-check tests for Meta.index_together: non-iterable / non-list
    values (models.E008), non-iterable elements (models.E009), missing
    fields (models.E012), M2M fields (models.E013) and non-local fields
    (models.E016).
    """

    def test_non_iterable(self):
        class Model(models.Model):
            class Meta:
                index_together = 42

        errors = Model.check()
        expected = [
            Error(
                "'index_together' must be a list or tuple.",
                obj=Model,
                id='models.E008',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_non_list(self):
        # A string is iterable but still not an accepted container type.
        class Model(models.Model):
            class Meta:
                index_together = 'not-a-list'

        errors = Model.check()
        expected = [
            Error(
                "'index_together' must be a list or tuple.",
                obj=Model,
                id='models.E008',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_list_containing_non_iterable(self):
        class Model(models.Model):
            class Meta:
                index_together = [('a', 'b'), 42]

        errors = Model.check()
        expected = [
            Error(
                "All 'index_together' elements must be lists or tuples.",
                obj=Model,
                id='models.E009',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                index_together = [
                    ["missing_field"],
                ]

        errors = Model.check()
        expected = [
            Error(
                "'index_together' refers to the non-existent field 'missing_field'.",
                obj=Model,
                id='models.E012',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_non_local_field(self):
        # 'field1' lives on the parent's table (multi-table inheritance),
        # so Bar cannot index it together with its own 'field2'.
        class Foo(models.Model):
            field1 = models.IntegerField()

        class Bar(Foo):
            field2 = models.IntegerField()

            class Meta:
                index_together = [
                    ["field2", "field1"],
                ]

        errors = Bar.check()
        expected = [
            Error(
                "'index_together' refers to field 'field1' which is not "
                "local to model 'Bar'.",
                hint=("This issue may be caused by multi-table inheritance."),
                obj=Bar,
                id='models.E016',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_m2m_field(self):
        class Model(models.Model):
            m2m = models.ManyToManyField('self')

            class Meta:
                index_together = [
                    ["m2m"],
                ]

        errors = Model.check()
        expected = [
            Error(
                "'index_together' refers to a ManyToManyField 'm2m', but "
                "ManyToManyFields are not permitted in 'index_together'.",
                obj=Model,
                id='models.E013',
            ),
        ]
        self.assertEqual(errors, expected)
# unique_together tests are very similar to index_together tests.
@isolate_apps('invalid_models_tests')
class UniqueTogetherTests(SimpleTestCase):
    """
    System-check tests for Meta.unique_together (models.E010-E013).
    Mirrors IndexTogetherTests since both options share validation logic.
    """

    def test_non_iterable(self):
        class Model(models.Model):
            class Meta:
                unique_together = 42

        errors = Model.check()
        expected = [
            Error(
                "'unique_together' must be a list or tuple.",
                obj=Model,
                id='models.E010',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_list_containing_non_iterable(self):
        class Model(models.Model):
            one = models.IntegerField()
            two = models.IntegerField()

            class Meta:
                unique_together = [('a', 'b'), 42]

        errors = Model.check()
        expected = [
            Error(
                "All 'unique_together' elements must be lists or tuples.",
                obj=Model,
                id='models.E011',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_non_list(self):
        # A string is iterable but still rejected.
        class Model(models.Model):
            class Meta:
                unique_together = 'not-a-list'

        errors = Model.check()
        expected = [
            Error(
                "'unique_together' must be a list or tuple.",
                obj=Model,
                id='models.E010',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_valid_model(self):
        class Model(models.Model):
            one = models.IntegerField()
            two = models.IntegerField()

            class Meta:
                # unique_together can be a simple tuple
                unique_together = ('one', 'two')

        errors = Model.check()
        self.assertEqual(errors, [])

    def test_pointing_to_missing_field(self):
        class Model(models.Model):
            class Meta:
                unique_together = [
                    ["missing_field"],
                ]

        errors = Model.check()
        expected = [
            Error(
                "'unique_together' refers to the non-existent field 'missing_field'.",
                obj=Model,
                id='models.E012',
            ),
        ]
        self.assertEqual(errors, expected)

    def test_pointing_to_m2m(self):
        class Model(models.Model):
            m2m = models.ManyToManyField('self')

            class Meta:
                unique_together = [
                    ["m2m"],
                ]

        errors = Model.check()
        expected = [
            Error(
                "'unique_together' refers to a ManyToManyField 'm2m', but "
                "ManyToManyFields are not permitted in 'unique_together'.",
                obj=Model,
                id='models.E013',
            ),
        ]
        self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class FieldNamesTests(SimpleTestCase):
    """
    Checks on field and column names: trailing underscore (fields.E001),
    '__' in a name (fields.E002), the reserved name 'pk' (fields.E003),
    and database column-name length limits (models.E018/E019).
    """

    def test_ending_with_underscore(self):
        class Model(models.Model):
            field_ = models.CharField(max_length=10)
            m2m_ = models.ManyToManyField('self')

        errors = Model.check()
        expected = [
            Error(
                'Field names must not end with an underscore.',
                obj=Model._meta.get_field('field_'),
                id='fields.E001',
            ),
            Error(
                'Field names must not end with an underscore.',
                obj=Model._meta.get_field('m2m_'),
                id='fields.E001',
            ),
        ]
        self.assertEqual(errors, expected)

    # Evaluated once at class-definition time; the values are available both
    # as class attributes (self.max_column_name_length) and to the skipIf
    # decorators below, which execute inside the class-body namespace.
    max_column_name_length, column_limit_db_alias = get_max_column_name_length()

    @unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.")
    def test_M2M_long_column_name(self):
        """
        #13711 -- Model check for long M2M column names when database has
        column name length limits.
        """
        allowed_len, db_alias = get_max_column_name_length()

        # A model with very long name which will be used to set relations to.
        class VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(models.Model):
            title = models.CharField(max_length=11)

        # Main model for which checks will be performed.
        class ModelWithLongField(models.Model):
            m2m_field = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name="rn1"
            )
            m2m_field2 = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name="rn2", through='m2msimple'
            )
            m2m_field3 = models.ManyToManyField(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                related_name="rn3",
                through='m2mcomplex'
            )
            fk = models.ForeignKey(
                VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
                models.CASCADE,
                related_name="rn4",
            )

        # Models used for setting `through` in M2M field.
        class m2msimple(models.Model):
            id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)

        class m2mcomplex(models.Model):
            id2 = models.ForeignKey(ModelWithLongField, models.CASCADE)

        long_field_name = 'a' * (self.max_column_name_length + 1)
        models.ForeignKey(
            VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
            models.CASCADE,
        ).contribute_to_class(m2msimple, long_field_name)
        models.ForeignKey(
            VeryLongModelNamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,
            models.CASCADE,
            db_column=long_field_name
        ).contribute_to_class(m2mcomplex, long_field_name)

        errors = ModelWithLongField.check()

        # First error because of M2M field set on the model with long name.
        m2m_long_name = "verylongmodelnamezzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz_id"
        if self.max_column_name_length > len(m2m_long_name):
            # Some databases support names longer than the test name.
            expected = []
        else:
            expected = [
                Error(
                    'Autogenerated column name too long for M2M field "%s". '
                    'Maximum length is "%s" for database "%s".'
                    % (m2m_long_name, self.max_column_name_length, self.column_limit_db_alias),
                    hint="Use 'through' to create a separate model for "
                         "M2M and then set column_name using 'db_column'.",
                    obj=ModelWithLongField,
                    id='models.E019',
                )
            ]

        # Second error because the FK specified in the `through` model
        # `m2msimple` has auto-generated name longer than allowed.
        # There will be no check errors in the other M2M because it
        # specifies db_column for the FK in `through` model even if the actual
        # name is longer than the limits of the database.
        expected.append(
            Error(
                'Autogenerated column name too long for M2M field "%s_id". '
                'Maximum length is "%s" for database "%s".'
                % (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
                hint="Use 'through' to create a separate model for "
                     "M2M and then set column_name using 'db_column'.",
                obj=ModelWithLongField,
                id='models.E019',
            )
        )
        self.assertEqual(errors, expected)

    @unittest.skipIf(max_column_name_length is None, "The database doesn't have a column name length limit.")
    def test_local_field_long_column_name(self):
        """
        #13711 -- Model check for long column names
        when database does not support long names.
        """
        allowed_len, db_alias = get_max_column_name_length()

        class ModelWithLongField(models.Model):
            title = models.CharField(max_length=11)

        long_field_name = 'a' * (self.max_column_name_length + 1)
        long_field_name2 = 'b' * (self.max_column_name_length + 1)
        models.CharField(max_length=11).contribute_to_class(ModelWithLongField, long_field_name)
        models.CharField(max_length=11, db_column='vlmn').contribute_to_class(ModelWithLongField, long_field_name2)
        errors = ModelWithLongField.check()

        # Error because of the field with long name added to the model
        # without specifying db_column
        expected = [
            Error(
                'Autogenerated column name too long for field "%s". '
                'Maximum length is "%s" for database "%s".'
                % (long_field_name, self.max_column_name_length, self.column_limit_db_alias),
                hint="Set the column name manually using 'db_column'.",
                obj=ModelWithLongField,
                id='models.E018',
            )
        ]
        self.assertEqual(errors, expected)

    def test_including_separator(self):
        class Model(models.Model):
            some__field = models.IntegerField()

        errors = Model.check()
        expected = [
            Error(
                'Field names must not contain "__".',
                obj=Model._meta.get_field('some__field'),
                id='fields.E002',
            )
        ]
        self.assertEqual(errors, expected)

    def test_pk(self):
        class Model(models.Model):
            pk = models.IntegerField()

        errors = Model.check()
        expected = [
            Error(
                "'pk' is a reserved word that cannot be used as a field name.",
                obj=Model._meta.get_field('pk'),
                id='fields.E003',
            )
        ]
        self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class ShadowingFieldsTests(SimpleTestCase):
    """
    Checks for fields that clash with inherited fields or with each other
    (models.E005/E006): parent-accessor clashes, multiple-inheritance
    clashes, FK attname ('<name>_id') clashes.
    """

    def test_field_name_clash_with_child_accessor(self):
        # The implicit parent-link accessor on Parent is named 'child'.
        class Parent(models.Model):
            pass

        class Child(Parent):
            child = models.CharField(max_length=100)

        errors = Child.check()
        expected = [
            Error(
                "The field 'child' clashes with the field "
                "'child' from model 'invalid_models_tests.parent'.",
                obj=Child._meta.get_field('child'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)

    def test_multiinheritance_clash(self):
        class Mother(models.Model):
            clash = models.IntegerField()

        class Father(models.Model):
            clash = models.IntegerField()

        class Child(Mother, Father):
            # Here we have two clashed: id (automatic field) and clash, because
            # both parents define these fields.
            pass

        errors = Child.check()
        expected = [
            Error(
                "The field 'id' from parent model "
                "'invalid_models_tests.mother' clashes with the field 'id' "
                "from parent model 'invalid_models_tests.father'.",
                obj=Child,
                id='models.E005',
            ),
            Error(
                "The field 'clash' from parent model "
                "'invalid_models_tests.mother' clashes with the field 'clash' "
                "from parent model 'invalid_models_tests.father'.",
                obj=Child,
                id='models.E005',
            )
        ]
        self.assertEqual(errors, expected)

    def test_inheritance_clash(self):
        class Parent(models.Model):
            f_id = models.IntegerField()

        class Target(models.Model):
            # This field doesn't result in a clash.
            f_id = models.IntegerField()

        class Child(Parent):
            # This field clashes with parent "f_id" field.
            f = models.ForeignKey(Target, models.CASCADE)

        errors = Child.check()
        expected = [
            Error(
                "The field 'f' clashes with the field 'f_id' "
                "from model 'invalid_models_tests.parent'.",
                obj=Child._meta.get_field('f'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)

    def test_multigeneration_inheritance(self):
        # The clash is detected across several inheritance levels.
        class GrandParent(models.Model):
            clash = models.IntegerField()

        class Parent(GrandParent):
            pass

        class Child(Parent):
            pass

        class GrandChild(Child):
            clash = models.IntegerField()

        errors = GrandChild.check()
        expected = [
            Error(
                "The field 'clash' clashes with the field 'clash' "
                "from model 'invalid_models_tests.grandparent'.",
                obj=GrandChild._meta.get_field('clash'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)

    def test_id_clash(self):
        # 'fk_id' collides with the FK's generated attname.
        class Target(models.Model):
            pass

        class Model(models.Model):
            fk = models.ForeignKey(Target, models.CASCADE)
            fk_id = models.IntegerField()

        errors = Model.check()
        expected = [
            Error(
                "The field 'fk_id' clashes with the field 'fk' from model "
                "'invalid_models_tests.model'.",
                obj=Model._meta.get_field('fk_id'),
                id='models.E006',
            )
        ]
        self.assertEqual(errors, expected)
@isolate_apps('invalid_models_tests')
class OtherModelTests(SimpleTestCase):
    def test_unique_primary_key(self):
        # A field named 'id' that does not set primary_key=True hides the
        # auto-generated pk and must be rejected (models.E004).
        invalid_id = models.IntegerField(primary_key=False)

        class Model(models.Model):
            id = invalid_id

        errors = Model.check()
        expected = [
            Error(
                "'id' can only be used as a field name if the field also sets "
                "'primary_key=True'.",
                obj=Model,
                id='models.E004',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_ordering_non_iterable(self):
        # A bare string for Meta.ordering triggers models.E014.
        class Model(models.Model):
            class Meta:
                ordering = "missing_field"

        errors = Model.check()
        expected = [
            Error(
                "'ordering' must be a tuple or list "
                "(even if you want to order by only one field).",
                obj=Model,
                id='models.E014',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_just_ordering_no_errors(self):
        # A valid ordering by an existing field produces no check errors.
        class Model(models.Model):
            order = models.PositiveIntegerField()

            class Meta:
                ordering = ['order']

        self.assertEqual(Model.check(), [])
    def test_just_order_with_respect_to_no_errors(self):
        # order_with_respect_to pointing at an existing FK is valid.
        class Question(models.Model):
            pass

        class Answer(models.Model):
            question = models.ForeignKey(Question, models.CASCADE)

            class Meta:
                order_with_respect_to = 'question'

        self.assertEqual(Answer.check(), [])
    def test_ordering_with_order_with_respect_to(self):
        # ordering and order_with_respect_to are mutually exclusive
        # (models.E021).
        class Question(models.Model):
            pass

        class Answer(models.Model):
            question = models.ForeignKey(Question, models.CASCADE)
            order = models.IntegerField()

            class Meta:
                order_with_respect_to = 'question'
                ordering = ['order']

        errors = Answer.check()
        expected = [
            Error(
                "'ordering' and 'order_with_respect_to' cannot be used together.",
                obj=Answer,
                id='models.E021',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_non_valid(self):
        # Ordering by a ManyToManyField is reported as a non-existent field
        # (models.E015), since M2M relations cannot be ordered by directly.
        class RelationModel(models.Model):
            pass

        class Model(models.Model):
            relation = models.ManyToManyField(RelationModel)

            class Meta:
                ordering = ['relation']

        errors = Model.check()
        expected = [
            Error(
                "'ordering' refers to the non-existent field 'relation'.",
                obj=Model,
                id='models.E015',
            ),
        ]
        self.assertEqual(errors, expected)
    def test_ordering_pointing_to_missing_field(self):
        # ordering naming an unknown field triggers models.E015.
        class Model(models.Model):
            class Meta:
                ordering = ("missing_field",)

        errors = Model.check()
        expected = [
            Error(
                "'ordering' refers to the non-existent field 'missing_field'.",
                obj=Model,
                id='models.E015',
            )
        ]
        self.assertEqual(errors, expected)
def test_ordering_pointing_to_missing_foreignkey_field(self):
# refs #22711
class Model(models.Model):
missing_fk_field = models.IntegerField()
class Meta:
ordering = ("missing_fk_field_id",)
errors = Model.check()
expected = [
Error(
"'ordering' refers to the non-existent field 'missing_fk_field_id'.",
obj=Model,
id='models.E015',
)
]
self.assertEqual(errors, expected)
def test_ordering_pointing_to_existing_foreignkey_field(self):
# refs #22711
class Parent(models.Model):
pass
class Child(models.Model):
parent = models.ForeignKey(Parent, models.CASCADE)
class Meta:
ordering = ("parent_id",)
self.assertFalse(Child.check())
def test_name_beginning_with_underscore(self):
class _Model(models.Model):
pass
self.assertEqual(_Model.check(), [
Error(
"The model name '_Model' cannot start or end with an underscore "
"as it collides with the query lookup syntax.",
obj=_Model,
id='models.E023',
)
])
def test_name_ending_with_underscore(self):
class Model_(models.Model):
pass
self.assertEqual(Model_.check(), [
Error(
"The model name 'Model_' cannot start or end with an underscore "
"as it collides with the query lookup syntax.",
obj=Model_,
id='models.E023',
)
])
def test_name_contains_double_underscores(self):
class Test__Model(models.Model):
pass
self.assertEqual(Test__Model.check(), [
Error(
"The model name 'Test__Model' cannot contain double underscores "
"as it collides with the query lookup syntax.",
obj=Test__Model,
id='models.E024',
)
])
@override_settings(TEST_SWAPPED_MODEL_BAD_VALUE='not-a-model')
def test_swappable_missing_app_name(self):
class Model(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_VALUE'
errors = Model.check()
expected = [
Error(
"'TEST_SWAPPED_MODEL_BAD_VALUE' is not of the form 'app_label.app_name'.",
id='models.E001',
),
]
self.assertEqual(errors, expected)
@override_settings(TEST_SWAPPED_MODEL_BAD_MODEL='not_an_app.Target')
def test_swappable_missing_app(self):
class Model(models.Model):
class Meta:
swappable = 'TEST_SWAPPED_MODEL_BAD_MODEL'
errors = Model.check()
expected = [
Error(
"'TEST_SWAPPED_MODEL_BAD_MODEL' references 'not_an_app.Target', "
'which has not been installed, or is abstract.',
id='models.E002',
),
]
self.assertEqual(errors, expected)
def test_two_m2m_through_same_relationship(self):
class Person(models.Model):
pass
class Group(models.Model):
primary = models.ManyToManyField(Person, through="Membership", related_name="primary")
secondary = models.ManyToManyField(Person, through="Membership", related_name="secondary")
class Membership(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
group = models.ForeignKey(Group, models.CASCADE)
errors = Group.check()
expected = [
Error(
"The model has two many-to-many relations through "
"the intermediate model 'invalid_models_tests.Membership'.",
obj=Group,
id='models.E003',
)
]
self.assertEqual(errors, expected)
    def test_missing_parent_link(self):
        """An explicit OneToOneField to an MTI parent without parent_link=True
        raises ImproperlyConfigured."""
        msg = 'Add parent_link=True to invalid_models_tests.ParkingLot.parent.'
        # The error fires at class-definition time, so the model bodies must
        # live inside the assertRaisesMessage context.
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            class Place(models.Model):
                pass
            class ParkingLot(Place):
                parent = models.OneToOneField(Place, models.CASCADE)
def test_m2m_table_name_clash(self):
class Foo(models.Model):
bar = models.ManyToManyField('Bar', db_table='myapp_bar')
class Meta:
db_table = 'myapp_foo'
class Bar(models.Model):
class Meta:
db_table = 'myapp_bar'
self.assertEqual(Foo.check(), [
Error(
"The field's intermediary table 'myapp_bar' clashes with the "
"table name of 'invalid_models_tests.Bar'.",
obj=Foo._meta.get_field('bar'),
id='fields.E340',
)
])
def test_m2m_field_table_name_clash(self):
class Foo(models.Model):
pass
class Bar(models.Model):
foos = models.ManyToManyField(Foo, db_table='clash')
class Baz(models.Model):
foos = models.ManyToManyField(Foo, db_table='clash')
self.assertEqual(Bar.check() + Baz.check(), [
Error(
"The field's intermediary table 'clash' clashes with the "
"table name of 'invalid_models_tests.Baz.foos'.",
obj=Bar._meta.get_field('foos'),
id='fields.E340',
),
Error(
"The field's intermediary table 'clash' clashes with the "
"table name of 'invalid_models_tests.Bar.foos'.",
obj=Baz._meta.get_field('foos'),
id='fields.E340',
)
])
def test_m2m_autogenerated_table_name_clash(self):
class Foo(models.Model):
class Meta:
db_table = 'bar_foos'
class Bar(models.Model):
# The autogenerated `db_table` will be bar_foos.
foos = models.ManyToManyField(Foo)
class Meta:
db_table = 'bar'
self.assertEqual(Bar.check(), [
Error(
"The field's intermediary table 'bar_foos' clashes with the "
"table name of 'invalid_models_tests.Foo'.",
obj=Bar._meta.get_field('foos'),
id='fields.E340',
)
])
    def test_m2m_unmanaged_shadow_models_not_checked(self):
        """Table-name reuse by unmanaged shadow models is not reported."""
        class A1(models.Model):
            pass
        class C1(models.Model):
            mm_a = models.ManyToManyField(A1, db_table='d1')
        # Unmanaged models that shadow the above models. Reused table names
        # shouldn't be flagged by any checks.
        class A2(models.Model):
            class Meta:
                managed = False
        class C2(models.Model):
            mm_a = models.ManyToManyField(A2, through='Intermediate')
            class Meta:
                managed = False
        class Intermediate(models.Model):
            # Same table ('d1') and columns as C1's autogenerated M2M table.
            a2 = models.ForeignKey(A2, models.CASCADE, db_column='a1_id')
            c2 = models.ForeignKey(C2, models.CASCADE, db_column='c1_id')
            class Meta:
                db_table = 'd1'
                managed = False
        self.assertEqual(C1.check(), [])
        self.assertEqual(C2.check(), [])
    def test_m2m_to_concrete_and_proxy_allowed(self):
        """One model may have M2Ms through a model and through its proxy."""
        class A(models.Model):
            pass
        class Through(models.Model):
            a = models.ForeignKey('A', models.CASCADE)
            c = models.ForeignKey('C', models.CASCADE)
        class ThroughProxy(Through):
            class Meta:
                proxy = True
        class C(models.Model):
            mm_a = models.ManyToManyField(A, through=Through)
            mm_aproxy = models.ManyToManyField(A, through=ThroughProxy, related_name='proxied_m2m')
        self.assertEqual(C.check(), [])
    @isolate_apps('django.contrib.auth', kwarg_name='apps')
    def test_lazy_reference_checks(self, apps):
        """Unresolved lazy references (model operations, signal senders, FK
        string targets) are each reported with a descriptive error."""
        class DummyModel(models.Model):
            # 'Author' is never defined, producing a dangling FK reference.
            author = models.ForeignKey('Author', models.CASCADE)
            class Meta:
                app_label = 'invalid_models_tests'
        class DummyClass:
            def __call__(self, **kwargs):
                pass
            def dummy_method(self):
                pass
        def dummy_function(*args, **kwargs):
            pass
        # Lazy operations / signal connections that can never be resolved.
        apps.lazy_model_operation(dummy_function, ('auth', 'imaginarymodel'))
        apps.lazy_model_operation(dummy_function, ('fanciful_app', 'imaginarymodel'))
        post_init.connect(dummy_function, sender='missing-app.Model', apps=apps)
        post_init.connect(DummyClass(), sender='missing-app.Model', apps=apps)
        post_init.connect(DummyClass().dummy_method, sender='missing-app.Model', apps=apps)
        # NOTE(review): entry order must match what _check_lazy_references()
        # emits — confirm before reordering.
        expected = [
            Error(
                "%r contains a lazy reference to auth.imaginarymodel, "
                "but app 'auth' doesn't provide model 'imaginarymodel'." % dummy_function,
                obj=dummy_function,
                id='models.E022',
            ),
            Error(
                "%r contains a lazy reference to fanciful_app.imaginarymodel, "
                "but app 'fanciful_app' isn't installed." % dummy_function,
                obj=dummy_function,
                id='models.E022',
            ),
            Error(
                "An instance of class 'DummyClass' was connected to "
                "the 'post_init' signal with a lazy reference to the sender "
                "'missing-app.model', but app 'missing-app' isn't installed.",
                hint=None,
                obj='invalid_models_tests.test_models',
                id='signals.E001',
            ),
            Error(
                "Bound method 'DummyClass.dummy_method' was connected to the "
                "'post_init' signal with a lazy reference to the sender "
                "'missing-app.model', but app 'missing-app' isn't installed.",
                hint=None,
                obj='invalid_models_tests.test_models',
                id='signals.E001',
            ),
            Error(
                "The field invalid_models_tests.DummyModel.author was declared "
                "with a lazy reference to 'invalid_models_tests.author', but app "
                "'invalid_models_tests' isn't installed.",
                hint=None,
                obj=DummyModel.author.field,
                id='fields.E307',
            ),
            Error(
                "The function 'dummy_function' was connected to the 'post_init' "
                "signal with a lazy reference to the sender "
                "'missing-app.model', but app 'missing-app' isn't installed.",
                hint=None,
                obj='invalid_models_tests.test_models',
                id='signals.E001',
            ),
        ]
        self.assertEqual(_check_lazy_references(apps), expected)
# --- end of invalid_models_tests chunk; productdb excel_import tests follow ---
"""
Test suite for the productdb.excel_import module
"""
import os
import pandas as pd
import pytest
import datetime
from django.contrib.auth.models import User
from app.productdb.excel_import import ProductsExcelImporter, InvalidImportFormatException, InvalidExcelFileFormat, \
ProductMigrationsExcelImporter
from app.productdb import models
from app.productdb.models import Product, Vendor, ProductGroup, ProductMigrationSource, ProductMigrationOption
pytestmark = pytest.mark.django_db
# Column headers expected by the products Excel import. Order matters: the
# test DataFrames below are built positionally against this list.
PRODUCTS_TEST_DATA_COLUMNS = [
    "product id",
    "description",
    "list price",
    "currency",
    "vendor",
    "eox update timestamp",
    "eol announcement date",
    "end of sale date",
    "end of new service attachment date",
    "end of sw maintenance date",
    "end of routing failure analysis date",
    "end of service contract renewal date",
    "last date of support",
    "end of security/vulnerability support date",
    "internal product id",
    "tags"
]
# Column headers for the product-migration import sheets.
PRODUCT_MIGRATION_TEST_DATA_COLUMNS = [
    "product id",
    "vendor",
    "migration source",
    "replacement product id",
    "comment",
    "migration product info url"
]
# Two-row baseline products sheet: "Product A" with every column populated,
# "Product B" with all optional columns left as empty strings.
DEFAULT_PRODUCT_TEST_DATA = pd.DataFrame(
    [
        [
            "Product A",
            "description of Product A",
            "4000.00",
            "USD",
            "Cisco Systems",
            datetime.datetime(2016, 1, 1),
            datetime.datetime(2016, 1, 2),
            datetime.datetime(2016, 1, 3),
            datetime.datetime(2016, 1, 4),
            datetime.datetime(2016, 1, 5),
            datetime.datetime(2016, 1, 6),
            datetime.datetime(2016, 1, 7),
            datetime.datetime(2016, 1, 8),
            datetime.datetime(2016, 1, 9),
            "12345",
            "chassis"
        ],
        [
            "Product B",
            "description of Product B",
            "6000.00",
            "USD",
            "Cisco Systems",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
            "",
        ]
    ], columns=PRODUCTS_TEST_DATA_COLUMNS
)
# Default migration sheet: two migration paths for "Product A", one via a
# source that a test pre-creates and one via a source created on import.
CURRENT_PRODUCT_MIGRATION_TEST_DATA = pd.DataFrame(
    [
        [
            "Product A",
            "Cisco Systems",
            "New Migration Source",
            "Replacement Product ID",
            "comment of the migration",
            "https://localhost"
        ],
        [
            "Product A",
            "Cisco Systems",
            "Existing Migration Source",
            "Replacement Product ID",
            "comment of the migration",
            "https://localhost"
        ]
    ], columns=PRODUCT_MIGRATION_TEST_DATA_COLUMNS
)
# Mutable module-level handle the mock importers read; individual tests
# reassign it (via ``global``) to stage their own sheet contents.
CURRENT_PRODUCT_TEST_DATA = DEFAULT_PRODUCT_TEST_DATA.copy()
class BaseProductsExcelImporterMock(ProductsExcelImporter):
    """Importer test double: skips workbook I/O and serves the module-level
    CURRENT_PRODUCT_TEST_DATA DataFrame instead of reading a file."""
    def verify_file(self):
        # mark the file valid unconditionally (no real verification)
        self.valid_file = True
    def _load_workbook(self):
        # skip loading the workbook from disk entirely
        return
    def _create_data_frame(self):
        # serve the predefined module-level DataFrame for the file import
        self.__wb_data_frame__ = CURRENT_PRODUCT_TEST_DATA
class BaseProductMigrationsExcelImporterMock(ProductMigrationsExcelImporter):
    """Migrations-importer test double: skips workbook I/O and serves the
    module-level CURRENT_PRODUCT_MIGRATION_TEST_DATA DataFrame."""
    def verify_file(self):
        # mark the file valid unconditionally (no real verification)
        self.valid_file = True
    def _load_workbook(self):
        # skip loading the workbook from disk entirely
        return
    def _create_data_frame(self):
        # serve the predefined module-level DataFrame for the file import
        self.__wb_data_frame__ = CURRENT_PRODUCT_MIGRATION_TEST_DATA
@pytest.fixture
def apply_base_import_products_excel_file_mock(monkeypatch):
    """Replace the importer classes *as referenced by this test module* with
    the I/O-free mock subclasses above (monkeypatch undoes this per test)."""
    monkeypatch.setattr("test_productdb_excel_import.ProductsExcelImporter", BaseProductsExcelImporterMock)
    monkeypatch.setattr("test_productdb_excel_import.ProductMigrationsExcelImporter",
                        BaseProductMigrationsExcelImporterMock)
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestProductsExcelImporter:
    @pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
    def test_valid_import(self):
        """Import the two-row default sheet and verify every mapped field."""
        product_file = ProductsExcelImporter("virtual_file.xlsx")
        assert product_file.is_valid_file() is False
        product_file.verify_file()
        assert product_file.is_valid_file() is True
        product_file.import_to_database()
        assert product_file.amount_of_products == 2
        assert Product.objects.count() == 2
        # "Product A": every column is populated in DEFAULT_PRODUCT_TEST_DATA
        p = Product.objects.get(product_id="Product A")
        assert p.description == "description of Product A"
        assert p.list_price == 4000.0
        assert p.currency == "USD"
        assert p.vendor == Vendor.objects.get(id=1)
        assert p.eox_update_time_stamp == datetime.date(2016, 1, 1)
        assert p.eol_ext_announcement_date == datetime.date(2016, 1, 2)
        assert p.end_of_sale_date == datetime.date(2016, 1, 3)
        assert p.end_of_new_service_attachment_date == datetime.date(2016, 1, 4)
        assert p.end_of_sw_maintenance_date == datetime.date(2016, 1, 5)
        assert p.end_of_routine_failure_analysis == datetime.date(2016, 1, 6)
        assert p.end_of_service_contract_renewal == datetime.date(2016, 1, 7)
        assert p.end_of_support_date == datetime.date(2016, 1, 8)
        assert p.end_of_sec_vuln_supp_date == datetime.date(2016, 1, 9)
        assert p.internal_product_id == "12345"
        assert p.tags == "chassis"
        # "Product B": optional columns were empty in the sheet
        p = Product.objects.get(product_id="Product B")
        assert p.description == "description of Product B"
        assert p.list_price == 6000.0
        assert p.currency == "USD"
        assert p.vendor == Vendor.objects.get(id=1)
        assert p.eox_update_time_stamp is None
        assert p.eol_ext_announcement_date is None
        assert p.end_of_sale_date is None
        assert p.end_of_new_service_attachment_date is None
        assert p.end_of_sw_maintenance_date is None
        assert p.end_of_routine_failure_analysis is None
        assert p.end_of_service_contract_renewal is None
        assert p.end_of_support_date is None
        assert p.end_of_sec_vuln_supp_date is None
        assert p.internal_product_id == ""
        assert p.tags == ""
    @pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
    def test_import_with_list_price_of_zero(self):
        """Should ensure that a list price of 0 is saved as 0 value, not None/Null value"""
        global CURRENT_PRODUCT_TEST_DATA
        # Five rows covering zero-price spellings: "0", "0.00", int 0,
        # float 0.00, and an empty string (which must become None).
        CURRENT_PRODUCT_TEST_DATA = pd.DataFrame(
            [
                [
                    "Product A",
                    "description of Product A",
                    "0",
                    "USD",
                    "Cisco Systems",
                    datetime.datetime(2016, 1, 1),
                    datetime.datetime(2016, 1, 2),
                    datetime.datetime(2016, 1, 3),
                    datetime.datetime(2016, 1, 4),
                    datetime.datetime(2016, 1, 5),
                    datetime.datetime(2016, 1, 6),
                    datetime.datetime(2016, 1, 7),
                    datetime.datetime(2016, 1, 8),
                    datetime.datetime(2016, 1, 9),
                    "",
                    ""
                ],
                [
                    "Product B",
                    "description of Product B",
                    "0.00",
                    "USD",
                    "Cisco Systems",
                    datetime.datetime(2016, 1, 1),
                    datetime.datetime(2016, 1, 2),
                    datetime.datetime(2016, 1, 3),
                    datetime.datetime(2016, 1, 4),
                    datetime.datetime(2016, 1, 5),
                    datetime.datetime(2016, 1, 6),
                    datetime.datetime(2016, 1, 7),
                    datetime.datetime(2016, 1, 8),
                    datetime.datetime(2016, 1, 9),
                    "",
                    ""
                ],
                [
                    "Product A2",
                    "description of Product A",
                    0,
                    "USD",
                    "Cisco Systems",
                    datetime.datetime(2016, 1, 1),
                    datetime.datetime(2016, 1, 2),
                    datetime.datetime(2016, 1, 3),
                    datetime.datetime(2016, 1, 4),
                    datetime.datetime(2016, 1, 5),
                    datetime.datetime(2016, 1, 6),
                    datetime.datetime(2016, 1, 7),
                    datetime.datetime(2016, 1, 8),
                    datetime.datetime(2016, 1, 9),
                    "",
                    ""
                ],
                [
                    "Product B2",
                    "description of Product B",
                    0.00,
                    "USD",
                    "Cisco Systems",
                    datetime.datetime(2016, 1, 1),
                    datetime.datetime(2016, 1, 2),
                    datetime.datetime(2016, 1, 3),
                    datetime.datetime(2016, 1, 4),
                    datetime.datetime(2016, 1, 5),
                    datetime.datetime(2016, 1, 6),
                    datetime.datetime(2016, 1, 7),
                    datetime.datetime(2016, 1, 8),
                    datetime.datetime(2016, 1, 9),
                    "",
                    ""
                ],
                [
                    "Product C",
                    "description of Product C",
                    "",
                    "USD",
                    "Cisco Systems",
                    datetime.datetime(2016, 1, 1),
                    datetime.datetime(2016, 1, 2),
                    datetime.datetime(2016, 1, 3),
                    datetime.datetime(2016, 1, 4),
                    datetime.datetime(2016, 1, 5),
                    datetime.datetime(2016, 1, 6),
                    datetime.datetime(2016, 1, 7),
                    datetime.datetime(2016, 1, 8),
                    datetime.datetime(2016, 1, 9),
                    "",
                    ""
                ]
            ], columns=PRODUCTS_TEST_DATA_COLUMNS
        )
        user = User.objects.get(username="api")
        product_file = ProductsExcelImporter(
            "virtual_file.xlsx",
            user_for_revision=user
        )
        product_file.verify_file()
        product_file.import_to_database()
        assert Product.objects.count() == 5
        # verify imported data
        pa = Product.objects.get(product_id="Product A")
        assert pa.list_price is not None
        assert pa.list_price == 0.00
        pa2 = Product.objects.get(product_id="Product A2")
        assert pa2.list_price is not None
        assert pa2.list_price == 0.00
        pb = Product.objects.get(product_id="Product B")
        assert pb.list_price is not None
        assert pb.list_price == 0.00
        pb2 = Product.objects.get(product_id="Product B2")
        assert pb2.list_price is not None
        assert pb2.list_price == 0.00
        pc = Product.objects.get(product_id="Product C")
        assert pc.list_price is None, "No list price provided, therefore it should be None"
@pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
def test_import_with_list_price_of_zero(self):
"""Should ensure that a list price of 0 is saved as 0 value, not None/Null value"""
global CURRENT_PRODUCT_TEST_DATA
CURRENT_PRODUCT_TEST_DATA = pd.DataFrame(
[
[
"Product A",
"description of Product A",
"0",
"USD",
"Cisco Systems",
datetime.datetime(2016, 1, 1),
datetime.datetime(2016, 1, 2),
datetime.datetime(2016, 1, 3),
datetime.datetime(2016, 1, 4),
datetime.datetime(2016, 1, 5),
datetime.datetime(2016, 1, 6),
datetime.datetime(2016, 1, 7),
datetime.datetime(2016, 1, 8),
datetime.datetime(2016, 1, 9),
"",
""
],
[
"Product B",
"description of Product B",
"0.00",
"USD",
"Cisco Systems",
datetime.datetime(2016, 1, 1),
datetime.datetime(2016, 1, 2),
datetime.datetime(2016, 1, 3),
datetime.datetime(2016, 1, 4),
datetime.datetime(2016, 1, 5),
datetime.datetime(2016, 1, 6),
datetime.datetime(2016, 1, 7),
datetime.datetime(2016, 1, 8),
datetime.datetime(2016, 1, 9),
"",
""
],
[
"Product A2",
"description of Product A",
0,
"USD",
"Cisco Systems",
datetime.datetime(2016, 1, 1),
datetime.datetime(2016, 1, 2),
datetime.datetime(2016, 1, 3),
datetime.datetime(2016, 1, 4),
datetime.datetime(2016, 1, 5),
datetime.datetime(2016, 1, 6),
datetime.datetime(2016, 1, 7),
datetime.datetime(2016, 1, 8),
datetime.datetime(2016, 1, 9),
"",
""
],
[
"Product B2",
"description of Product B",
0.00,
"USD",
"Cisco Systems",
datetime.datetime(2016, 1, 1),
datetime.datetime(2016, 1, 2),
datetime.datetime(2016, 1, 3),
datetime.datetime(2016, 1, 4),
datetime.datetime(2016, 1, 5),
datetime.datetime(2016, 1, 6),
datetime.datetime(2016, 1, 7),
datetime.datetime(2016, 1, 8),
datetime.datetime(2016, 1, 9),
"",
""
],
[
"Product C",
"description of Product C",
"",
"USD",
"Cisco Systems",
datetime.datetime(2016, 1, 1),
datetime.datetime(2016, 1, 2),
datetime.datetime(2016, 1, 3),
datetime.datetime(2016, 1, 4),
datetime.datetime(2016, 1, 5),
datetime.datetime(2016, 1, 6),
datetime.datetime(2016, 1, 7),
datetime.datetime(2016, 1, 8),
datetime.datetime(2016, 1, 9),
"",
""
]
], columns=PRODUCTS_TEST_DATA_COLUMNS
)
user = User.objects.get(username="api")
product_file = ProductsExcelImporter(
"virtual_file.xlsx",
user_for_revision=user
)
product_file.verify_file()
product_file.import_to_database()
assert Product.objects.count() == 5
# verify imported data
pa = Product.objects.get(product_id="Product A")
assert pa.list_price is not None
assert pa.list_price == 0.00
pa2 = Product.objects.get(product_id="Product A2")
assert pa2.list_price is not None
assert pa2.list_price == 0.00
pb = Product.objects.get(product_id="Product B")
assert pb.list_price is not None
assert pb.list_price == 0.00
pb2 = Product.objects.get(product_id="Product B2")
assert pb2.list_price is not None
assert pb2.list_price == 0.00
pc = Product.objects.get(product_id="Product C")
assert pc.list_price is None, "No list price provided, therefore it should be None"
    @pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
    def test_import_with_different_vendors(self):
        """ensure that Product constraints are valid"""
        global CURRENT_PRODUCT_TEST_DATA
        # Same product id under two different vendors must produce two rows.
        CURRENT_PRODUCT_TEST_DATA = pd.DataFrame(
            [
                [
                    "Product A",
                    "description of Product A",
                    ".53",
                    "USD",
                    "Cisco Systems",
                    datetime.datetime(2016, 1, 1),
                    datetime.datetime(2016, 1, 2),
                    datetime.datetime(2016, 1, 3),
                    datetime.datetime(2016, 1, 4),
                    datetime.datetime(2016, 1, 5),
                    datetime.datetime(2016, 1, 6),
                    datetime.datetime(2016, 1, 7),
                    datetime.datetime(2016, 1, 8),
                    datetime.datetime(2016, 1, 9),
                    "",
                    ""
                ],
                [
                    "Product A",
                    "description of Product A",
                    ".53",
                    "USD",
                    "Juniper Networks",
                    datetime.datetime(2016, 1, 1),
                    datetime.datetime(2016, 1, 2),
                    datetime.datetime(2016, 1, 3),
                    datetime.datetime(2016, 1, 4),
                    datetime.datetime(2016, 1, 5),
                    datetime.datetime(2016, 1, 6),
                    datetime.datetime(2016, 1, 7),
                    datetime.datetime(2016, 1, 8),
                    datetime.datetime(2016, 1, 9),
                    "",
                    ""
                ]
            ], columns=PRODUCTS_TEST_DATA_COLUMNS
        )
        user = User.objects.get(username="api")
        product_file = ProductsExcelImporter(
            "virtual_file.xlsx",
            user_for_revision=user
        )
        product_file.verify_file()
        product_file.import_to_database()
        assert Product.objects.count() == 2
    @pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
    def test_import_with_unknown_vendor(self):
        """test with vendor that doesn't exist"""
        global CURRENT_PRODUCT_TEST_DATA
        CURRENT_PRODUCT_TEST_DATA = pd.DataFrame(
            [
                [
                    "Product A",
                    "description of Product A",
                    ".53",
                    "USD",
                    "FooBar Unknown",
                    datetime.datetime(2016, 1, 1),
                    datetime.datetime(2016, 1, 2),
                    datetime.datetime(2016, 1, 3),
                    datetime.datetime(2016, 1, 4),
                    datetime.datetime(2016, 1, 5),
                    datetime.datetime(2016, 1, 6),
                    datetime.datetime(2016, 1, 7),
                    datetime.datetime(2016, 1, 8),
                    datetime.datetime(2016, 1, 9),
                    "",
                    ""
                ]
            ], columns=PRODUCTS_TEST_DATA_COLUMNS
        )
        user = User.objects.get(username="api")
        product_file = ProductsExcelImporter(
            "virtual_file.xlsx",
            user_for_revision=user
        )
        product_file.verify_file()
        # an unknown vendor name must abort the import with an exception
        with pytest.raises(Exception) as exinfo:
            product_file.import_to_database()
        assert exinfo.match("unknown vendor ")
    @pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
    def test_import_with_internal_product_id(self):
        """Test the import of the internal product id column"""
        test_value = "some custom data"
        global CURRENT_PRODUCT_TEST_DATA
        # only a subset of the columns is present in this sheet
        CURRENT_PRODUCT_TEST_DATA = pd.DataFrame(
            [
                [
                    "Product A",
                    "description of Product A",
                    ".53",
                    "USD",
                    "Cisco Systems",
                    test_value
                ]
            ], columns=[
                "product id",
                "description",
                "list price",
                "currency",
                "vendor",
                "internal product id"
            ]
        )
        user = User.objects.get(username="api")
        product_file = ProductsExcelImporter(
            "virtual_file.xlsx",
            user_for_revision=user
        )
        product_file.verify_file()
        product_file.import_to_database()
        assert Product.objects.count() == 1
        # verify imported data
        pa = Product.objects.get(product_id="Product A")
        assert pa.internal_product_id is not None
        assert pa.internal_product_id == test_value
def test_invalid_file(self):
valid_test_file = os.path.join(os.getcwd(), "tests", "data", "file_not_found.xlsx")
product_file = ProductsExcelImporter(valid_test_file)
with pytest.raises(Exception) as exinfo:
product_file.verify_file()
assert exinfo.match("No such file or directory:")
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestProductMigrationExcelImporter:
    @pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
    def test_valid_import(self):
        """Import the default migration sheet twice: first run creates the
        missing source and both paths, second run updates them in place."""
        models.Product.objects.create(product_id="Product A", vendor=Vendor.objects.get(id=1))
        models.ProductMigrationSource.objects.create(name="Existing Migration Source")
        product_migrations_file = ProductMigrationsExcelImporter("virtual_file.xlsx")
        assert product_migrations_file.is_valid_file() is False
        product_migrations_file.verify_file()
        assert product_migrations_file.is_valid_file() is True
        product_migrations_file.import_to_database()
        assert ProductMigrationSource.objects.count() == 2
        assert ProductMigrationOption.objects.count() == 2
        assert len(product_migrations_file.import_result_messages) == 3
        assert "Product Migration Source \"New Migration Source\" was created with a preference of 10" in product_migrations_file.import_result_messages
        assert "create Product Migration path \"New Migration Source\" for Product \"Product A\"" in product_migrations_file.import_result_messages
        assert "create Product Migration path \"Existing Migration Source\" for Product \"Product A\"" in product_migrations_file.import_result_messages
        # a second import of the same sheet must update, not duplicate
        product_migrations_file.import_to_database()
        assert ProductMigrationSource.objects.count() == 2
        assert ProductMigrationOption.objects.count() == 2
        assert len(product_migrations_file.import_result_messages) == 2
        assert "update Product Migration path \"New Migration Source\" for Product \"Product A\"" in product_migrations_file.import_result_messages
        assert "update Product Migration path \"Existing Migration Source\" for Product \"Product A\"" in product_migrations_file.import_result_messages
        del product_migrations_file
        # explicit DB cleanup of the objects created in this test
        Product.objects.all().delete()
        ProductMigrationOption.objects.all().delete()
        ProductMigrationSource.objects.all().delete()
    @pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
    def test_import_with_missing_migration_source(self):
        """test import with missing migration source (ignore it)"""
        global CURRENT_PRODUCT_MIGRATION_TEST_DATA
        # a row with an empty migration source must be skipped silently
        CURRENT_PRODUCT_MIGRATION_TEST_DATA = pd.DataFrame(
            [
                [
                    "Product A",
                    "Cisco Systems",
                    "",
                    "Replacement Product ID",
                    "comment of the migration",
                    "Invalid URL"
                ]
            ], columns=PRODUCT_MIGRATION_TEST_DATA_COLUMNS
        )
        models.Product.objects.create(product_id="Product A", vendor=Vendor.objects.get(id=1))
        models.ProductMigrationSource.objects.create(name="Existing Migration Source")
        product_migrations_file = ProductMigrationsExcelImporter("virtual_file.xlsx")
        assert product_migrations_file.is_valid_file() is False
        product_migrations_file.verify_file()
        assert product_migrations_file.is_valid_file() is True
        product_migrations_file.import_to_database()
        assert ProductMigrationOption.objects.count() == 0
        # explicit DB cleanup of the objects created in this test
        Product.objects.all().delete()
        ProductMigrationOption.objects.all().delete()
        ProductMigrationSource.objects.all().delete()
    @pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
    def test_import_with_missing_product_value(self):
        """test import with missing product (ignore it)"""
        global CURRENT_PRODUCT_MIGRATION_TEST_DATA
        # a row without a product id must be skipped without any message
        CURRENT_PRODUCT_MIGRATION_TEST_DATA = pd.DataFrame(
            [
                [
                    None,
                    "Cisco Systems",
                    "Existing Migration Source",
                    "Replacement Product ID",
                    "comment of the migration",
                    "Invalid URL"
                ]
            ], columns=PRODUCT_MIGRATION_TEST_DATA_COLUMNS
        )
        models.Product.objects.create(product_id="Product A", vendor=Vendor.objects.get(id=1))
        models.ProductMigrationSource.objects.create(name="Existing Migration Source")
        product_migrations_file = ProductMigrationsExcelImporter("no file.xlsx")
        assert product_migrations_file.is_valid_file() is False
        product_migrations_file.verify_file()
        assert product_migrations_file.is_valid_file() is True
        product_migrations_file.import_to_database()
        assert ProductMigrationOption.objects.count() == 0
        assert len(product_migrations_file.import_result_messages) == 0
        # explicit DB cleanup of the objects created in this test
        Product.objects.all().delete()
        ProductMigrationOption.objects.all().delete()
        ProductMigrationSource.objects.all().delete()
    @pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
    def test_import_with_invalid_url_format(self):
        """test what happens if the data contains invalid data"""
        global CURRENT_PRODUCT_MIGRATION_TEST_DATA
        # a malformed migration-product-info URL must fail model validation
        CURRENT_PRODUCT_MIGRATION_TEST_DATA = pd.DataFrame(
            [
                [
                    "Product A",
                    "Cisco Systems",
                    "Existing Migration Source",
                    "Replacement Product ID",
                    "comment of the migration",
                    "Invalid URL"
                ]
            ], columns=PRODUCT_MIGRATION_TEST_DATA_COLUMNS
        )
        models.Product.objects.create(product_id="Product A", vendor=Vendor.objects.get(id=1))
        models.ProductMigrationSource.objects.create(name="Existing Migration Source")
        product_migrations_file = ProductMigrationsExcelImporter("virtual_file.xlsx")
        assert product_migrations_file.is_valid_file() is False
        product_migrations_file.verify_file()
        assert product_migrations_file.is_valid_file() is True
        product_migrations_file.import_to_database()
        assert ProductMigrationOption.objects.count() == 0
        assert "cannot save Product Migration for Product A: {'migration_product_info_url': " \
               "['Enter a valid URL.']}" in product_migrations_file.import_result_messages
        # explicit DB cleanup of the objects created in this test
        Product.objects.all().delete()
        ProductMigrationOption.objects.all().delete()
        ProductMigrationSource.objects.all().delete()
    @pytest.mark.usefixtures("apply_base_import_products_excel_file_mock")
    def test_with_invalid_product_id(self):
        """test the behavior of the import function if a product was not found in the database"""
        global CURRENT_PRODUCT_MIGRATION_TEST_DATA
        CURRENT_PRODUCT_MIGRATION_TEST_DATA = pd.DataFrame(
            [
                [
                    "Product that is not in the Database",
                    "Cisco Systems",
                    "Existing Migration Source",
                    "Replacement Product ID",
                    "comment of the migration",
                    "Invalid URL"
                ]
            ], columns=PRODUCT_MIGRATION_TEST_DATA_COLUMNS
        )
        models.Product.objects.create(product_id="Product A", vendor=Vendor.objects.get(id=1))
        models.ProductMigrationSource.objects.create(name="Existing Migration Source")
        product_migrations_file = ProductMigrationsExcelImporter("virtual_file.xlsx")
        assert product_migrations_file.is_valid_file() is False
        product_migrations_file.verify_file()
        assert product_migrations_file.is_valid_file() is True
        product_migrations_file.import_to_database()
        assert ProductMigrationOption.objects.count() == 0
        assert "Product Product that is not in the Database not found in database, skip " \
               "entry" in product_migrations_file.import_result_messages
        # explicit DB cleanup of the objects created in this test
        Product.objects.all().delete()
        ProductMigrationOption.objects.all().delete()
        ProductMigrationSource.objects.all().delete()
@pytest.mark.usefixtures("import_default_users")
@pytest.mark.usefixtures("import_default_vendors")
class TestMigratedImportProductsExcelFile:
@staticmethod
def prepare_import_products_excel_file(filename, verify_file=True, start_import=True):
valid_test_file = os.path.join(os.getcwd(), "tests", "data", filename)
product_file = ProductsExcelImporter(valid_test_file)
if verify_file:
product_file.verify_file()
if start_import:
product_file.import_to_database()
return product_file
    def test_valid_product_import_using_excel_with_currency_column(self):
        """Import a real fixture workbook (with a currency column) and spot
        check three representative products."""
        test_product_ids = [
            'WS-C2960S-48FPD-L',
            'WS-C2960S-48LPD-L',
            'WS-C2960S-24PD-L',
            'WS-C2960S-48TD-L',
            'WS-C2960S-24TD-L',
            'WS-C2960S-48FPS-L',
            'WS-C2960S-48LPS-L',
            'WS-C2960S-24PS-L',
            'CAB-STK-E-0.5M',
            'CAB-STK-E-1M=',
            'CAB-STK-E-1M',
            'CAB-STK-E-3M=',
            'CAB-CONSOLE-RJ45=',
            'CAB-CONSOLE-USB=',
            'EX4200-24F',
            'EX4200-24F-DC',
            'WS-C2960S-48TS-L',
            'WS-C2960S-24TS-L',
            'WS-C2960S-48TS-S',
            'WS-C2960S-24TS-S',
            'C2960S-STACK',
            'C2960S-STACK=',
            'CAB-STK-E-0.5M=',
            'EX4200-24F-S',
            'EX4200-24PX',
        ]
        products = [
            {
                'product id': 'WS-C2960S-48FPD-L',
                'description': 'Catalyst 2960S 48 GigE PoE 740W, 2 x 10G SFP+ LAN Base',
                'list price': 8795,
                'currency': 'USD',
                'vendor': 'Cisco Systems',
            },
            {
                'product id': 'CAB-STK-E-1M',
                'description': 'Cisco FlexStack 1m stacking cable',
                'list price': 100,
                'currency': 'USD',
                'vendor': 'unassigned',
            },
            {
                'product id': 'CAB-STK-E-1M=',
                'description': 'Cisco Bladeswitch 1M stack cable',
                'list price': None,
                'currency': 'USD',
                'vendor': 'Cisco Systems',
            }
        ]
        product_file = self.prepare_import_products_excel_file("excel_import_products_test.xlsx")
        assert product_file.valid_imported_products == 25
        assert product_file.invalid_products == 0
        assert product_file.amount_of_products == 25
        assert product_file.import_result_messages is not None
        # verify that the expected products are created in the database
        for pid in test_product_ids:
            Product.objects.get(product_id=pid)
        # look at the imported values from the Excel file
        for product in products:
            p = Product.objects.get(product_id=product['product id'])
            assert p.description == product['description']
            assert p.list_price == product['list price']
            assert p.currency == product['currency']
            assert p.vendor.name == product['vendor']
    def test_valid_product_import_in_update_only_mode(self):
        """update_only=True must only touch products that already exist in
        the database, even when the sheet contains more rows."""
        test_product_ids = [
            'WS-C2960S-48FPD-L'
        ]
        products = [
            {
                'product id': 'WS-C2960S-48FPD-L',
                'description': 'Catalyst 2960S 48 GigE PoE 740W, 2 x 10G SFP+ LAN Base',
                'list price': 8795,
                'currency': 'USD',
                'vendor': 'Cisco Systems',
            }
        ]
        # pre-create the single product that should be updated
        models.Product.objects.create(product_id="WS-C2960S-48FPD-L", vendor=Vendor.objects.get(id=1))
        product_file = self.prepare_import_products_excel_file("excel_import_products_test.xlsx", start_import=False)
        product_file.import_to_database(update_only=True)
        # one of the 25 sheet rows matched an existing product
        assert product_file.valid_imported_products == 1
        assert product_file.invalid_products == 0
        assert product_file.amount_of_products == 25
        assert product_file.import_result_messages is not None
        # verify that the expected products are created in the database
        for pid in test_product_ids:
            Product.objects.get(product_id=pid)
        # look at the imported values from the Excel file
        for product in products:
            p = Product.objects.get(product_id=product['product id'])
            assert p.description == product['description']
            assert p.list_price == product['list price']
            assert p.currency == product['currency']
            assert p.vendor.name == product['vendor']
    def test_valid_product_import_using_excel_without_currency_column(self):
        """Import a sheet that has no currency column; all 25 rows must import
        and the currency must fall back to the default ("USD")."""
        test_product_ids = [
            'WS-C2960S-48FPD-L',
            'WS-C2960S-48LPD-L',
            'WS-C2960S-24PD-L',
            'WS-C2960S-48TD-L',
            'WS-C2960S-24TD-L',
            'WS-C2960S-48FPS-L',
            'WS-C2960S-48LPS-L',
            'WS-C2960S-24PS-L',
            'CAB-STK-E-0.5M',
            'CAB-STK-E-1M=',
            'CAB-STK-E-1M',
            'CAB-STK-E-3M=',
            'CAB-CONSOLE-RJ45=',
            'CAB-CONSOLE-USB=',
            'EX4200-24F',
            'EX4200-24F-DC',
            'WS-C2960S-48TS-L',
            'WS-C2960S-24TS-L',
            'WS-C2960S-48TS-S',
            'WS-C2960S-24TS-S',
            'C2960S-STACK',
            'C2960S-STACK=',
            'CAB-STK-E-0.5M=',
            'EX4200-24F-S',
            'EX4200-24PX',
        ]
        # spot-check rows: with price, with "unassigned" vendor, without price
        products = [
            {
                'product id': 'WS-C2960S-48FPD-L',
                'description': 'Catalyst 2960S 48 GigE PoE 740W, 2 x 10G SFP+ LAN Base',
                'list price': 8795,
                'currency': 'USD',
                'vendor': 'Cisco Systems',
            },
            {
                'product id': 'CAB-STK-E-1M',
                'description': 'Cisco FlexStack 1m stacking cable',
                'list price': 100,
                'currency': 'USD',
                'vendor': 'unassigned',
            },
            {
                'product id': 'CAB-STK-E-1M=',
                'description': 'Cisco Bladeswitch 1M stack cable',
                'list price': None,
                'currency': 'USD',
                'vendor': 'Cisco Systems',
            }
        ]
        product_file = self.prepare_import_products_excel_file("excel_import_products_test-wo_currency.xlsx")
        assert product_file.valid_imported_products == 25, "\n".join([l for l in product_file.import_result_messages])
        assert product_file.invalid_products == 0
        assert product_file.amount_of_products == 25
        assert product_file.import_result_messages is not None
        # verify that the expected products are created in the database
        for pid in test_product_ids:
            Product.objects.get(product_id=pid)
        # look at the imported values of the spot-check products above
        for product in products:
            p = Product.objects.get(product_id=product['product id'])
            assert p.description == product['description']
            assert p.list_price == product['list price']
            assert p.currency == product['currency']
            assert p.vendor.name == product['vendor']
def test_valid_product_import_with_eox_update_timestamp_date_using_excel_file(self):
lc_product_id = "WS-C2960S-48FPD-L"
no_lc_product_id = "EX4200-24PX"
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
no_lc_product = Product.objects.get(product_id=no_lc_product_id)
assert lc_product.eox_update_time_stamp is not None
assert no_lc_product.eox_update_time_stamp is None
assert datetime.date(2016, 1, 1) == lc_product.eox_update_time_stamp
def test_valid_product_import_with_eol_announcement_date_using_excel_file(self):
lc_product_id = "WS-C2960S-48LPD-L"
no_lc_product_id = "EX4200-24PX"
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
no_lc_product = Product.objects.get(product_id=no_lc_product_id)
assert lc_product.eol_ext_announcement_date is not None
assert no_lc_product.eol_ext_announcement_date is None
assert datetime.date(2016, 1, 1) == lc_product.eol_ext_announcement_date
def test_valid_product_import_with_eos_date_using_excel_file(self):
lc_product_id = "WS-C2960S-24PD-L"
no_lc_product_id = "EX4200-24PX"
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
no_lc_product = Product.objects.get(product_id=no_lc_product_id)
assert lc_product.end_of_sale_date is not None
assert no_lc_product.end_of_sale_date is None
assert datetime.date(2016, 1, 1) == lc_product.end_of_sale_date
def test_valid_product_import_with_end_of_new_service_attachment_date_using_excel_file(self):
lc_product_id = "WS-C2960S-48TD-L"
no_lc_product_id = "EX4200-24PX"
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
no_lc_product = Product.objects.get(product_id=no_lc_product_id)
assert lc_product.end_of_sale_date is None
assert lc_product.end_of_new_service_attachment_date is not None
assert no_lc_product.end_of_new_service_attachment_date is None
assert datetime.date(2016, 1, 1) == lc_product.end_of_new_service_attachment_date
def test_valid_product_import_with_end_of_sw_maintenance_date_using_excel_file(self):
lc_product_id = "WS-C2960S-24TD-L"
no_lc_product_id = "EX4200-24PX"
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
no_lc_product = Product.objects.get(product_id=no_lc_product_id)
assert lc_product.end_of_sale_date is None
assert lc_product.end_of_sw_maintenance_date is not None
assert no_lc_product.end_of_sw_maintenance_date is None
assert datetime.date(2016, 1, 1) == lc_product.end_of_sw_maintenance_date
def test_valid_product_import_with_end_of_routine_failure_analysis_using_excel_file(self):
lc_product_id = "WS-C2960S-48FPS-L"
no_lc_product_id = "EX4200-24PX"
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
no_lc_product = Product.objects.get(product_id=no_lc_product_id)
assert lc_product.end_of_sale_date is None
assert lc_product.end_of_routine_failure_analysis is not None
assert no_lc_product.end_of_routine_failure_analysis is None
assert datetime.date(2016, 1, 1) == lc_product.end_of_routine_failure_analysis
def test_valid_product_import_with_end_of_service_contract_renewal_using_excel_file(self):
lc_product_id = "WS-C2960S-48LPS-L"
no_lc_product_id = "EX4200-24PX"
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
no_lc_product = Product.objects.get(product_id=no_lc_product_id)
assert lc_product.end_of_sale_date is None
assert lc_product.end_of_service_contract_renewal is not None
assert no_lc_product.end_of_service_contract_renewal is None
assert datetime.date(2016, 1, 1) == lc_product.end_of_service_contract_renewal
def test_valid_product_import_with_end_of_support_date_using_excel_file(self):
lc_product_id = "WS-C2960S-24PS-L"
no_lc_product_id = "EX4200-24PX"
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
no_lc_product = Product.objects.get(product_id=no_lc_product_id)
assert lc_product.end_of_sale_date is None
assert lc_product.end_of_support_date is not None
assert no_lc_product.end_of_support_date is None
assert datetime.date(2016, 1, 1), lc_product.end_of_support_date
def test_valid_product_import_with_end_of_sec_vuln_supp_date_using_excel_file(self):
lc_product_id = "CAB-STK-E-0.5M"
no_lc_product_id = "EX4200-24PX"
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
no_lc_product = Product.objects.get(product_id=no_lc_product_id)
assert lc_product.end_of_sale_date is None
assert lc_product.end_of_sec_vuln_supp_date is not None
assert no_lc_product.end_of_sec_vuln_supp_date is None
assert datetime.date(2016, 1, 1) == lc_product.end_of_sec_vuln_supp_date
def test_valid_product_import_with_eol_note_using_excel_file(self):
lc_product_id = "CAB-STK-E-1M="
no_lc_product_id = "EX4200-24PX"
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
no_lc_product = Product.objects.get(product_id=no_lc_product_id)
assert lc_product.end_of_sale_date is None
assert lc_product.eol_reference_url is not None
assert lc_product.eol_reference_number is not None
assert no_lc_product.eol_reference_url is None
assert "http://localhost/myurl" == lc_product.eol_reference_url
assert "My Friendly Name" == lc_product.eol_reference_number
# test without eol_reference_number
lc_product_id = "CAB-STK-E-1M"
lc_product = Product.objects.get(product_id=lc_product_id)
assert lc_product.eol_reference_url is not None
assert lc_product.eol_reference_number is None
assert "http://localhost/myurl" == lc_product.eol_reference_url
def test_valid_product_import_with_full_lifecycle_data_using_excel_file(self):
lc_product_id = "CAB-STK-E-3M="
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
assert datetime.date(2016, 1, 1) == lc_product.eox_update_time_stamp
assert datetime.date(2016, 1, 2) == lc_product.eol_ext_announcement_date
assert datetime.date(2016, 1, 3) == lc_product.end_of_sale_date
assert datetime.date(2016, 1, 4) == lc_product.end_of_new_service_attachment_date
assert datetime.date(2016, 1, 5) == lc_product.end_of_sw_maintenance_date
assert datetime.date(2016, 1, 6) == lc_product.end_of_routine_failure_analysis
assert datetime.date(2016, 1, 7) == lc_product.end_of_service_contract_renewal
assert datetime.date(2016, 1, 8) == lc_product.end_of_sec_vuln_supp_date
assert datetime.date(2016, 1, 9) == lc_product.end_of_support_date
assert "http://localhost" == lc_product.eol_reference_url
assert "comment" == lc_product.eol_reference_number
def test_ignore_list_price_by_excel_upload_with_separate_currency_column(self):
# create entry with random list price
cis_vendor = Vendor.objects.get(name__startswith="Cisco")
lc_product_id = "WS-C2960S-48FPD-L"
lc_product = Product.objects.create(product_id=lc_product_id, vendor=cis_vendor)
lc_product.list_price = 1.0
lc_product.currency = "EUR"
lc_product.list_price_timestamp = datetime.date(day=1, month=1, year=2016)
lc_product.save()
# update previously created entry using the excel import
self.prepare_import_products_excel_file("excel_import_products_test-without_list_prices.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
assert 1.0 == lc_product.list_price
assert "EUR" == lc_product.currency
assert datetime.date.today() == lc_product.list_price_timestamp, "verify that timestamp is set to today"
def test_change_product_list_price_by_excel_upload_with_separate_currency_column(self):
# create entry with random list price
cis_vendor = Vendor.objects.get(name__startswith="Cisco")
lc_product_id = "WS-C2960S-48FPD-L"
lc_product = Product.objects.create(product_id=lc_product_id, vendor=cis_vendor)
lc_product.list_price = 1.0
lc_product.list_price_timestamp = datetime.date(day=1, month=1, year=2016)
lc_product.save()
self.prepare_import_products_excel_file("excel_import_products_test-with_eol_data.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
assert 8795.0 == lc_product.list_price
assert datetime.date.today() == lc_product.list_price_timestamp, "verify that timestamp is set to today"
def test_change_product_list_price_by_excel_upload_without_separate_currency_column(self):
# create entry with random list price
cis_vendor = Vendor.objects.get(name__startswith="Cisco")
lc_product_id = "WS-C2960S-48FPD-L"
lc_product = Product.objects.create(product_id=lc_product_id, vendor=cis_vendor)
lc_product.list_price = 1.0
lc_product.list_price_timestamp = datetime.date(day=1, month=1, year=2016)
lc_product.save()
self.prepare_import_products_excel_file("excel_import_products_test-wo_currency.xlsx")
# verify the lifecycle information for the test products
lc_product = Product.objects.get(product_id=lc_product_id)
assert 8795.0 == lc_product.list_price
assert datetime.date.today() == lc_product.list_price_timestamp, "verify that timestamp is set to today"
def test_invalid_product_import_using_excel_file_with_invalid_keys(self):
product_file = self.prepare_import_products_excel_file(
"excel_import_products_test-invalid_keys.xlsx",
verify_file=False
)
with pytest.raises(InvalidImportFormatException):
product_file.verify_file()
def test_invalid_product_import_using_excel_file_with_invalid_table_name(self):
product_file = self.prepare_import_products_excel_file(
"excel_import_products_test-invalid_table_name.xlsx",
verify_file=False
)
with pytest.raises(InvalidImportFormatException):
product_file.verify_file()
def test_invalid_product_import_using_invalid_file(self):
product_file = self.prepare_import_products_excel_file(
"cisco_test_data.json",
verify_file=False
)
with pytest.raises(InvalidExcelFileFormat):
product_file.verify_file()
def test_import_with_product_group(self):
my_first_group_name = "My First Group"
my_first_group_list = [
"WS-C2960S-48FPD-L",
"WS-C2960S-24PD-L",
"WS-C2960S-24TD-L",
]
my_second_group_name = "My Second Group"
my_second_group_list = [
"WS-C2960S-48LPD-L",
"WS-C2960S-48TD-L",
"WS-C2960S-48FPS-L",
]
example_none_value = "WS-C2960S-48LPS-L"
cis_vendor = Vendor.objects.get(id=1)
assert ProductGroup.objects.all().count() == 0
self.prepare_import_products_excel_file("excel_import_products_test-with_product_group.xlsx")
assert ProductGroup.objects.all().count() == 2
# get new objects
pg1 = ProductGroup.objects.filter(name=my_first_group_name).first()
pg2 = ProductGroup.objects.filter(name=my_second_group_name).first()
assert pg1.vendor == cis_vendor
assert pg2.vendor == cis_vendor
# verify products in list
assert len(my_first_group_list) == pg1.get_all_products().count()
result_first_elements = sorted([e.product_id for e in pg1.get_all_products()])
my_first_group_list.sort()
assert result_first_elements == my_first_group_list
assert len(my_second_group_list) == pg2.get_all_products().count()
result_second_elements = sorted([e.product_id for e in pg2.get_all_products()])
my_second_group_list.sort()
assert result_second_elements == my_second_group_list
# test that given value has no group assignment
p = Product.objects.get(product_id=example_none_value)
assert p.product_group is None
| |
"""Main logic"""
import argparse
import os
import sys
from typing import cast, List, Optional
import traceback
import questionary
import requests
import toml
from tqdm import tqdm
from . import constants
from .models import (
ManiaException,
ManiaSeriousException,
UnavailableException,
Client,
Track,
Album,
Artist,
Media,
MediaType,
)
from . import metadata
from .tidal import TidalAuthError, TidalSession, TidalClient
def log(config: dict, message: str = "", indent: int = 0) -> None:
    """Print *message* prefixed by `indent` indentation levels.

    Output is suppressed entirely when config["quiet"] is set or when
    *message* is explicitly None."""
    if config["quiet"] or message is None:
        return
    print(constants.INDENT * indent + str(message))
def sanitize(config: dict, string: str, length_padding: int = 0) -> str:
    """Return *string* made safe for use as a single path component.

    With config["nice-format"] the name is lower-cased and hyphenated;
    otherwise only path separators are stripped. The result is truncated so
    its UTF-8 byte length plus *length_padding* fits the filesystem's
    maximum filename length."""
    if config["nice-format"]:
        kept = "".join(c for c in string if c.isalnum() or c in " -")
        words = [word for word in kept.replace(" ", "-").split("-") if word]
        sanitized = "-".join(words).lower()
    else:
        sanitized = string.replace("/", "")
    # maximum filename length (bytes) on the target filesystem
    max_length = os.statvfs(config["output-directory"]).f_namemax
    # truncate on byte count, then drop any torn trailing multi-byte character
    truncated = sanitized.encode("utf-8")[: max_length - length_padding]
    return truncated.decode("utf-8", "ignore")
def search(
    client: Client,
    config: dict,
    media_type: MediaType,
    query: str,
) -> Media:
    """Resolve `query` to a single media object of `media_type`.

    With config["by-id"], `query` is treated as an ID and looked up directly.
    Otherwise a text search is performed and either the first hit is returned
    (config["lucky"]) or the user chooses interactively.

    Raises ManiaSeriousException when nothing matches, and ManiaException
    (empty message) when the interactive selection is cancelled.
    """
    if config["by-id"]:
        # dispatch to the type-specific ID lookup on the client
        result = {
            Track: client.get_track_by_id,
            Album: client.get_album_by_id,
            Artist: client.get_artist_by_id,
        }[media_type](query)
        if result is None:
            media_type_name = {
                Track: "track",
                Album: "album",
                Artist: "artist",
            }[media_type]
            raise ManiaSeriousException(
                f"Couldn't find the {media_type_name} with ID {query}."
            )
        return result
    log(config, "Searching...")
    results = client.search(query, media_type, config["search-count"])
    if not results:
        raise ManiaSeriousException("No results found.")
    if config["lucky"]:
        # "lucky" mode takes the first result without prompting
        return results[0]
    def label_track(track: Track) -> str:
        # multi-line label: title, artists, album — with year and
        # explicit/master flags where applicable
        name = track.name
        artists = ", ".join([artist.name for artist in track.artists])
        album = track.album.name
        indent = constants.INDENT + " " * 3
        year = track.album.year
        label = name
        if track.explicit:
            label += " [E]"
        if track.best_available_quality == "master":
            label += " [M]"
        label += f"\n{indent}{artists}\n{indent}{album}"
        if year:
            label += f" ({year})"
        if track.album.explicit:
            label += " [E]"
        if track.album.best_available_quality == "master":
            label += " [M]"
        return label
    def label_album(album: Album) -> str:
        # two-line label: album title (year/flags), then artists
        name = album.name
        artists = ", ".join([artist.name for artist in album.artists])
        indent = constants.INDENT + " " * 3
        year = album.year
        label = name
        if year:
            label += f" ({year})"
        if album.explicit:
            label += " [E]"
        if album.best_available_quality == "master":
            label += " [M]"
        label += f"\n{indent}{artists}"
        return label
    def label_artist(artist: Artist) -> str:
        return artist.name
    labeler = {
        Track: label_track,
        Album: label_album,
        Artist: label_artist,
    }[media_type]
    # present an interactive picker with one labelled choice per result
    choices = [questionary.Choice(labeler(result), value=result) for result in results]
    answer = questionary.select("Select one:", choices=choices).ask()
    if not answer:
        # user cancelled the prompt; abort without an error message
        raise ManiaException("")
    return answer
def resolve_metadata(config: dict, track: Track, path: str, indent: int) -> None:
    """Embed tags (and cover art, when available) from `track` into the file
    at `path`, dispatching on the track's container format."""
    log(config, "Resolving metadata...", indent=indent)
    cover: Optional[metadata.Cover] = None
    if track.album.cover_url:
        # fetch the cover image and remember its MIME type for embedding
        response = requests.get(track.album.cover_url)
        response.raise_for_status()
        cover = metadata.Cover(
            response.content, response.headers.get("Content-Type", "")
        )
    resolver = {
        "mp4": metadata.resolve_mp4_metadata,
        "flac": metadata.resolve_flac_metadata,
    }[track.file_extension]
    resolver(config, track, path, cover)
def get_track_path(
    client: Client,
    config: dict,
    track: Track,
    siblings: Optional[List[Track]] = None,
    include_artist: bool = False,
    include_album: bool = False,
) -> str:
    """Build the output path (without file extension) for `track`.

    Depending on `include_artist`/`include_album` and config["full-structure"],
    the path may include artist, album and disc directories. `siblings` (the
    other tracks of the album) is fetched from the client when not supplied.
    """
    artist_path = ""
    album_path = ""
    disc_path = ""
    track_path = ""
    # reserve room for the temporary suffix so even the in-progress filename
    # never exceeds the filesystem's name-length limit
    temporary_extension = f".{constants.TEMPORARY_EXTENSION}.{track.file_extension}"
    if include_artist or config["full-structure"]:
        artist_path = sanitize(config, track.album.artists[0].name)
        album_format_string = config["album-format"]
    else:
        album_format_string = config["individual-album-format"]
    # siblings determine the zero-padding widths for disc and track numbers
    siblings = siblings or client.get_album_tracks(track.album)
    maximum_disc_number = max(sibling.disc_number for sibling in siblings)
    maximum_track_number = max(sibling.track_number for sibling in siblings)
    if include_album or config["full-structure"]:
        if maximum_disc_number > 1:
            # only multi-disc albums get a per-disc subdirectory
            disc_number = str(track.disc_number).zfill(len(str(maximum_disc_number)))
            disc_path = sanitize(config, f"Disc {disc_number}")
        album_formatted = album_format_string.format(**track.album.format_dict())
        album_path = sanitize(config, album_formatted)
        track_format_string = config["track-format"]
    else:
        track_format_string = config["individual-track-format"]
    track_format_dict = track.format_dict(maximum_track_number=maximum_track_number)
    track_path = sanitize(
        config,
        track_format_string.format(**track_format_dict),
        length_padding=len(temporary_extension),
    )
    return os.path.join(
        config["output-directory"], artist_path, album_path, disc_path, track_path
    )
def download_track(
    client: Client,
    config: dict,
    track: Track,
    siblings: Optional[List[Track]] = None,
    include_artist: bool = False,
    include_album: bool = False,
    indent: int = 0,
) -> None:
    """Download `track`, tag it, and move it into its final location.

    Skips the download when the final file already exists or when the track
    is unavailable. The file is written under a temporary name first so that
    partial downloads never look like finished tracks.
    """
    track_path = get_track_path(
        client,
        config,
        track,
        siblings=siblings,
        include_artist=include_artist,
        include_album=include_album,
    )
    # write to a temporary name; rename to the final name only on success
    temporary_path = (
        f"{track_path}.{constants.TEMPORARY_EXTENSION}.{track.file_extension}"
    )
    final_path = f"{track_path}.{track.file_extension}"
    if os.path.isfile(final_path):
        log(
            config,
            f"Skipping download of {os.path.basename(final_path)}; it already exists.",
            indent=indent,
        )
        return
    try:
        media_url = client.get_media(track)
    except UnavailableException:
        log(
            config,
            f"Skipping download of {os.path.basename(final_path)}; track is not available. Perhaps it's region-locked?",
            indent=indent,
        )
        return
    os.makedirs(os.path.dirname(final_path), exist_ok=True)
    request = requests.get(media_url, stream=True)
    request.raise_for_status()
    with open(temporary_path, mode="wb") as temp_file:
        chunk_size = constants.DOWNLOAD_CHUNK_SIZE
        iterator = request.iter_content(chunk_size=chunk_size)
        if config["quiet"]:
            for chunk in iterator:
                temp_file.write(chunk)
        else:
            # progress bar sized from the Content-Length response header
            total = int(request.headers["Content-Length"])
            with tqdm(
                total=total,
                miniters=1,
                unit="B",
                unit_divisor=1024,
                unit_scale=True,
                dynamic_ncols=True,
            ) as progress_bar:
                for chunk in iterator:
                    temp_file.write(chunk)
                    progress_bar.update(chunk_size)
    if not config["skip-metadata"]:
        try:
            resolve_metadata(config, track, temporary_path, indent)
        except metadata.InvalidFileError:
            # drop the broken temporary file rather than renaming it into place
            log(
                config,
                f"Skipping {os.path.basename(final_path)}; received invalid file",
                indent=indent,
            )
            os.remove(temporary_path)
            return
    os.rename(temporary_path, final_path)
def handle_track(client: Client, config: dict, query: str) -> None:
    """Resolve *query* to a single track and download it."""
    result = search(client, config, Track, query)
    track = cast(Track, result)
    log(config, f'Downloading "{track.name}"...')
    download_track(client, config, track)
def download_album(
    client: Client,
    config: dict,
    album: Album,
    include_artist: bool = False,
    indent: int = 0,
) -> None:
    """Download every track of *album*, logging progress per track."""
    tracks = client.get_album_tracks(album)
    track_count = len(tracks)
    for position, track in enumerate(tracks, start=1):
        log(
            config,
            f'Downloading "{track.name}" ({position} of {track_count} track(s))...',
            indent=indent,
        )
        # pass the full track list so per-track path computation does not
        # have to re-fetch the album contents
        download_track(
            client,
            config,
            track,
            siblings=tracks,
            include_artist=include_artist,
            include_album=True,
            indent=indent + 1,
        )
def handle_album(client: Client, config: dict, query: str) -> None:
    """Resolve *query* to a single album and download it."""
    result = search(client, config, Album, query)
    album = cast(Album, result)
    log(config, f'Downloading "{album.name}"...')
    download_album(client, config, album)
def download_artist(
    client: Client, config: dict, artist: Artist, indent: int = 0
) -> None:
    """Download every album of *artist*, optionally including EPs/singles."""
    albums = list(client.get_artist_albums(artist))
    if config["include-eps-singles"]:
        # EPs and singles are fetched separately and appended to the queue
        albums.extend(client.get_artist_eps_singles(artist))
    album_count = len(albums)
    for position, album in enumerate(albums, start=1):
        log(
            config,
            f'Downloading "{album.name}" ({position} of {album_count} album(s))...',
            indent=indent,
        )
        download_album(client, config, album, include_artist=True, indent=indent + 1)
def handle_artist(client: Client, config: dict, query: str) -> None:
    """Resolve *query* to a single artist and download their catalogue."""
    result = search(client, config, Artist, query)
    artist = cast(Artist, result)
    log(config, f'Downloading "{artist.name}"...')
    download_artist(client, config, artist)
def handle_url(client: Client, config: dict, url: str):
    """Resolve a media URL to a track, album or artist and download it.

    Raises ManiaSeriousException when the URL is malformed (the client
    signals this with ValueError) or when it matches nothing.
    """
    try:
        media_type, media = client.resolve_url(url)
    except ValueError as error:
        raise ManiaSeriousException(str(error)) from error
    if media is None:
        # fix: dropped the pointless f-string prefix (no placeholders, F541)
        raise ManiaSeriousException("Couldn't find anything at that URL.")
    log(config, f'Downloading "{media.name}"...')
    # each downloader shares the (client, config, media) calling convention
    downloader = {
        Track: download_track,
        Album: download_album,
        Artist: download_artist,
    }[media_type]
    downloader(client, config, media)
def load_config(args: dict) -> dict:
    """Build the effective configuration dictionary.

    CLI arguments take precedence over the TOML config file, which takes
    precedence over built-in defaults. A default config file is written on
    first run, and the output directory is created if missing.
    """
    config_path = args["config-path"] or constants.CONFIG_PATH
    if not os.path.isfile(config_path):
        # first run: write the bundled default configuration
        os.makedirs(os.path.dirname(config_path), exist_ok=True)
        with open(config_path, "w") as config_file:
            config_file.write(constants.DEFAULT_CONFIG)
    file_values = toml.load(config_path)
    def pick(cli_value, file_value, default):
        # first non-None value wins: CLI beats file beats default
        if cli_value is not None:
            return cli_value
        if file_value is not None:
            return file_value
        return default
    config = {}
    for key, default in constants.DEFAULT_CONFIG_TOML.items():
        config[key] = pick(args.get(key), file_values.get(key), default)
    config["output-directory"] = os.path.expanduser(config["output-directory"])
    os.makedirs(config["output-directory"], exist_ok=True)
    config["config-path"] = config_path
    return config
def run() -> None:
    """Parse CLI arguments, build the config, authenticate, and dispatch.

    Loads a cached TIDAL session when a valid one exists; otherwise performs
    the device-authorization flow. The query is then handed to the handler
    matching the `command` argument, and the session is persisted afterwards.
    """
    parser = argparse.ArgumentParser()
    # maps the positional `command` argument to its handler function
    handlers = {
        "track": handle_track,
        "album": handle_album,
        "artist": handle_artist,
        "url": handle_url,
    }
    parser.add_argument("command", choices=handlers.keys())
    parser.add_argument("--config-path", dest="config-path")
    # expose every config key as a CLI flag; booleans get a --x/--no-x pair
    for key, value in constants.DEFAULT_CONFIG_TOML.items():
        if isinstance(value, bool):
            boolean = parser.add_mutually_exclusive_group()
            # we don't use store_true/store_false here because we need the
            # default value to be None, not False/True.
            boolean.add_argument(f"--{key}", action="store_const", const=True, dest=key)
            boolean.add_argument(
                f"--no-{key}", action="store_const", const=False, dest=key
            )
        else:
            parser.add_argument(f"--{key}", nargs="?", dest=key)
    parser.add_argument("query", nargs="+")
    parsed_args = parser.parse_args()
    args = vars(parsed_args)
    config = load_config(args)
    # legacy credential keys are no longer used; warn so users can clean up
    if "username" in config or "password" in config:
        config_path = config["config-path"]
        log(
            config,
            f"Note: due to changes on TIDAL's end, login is now done by mimicking an Android TV, and directly specifying a username and password is no longer supported. You can remove them from your configuration file ({config_path}).",
        )
    try:
        with open(constants.SESSION_PATH, "r") as session_file:
            session_dict = toml.load(session_file)
        session = TidalSession(**session_dict)
        session.check_valid()
        log(config, "Loaded cached TIDAL session.")
    except (FileNotFoundError, toml.TomlDecodeError, TidalAuthError):
        # no usable cached session: run the interactive authorization flow
        log(config, "No valid cached session, creating a new one...")
        session = TidalSession()
        session.get_authorization()
        session.authenticate()
    client = TidalClient(config, session)
    try:
        handlers[args["command"]](client, config, " ".join(args["query"]))
    finally:
        # always persist the (possibly refreshed) session, even on failure
        log(config, "Saving TIDAL session for future use...")
        os.makedirs(os.path.dirname(constants.SESSION_PATH), exist_ok=True)
        with open(constants.SESSION_PATH, "w") as session_file:
            toml.dump(session.to_dict(), session_file)
    log(config, "Done!")
def main() -> None:
    """Entry point: translate exceptions and SIGINT into exit codes."""
    try:
        run()
    except requests.exceptions.HTTPError:
        # FIX: keep the header on the same stream as the traceback (stderr);
        # previously it went to stdout, interleaving with normal output when piped
        print("Uncaught HTTP Error:", file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        sys.exit(1)
    except ManiaException as exception:
        # ManiaException("") is a silent abort; only print non-empty messages
        if str(exception):
            print(exception, file=sys.stderr)
        sys.exit(exception.exit_code)
    except KeyboardInterrupt:
        # user abort is not an error
        sys.exit(0)
if __name__ == "__main__":
    main()
| |
import json
import os
import random
import re
import subprocess
from ccmlib import common
from ccmlib.node import ToolError
from dtest import Tester, debug
from tools.decorators import known_failure, since
class TestOfflineTools(Tester):
# In 2.0, we will get this error log message due to jamm not being
# in the classpath
ignore_log_patterns = ["Unable to initialize MemoryMeter"]
    def sstablelevelreset_test(self):
        """
        Insert data and call sstablelevelreset on a series of
        tables. Confirm level is reset to 0 using its output.
        Test a variety of possible errors and ensure response is reasonable.
        @since 2.1.5
        @jira_ticket CASSANDRA-7614
        """
        cluster = self.cluster
        cluster.populate(1).start(wait_for_binary_proto=True)
        node1 = cluster.nodelist()[0]
        # test by trying to run on nonexistent keyspace
        cluster.stop(gently=False)
        try:
            node1.run_sstablelevelreset("keyspace1", "standard1")
        except ToolError as e:
            self.assertIn("ColumnFamily not found: keyspace1/standard1", e.message)
            # this should return exit code 1
            self.assertEqual(e.exit_status, 1, "Expected sstablelevelreset to have a return code of 1, but instead return code was {}".format(e.exit_status))
        # now test by generating keyspace but not flushing sstables
        cluster.start(wait_for_binary_proto=True)
        node1.stress(['write', 'n=100', 'no-warmup', '-schema', 'replication(factor=1)',
                      '-rate', 'threads=8'])
        cluster.stop(gently=False)
        output, error, rc = node1.run_sstablelevelreset("keyspace1", "standard1")
        self._check_stderr_error(error)
        self.assertIn("Found no sstables, did you give the correct keyspace", output)
        self.assertEqual(rc, 0, msg=str(rc))
        # test by writing small amount of data and flushing (all sstables should be level 0)
        cluster.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node1)
        session.execute("ALTER TABLE keyspace1.standard1 with compaction={'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb':1};")
        node1.stress(['write', 'n=1K', 'no-warmup', '-schema', 'replication(factor=1)',
                      '-rate', 'threads=8'])
        node1.flush()
        cluster.stop(gently=False)
        output, error, rc = node1.run_sstablelevelreset("keyspace1", "standard1")
        self._check_stderr_error(error)
        self.assertIn("since it is already on level 0", output)
        self.assertEqual(rc, 0, msg=str(rc))
        # test by loading large amount data so we have multiple levels and checking all levels are 0 at end
        cluster.start(wait_for_binary_proto=True)
        node1.stress(['write', 'n=50K', 'no-warmup', '-schema', 'replication(factor=1)',
                      '-rate', 'threads=8'])
        cluster.flush()
        self.wait_for_compactions(node1)
        cluster.stop()
        initial_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
        _, error, rc = node1.run_sstablelevelreset("keyspace1", "standard1")
        final_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
        self._check_stderr_error(error)
        self.assertEqual(rc, 0, msg=str(rc))
        debug(initial_levels)
        debug(final_levels)
        # let's make sure there was at least L1 before resetting levels
        self.assertTrue(max(initial_levels) > 0)
        # let's check all sstables are on L0 after sstablelevelreset
        self.assertTrue(max(final_levels) == 0)
def get_levels(self, data):
(out, err, rc) = data
return map(int, re.findall("SSTable Level: ([0-9])", out))
def wait_for_compactions(self, node):
pattern = re.compile("pending tasks: 0")
while True:
output, err, _ = node.nodetool("compactionstats")
if pattern.search(output):
break
@known_failure(failure_source='test',
               jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12275',
               flaky=False,
               notes='windows')
@known_failure(failure_source='test',
               jira_url='https://issues.apache.org/jira/browse/CASSANDRA-12519',
               flaky=True)
def sstableofflinerelevel_test(self):
    """
    Generate sstables of varying levels.
    Reset sstables to L0 with sstablelevelreset
    Run sstableofflinerelevel and ensure tables are promoted correctly
    Also test a variety of bad inputs including nonexistent keyspace and sstables
    @since 2.1.5
    @jira_ticket CASSANDRA-8031
    """
    cluster = self.cluster
    # Unthrottle compaction so LCS can promote sstables quickly later on.
    cluster.set_configuration_options(values={'compaction_throughput_mb_per_sec': 0})
    cluster.populate(1).start(wait_for_binary_proto=True)
    node1 = cluster.nodelist()[0]
    # NOTE - As of now this does not return when it encounters Exception and causes test to hang, temporarily commented out
    # test by trying to run on nonexistent keyspace
    # cluster.stop(gently=False)
    # output, error, rc = node1.run_sstableofflinerelevel("keyspace1", "standard1", output=True)
    # self.assertTrue("java.lang.IllegalArgumentException: Unknown keyspace/columnFamily keyspace1.standard1" in error)
    # # this should return exit code 1
    # self.assertEqual(rc, 1, msg=str(rc))
    # cluster.start()
    # now test by generating keyspace but not flushing sstables
    node1.stress(['write', 'n=1', 'no-warmup',
                  '-schema', 'replication(factor=1)',
                  '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)',
                  '-rate', 'threads=8'])
    cluster.stop(gently=False)
    # Nothing was flushed, so the tool should fail with exit status 1.
    try:
        output, error, _ = node1.run_sstableofflinerelevel("keyspace1", "standard1")
    except ToolError as e:
        self.assertIn("No sstables to relevel for keyspace1.standard1", e.stdout)
        self.assertEqual(e.exit_status, 1, msg=str(e.exit_status))
    # test by flushing (sstable should be level 0)
    cluster.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node1)
    debug("Altering compaction strategy to LCS")
    session.execute("ALTER TABLE keyspace1.standard1 with compaction={'class': 'LeveledCompactionStrategy', 'sstable_size_in_mb':1};")
    node1.stress(['write', 'n=1K', 'no-warmup',
                  '-schema', 'replication(factor=1)',
                  '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)',
                  '-rate', 'threads=8'])
    node1.flush()
    cluster.stop()
    # A single freshly-flushed sstable must be reported on L0.
    output, _, rc = node1.run_sstableofflinerelevel("keyspace1", "standard1")
    self.assertIn("L0=1", output)
    self.assertEqual(rc, 0, msg=str(rc))
    cluster.start(wait_for_binary_proto=True)
    # test by loading large amount data so we have multiple sstables
    # must write enough to create more than just L1 sstables
    keys = 8 * cluster.data_dir_count
    node1.stress(['write', 'n={0}K'.format(keys), 'no-warmup',
                  '-schema', 'replication(factor=1)',
                  '-col', 'n=FIXED(10)', 'SIZE=FIXED(1024)',
                  '-rate', 'threads=8'])
    node1.flush()
    debug("Waiting for compactions to finish")
    self.wait_for_compactions(node1)
    debug("Stopping node")
    cluster.stop()
    debug("Done stopping node")
    # Let's reset all sstables to L0
    debug("Getting initial levels")
    initial_levels = list(self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"])))
    self.assertNotEqual([], initial_levels)
    debug('initial_levels:')
    debug(initial_levels)
    debug("Running sstablelevelreset")
    node1.run_sstablelevelreset("keyspace1", "standard1")
    debug("Getting final levels")
    final_levels = list(self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"])))
    self.assertNotEqual([], final_levels)
    debug('final levels:')
    debug(final_levels)
    # let's make sure there was at least 3 levels (L0, L1 and L2)
    self.assertGreater(max(initial_levels), 1)
    # let's check all sstables are on L0 after sstablelevelreset
    self.assertEqual(max(final_levels), 0)
    # time to relevel sstables
    debug("Getting initial levels")
    initial_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
    debug("Running sstableofflinerelevel")
    output, error, _ = node1.run_sstableofflinerelevel("keyspace1", "standard1")
    debug("Getting final levels")
    final_levels = self.get_levels(node1.run_sstablemetadata(keyspace="keyspace1", column_families=["standard1"]))
    debug(output)
    debug(error)
    debug(initial_levels)
    debug(final_levels)
    # let's check sstables were promoted after releveling
    self.assertGreater(max(final_levels), 1)
@since('2.2')
def sstableverify_test(self):
    """
    Generate sstables and test offline verification works correctly
    Test on bad input: nonexistent keyspace and sstables
    Test on potential situations: deleted sstables, corrupted sstables
    """
    cluster = self.cluster
    cluster.populate(3).start(wait_for_binary_proto=True)
    node1, node2, node3 = cluster.nodelist()
    # test on nonexistent keyspace
    try:
        (out, err, rc) = node1.run_sstableverify("keyspace1", "standard1")
    except ToolError as e:
        self.assertIn("Unknown keyspace/table keyspace1.standard1", e.message)
        self.assertEqual(e.exit_status, 1, msg=str(e.exit_status))
    # test on nonexistent sstables: keyspace exists after stress but no
    # sstables were flushed, so the tool should exit cleanly with nothing to do.
    node1.stress(['write', 'n=100', 'no-warmup', '-schema', 'replication(factor=3)',
                  '-rate', 'threads=8'])
    (out, err, rc) = node1.run_sstableverify("keyspace1", "standard1")
    self.assertEqual(rc, 0, msg=str(rc))
    # Generate multiple sstables and test works properly in the simple case
    node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)',
                  '-rate', 'threads=8'])
    node1.flush()
    node1.stress(['write', 'n=100K', 'no-warmup', '-schema', 'replication(factor=3)',
                  '-rate', 'threads=8'])
    node1.flush()
    cluster.stop()
    (out, error, rc) = node1.run_sstableverify("keyspace1", "standard1")
    self.assertEqual(rc, 0, msg=str(rc))
    # STDOUT of the sstableverify command consists of multiple lines which may contain
    # Java-normalized paths. To later compare these with Python-normalized paths, we
    # map over each line of out and replace Java-normalized paths with Python equivalents.
    outlines = map(lambda line: re.sub("(?<=path=').*(?=')",
                                       lambda match: os.path.normcase(match.group(0)),
                                       line),
                   out.splitlines())
    # check output is correct for each sstable
    sstables = self._get_final_sstables(node1, "keyspace1", "standard1")
    for sstable in sstables:
        verified = False
        hashcomputed = False
        for line in outlines:
            if sstable in line:
                if "Verifying BigTableReader" in line:
                    verified = True
                elif "Checking computed hash of BigTableReader" in line:
                    hashcomputed = True
                else:
                    debug(line)
        debug(verified)
        debug(hashcomputed)
        debug(sstable)
        self.assertTrue(verified and hashcomputed)
    # now try intentionally corrupting an sstable to see if hash computed is different and error recognized
    sstable1 = sstables[1]
    # NOTE(review): the sstable is binary but is read/written in text mode
    # ('r'/'w'); on Windows this can translate line endings and corrupt more
    # than the single intended byte -- consider 'rb'/'wb'.
    with open(sstable1, 'r') as f:
        sstabledata = bytearray(f.read())
    with open(sstable1, 'w') as out:
        # flip one random byte (mod 256 keeps it a valid byte value)
        position = random.randrange(0, len(sstabledata))
        sstabledata[position] = (sstabledata[position] + 1) % 256
        out.write(sstabledata)
    # use verbose to get some coverage on it
    try:
        (out, error, rc) = node1.run_sstableverify("keyspace1", "standard1", options=['-v'])
    except ToolError as e:
        # Process sstableverify output to normalize paths in string to Python casing as above
        error = re.sub("(?<=Corrupted: ).*", lambda match: os.path.normcase(match.group(0)), e.message)
        self.assertIn("Corrupted: " + sstable1, error)
        self.assertEqual(e.exit_status, 1, msg=str(e.exit_status))
def sstableexpiredblockers_test(self):
    """
    Write one sstable holding a live row and two sstables holding only
    tombstones (with gc_grace_seconds=0), then check that
    sstableexpiredblockers reports the live sstable as blocking the two
    expired ones from being dropped.
    """
    cluster = self.cluster
    cluster.populate(1).start(wait_for_binary_proto=True)
    [node1] = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    session.execute("create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds=0")
    # create a blocker:
    session.execute("insert into ks.cf (key, val) values (1,1)")
    node1.flush()
    # two tombstone-only sstables that would be droppable but for the blocker
    session.execute("delete from ks.cf where key = 2")
    node1.flush()
    session.execute("delete from ks.cf where key = 3")
    node1.flush()
    out, error, _ = node1.run_sstableexpiredblockers(keyspace="ks", column_family="cf")
    self.assertIn("blocks 2 expired sstables from getting dropped", out)
def sstableupgrade_test(self):
    """
    Test that sstableupgrade functions properly offline on a same-version Cassandra sstable, a
    stdout message of "Found 0 sstables that need upgrading." should be returned.

    The test installs the previous major version, writes one sstable there,
    switches back to the version under test, and runs sstableupgrade twice:
    the first run must upgrade exactly one sstable, the second must find none.
    """
    # Set up original node version to test for upgrade
    cluster = self.cluster
    testversion = cluster.version()
    original_install_dir = cluster.get_install_dir()
    debug('Original install dir: {}'.format(original_install_dir))
    # Set up last major version to upgrade from, assuming 2.1 branch is the oldest tested version
    if testversion < '2.2':
        # Upgrading from 2.0->2.1 fails due to the jamm 0.2.5->0.3.0 jar update.
        # ** This will happen again next time jamm version is upgraded.
        # CCM doesn't handle this upgrade correctly and results in an error when flushing 2.1:
        #   Error opening zip file or JAR manifest missing : /home/mshuler/git/cassandra/lib/jamm-0.2.5.jar
        # The 2.1 installed jamm version is 0.3.0, but bin/cassandra.in.sh used by nodetool still has 0.2.5
        # (when this is fixed in CCM issue #463, install version='git:cassandra-2.0' as below)
        self.skipTest('Skipping 2.1 test due to jamm.jar version upgrade problem in CCM node configuration.')
    elif testversion < '3.0':
        debug('Test version: {} - installing git:cassandra-2.1'.format(testversion))
        cluster.set_install_dir(version='git:cassandra-2.1')
    # As of 3.5, sstable format 'ma' from 3.0 is still the latest - install 2.2 to upgrade from
    else:
        debug('Test version: {} - installing git:cassandra-2.2'.format(testversion))
        cluster.set_install_dir(version='git:cassandra-2.2')
    # Start up last major version, write out an sstable to upgrade, and stop node
    cluster.populate(1).start(wait_for_binary_proto=True)
    [node1] = cluster.nodelist()
    # Check that node1 is actually what we expect
    debug('Downgraded install dir: {}'.format(node1.get_install_dir()))
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    session.execute('create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds=0')
    session.execute('insert into ks.cf (key, val) values (1,1)')
    node1.flush()
    cluster.stop()
    debug('Beginning ks.cf sstable: {}'.format(node1.get_sstables(keyspace='ks', column_family='cf')))
    # Upgrade Cassandra to original testversion and run sstableupgrade
    cluster.set_install_dir(original_install_dir)
    # Check that node1 is actually upgraded
    debug('Upgraded to original install dir: {}'.format(node1.get_install_dir()))
    # Perform a node start/stop so system tables get internally updated, otherwise we may get "Unknown keyspace/table ks.cf"
    cluster.start(wait_for_binary_proto=True)
    node1.flush()
    cluster.stop()
    # First pass: the single old-format sstable must be upgraded.
    (out, error, rc) = node1.run_sstableupgrade(keyspace='ks', column_family='cf')
    debug(out)
    debug(error)
    debug('Upgraded ks.cf sstable: {}'.format(node1.get_sstables(keyspace='ks', column_family='cf')))
    self.assertIn('Found 1 sstables that need upgrading.', out)
    # Check that sstableupgrade finds no upgrade needed on current version.
    (out, error, rc) = node1.run_sstableupgrade(keyspace='ks', column_family='cf')
    debug(out)
    debug(error)
    self.assertIn('Found 0 sstables that need upgrading.', out)
@since('3.0')
def sstabledump_test(self):
    """
    Test that sstabledump functions properly offline to output the contents of a table.

    Writes a single row, flushes, stops the node, and checks that the JSON
    emitted by sstabledump (with and without key enumeration) contains the
    inserted partition key.
    """
    cluster = self.cluster
    cluster.populate(1).start(wait_for_binary_proto=True)
    [node1] = cluster.nodelist()
    session = self.patient_cql_connection(node1)
    self.create_ks(session, 'ks', 1)
    session.execute('create table ks.cf (key int PRIMARY KEY, val int) with gc_grace_seconds=0')
    session.execute('insert into ks.cf (key, val) values (1,1)')
    node1.flush()
    cluster.stop()
    # run_sstabledump returns one (out, err, rc) triple per sstable; one sstable here
    [(out, error, rc)] = node1.run_sstabledump(keyspace='ks', column_families=['cf'])
    debug(out)
    debug(error)
    # Load the json output and check that it contains the inserted key=1
    s = json.loads(out)
    debug(s)
    self.assertEqual(len(s), 1)
    dumped_row = s[0]
    self.assertEqual(dumped_row['partition']['key'], ['1'])
    # Check that we only get the key back using the enumerate option
    [(out, error, rc)] = node1.run_sstabledump(keyspace='ks', column_families=['cf'], enumerate_keys=True)
    debug(out)
    debug(error)
    s = json.loads(out)
    debug(s)
    self.assertEqual(len(s), 1)
    dumped_row = s[0][0]
    self.assertEqual(dumped_row, '1')
def _check_stderr_error(self, error):
    """Assert that every line of stderr output matches a known-benign message."""
    acceptable = ["Max sstable size of", "Consider adding more capacity", "JNA link failure", "Class JavaLaunchHelper is implemented in both"]
    if not error:
        return
    for line in error.splitlines():
        matched = any([msg in line for msg in acceptable])
        self.assertTrue(matched,
                        'Found line \n\n"{line}"\n\n in error\n\n{error}'.format(line=line, error=error))
def _get_final_sstables(self, node, ks, table):
    """
    Return the node final sstable data files, excluding the temporary tables.
    If sstableutil exists (>= 3.0) then we rely on this tool since the table
    file names no longer contain tmp in their names (CASSANDRA-7066).
    """
    # Get all sstable data files (normcase so later path comparisons are
    # case-insensitive on Windows)
    allsstables = map(os.path.normcase, node.get_sstables(ks, table))
    # Remove any temporary files
    tool_bin = node.get_tool('sstableutil')
    if os.path.isfile(tool_bin):
        # >= 3.0: ask sstableutil for the temporary files and subtract them
        args = [tool_bin, '--type', 'tmp', ks, table]
        env = common.make_cassandra_env(node.get_install_cassandra_root(), node.get_node_cassandra_root())
        p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        (stdout, stderr) = p.communicate()
        tmpsstables = map(os.path.normcase, stdout.splitlines())
        ret = list(set(allsstables) - set(tmpsstables))
    else:
        # < 3.0: temporary sstables carry 'tmp' in their file names; 'tmp' is
        # only searched past the first 50 characters, presumably to skip the
        # data directory prefix -- TODO confirm the offset is always safe
        ret = [sstable for sstable in allsstables if "tmp" not in sstable[50:]]
    return ret
| |
#!/usr/bin/env python
"""
This script allows one to submit resources (URLs and files) for analysis.
The submission consists of 2 steps: URL submission and result retrieval.
1. To request the processing of a resource, use the -s option with the URL
or filename to be checked:
$ python submit_to_wepawet.py -s 'http://www.cs.ucsb.edu/'
<?xml version="1.0" encoding="utf-8" ?>
<response state="ok">
<hash>d6aeabfce7d73b7262030333bea80c24</hash>
</response>
The returned 'hash' uniquely identifies the analysis request. In case of
errors, say unresolvable domain, you'll get back an error message.
2. To query the status of a processing request, use the -q option with
the hash returned from the previous submission:
$ python submit_to_wepawet.py -q d6aeabfce7d73b7262030333bea80c24
<?xml version="1.0" encoding="utf-8" ?>
<response state="ok">
<status>processed</status>
<url><![CDATA[http://www.cs.ucsb.edu]]></url>
<report_url><![CDATA[http://192.168.2.2/view.php?type=js&hash=a60d8d724f03053591be2ed13ed62164&t=1249552727]]></report_url>
<result>benign</result>
</response>
If the resource has not been processed yet, you'll get a response
similar to:
<response state="ok">
<status>queued</status>
</response>
Limitations:
- only HTML/JS resources can be analyzed (no flash)
Comments/problems/bugs to wepawet@cs.ucsb.edu.
"""
import cgi
import getopt
import httplib
import itertools
import mimetools
import mimetypes
import os
import sys
import urllib
import urllib2
import urlparse
SERVER = 'wepawet.iseclab.org'
SUBMIT_PATH = '/services/upload.php'
QUERY_PATH = '/services/query.php'
DOMAIN_PATH = '/services/domain.php'
URL_PATH = '/services/url.php'
ADDTL_HDRS_SEP = '@llweb_hdrs_sep@'
def __init_socks(socks_host, socks_port):
try:
import socket
import socks
except:
print >>sys.stderr, """To connect via a SOCKS proxy you need the socks module.
It can be downloaded from http://socksipy.sourceforge.net
"""
sys.exit(2)
socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, socks_host, socks_port)
socket.socket = socks.socksocket
class __MultiPartForm(object):
    """Accumulate the data to be used when posting a form.
    Copied from D. Hellmann, http://broadcast.oreilly.com/2009/07/pymotw-urllib2.html"""
    def __init__(self):
        # (name, value) pairs for plain form fields
        self.form_fields = []
        # (fieldname, filename, mimetype, body) tuples for file uploads
        self.files = []
        # unique MIME boundary string separating the parts
        self.boundary = mimetools.choose_boundary()
        return
    def get_content_type(self):
        # Value for the request's Content-Type header, announcing the boundary.
        return 'multipart/form-data; boundary=%s' % self.boundary
    def add_field(self, name, value):
        """Add a simple field to the form data."""
        self.form_fields.append((name, value))
        return
    def add_file(self, fieldname, filename, fileHandle, mimetype=None):
        """Add a file to be uploaded."""
        # The whole file is read eagerly; the caller keeps ownership of the handle.
        body = fileHandle.read()
        if mimetype is None:
            mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
        self.files.append((fieldname, filename, mimetype, body))
        return
    def __str__(self):
        """Return a string representing the form data, including attached files."""
        # Build a list of lists, each containing "lines" of the
        # request.  Each part is separated by a boundary string.
        # Once the list is built, return a string where each
        # line is separated by '\r\n'.
        parts = []
        part_boundary = '--' + self.boundary
        # Add the form fields
        parts.extend(
            [ part_boundary,
              'Content-Disposition: form-data; name="%s"' % name,
              '',
              value,
            ]
            for name, value in self.form_fields
            )
        # Add the files to upload
        parts.extend(
            [ part_boundary,
              'Content-Disposition: file; name="%s"; filename="%s"' % \
                 (field_name, filename),
              'Content-Type: %s' % content_type,
              '',
              body,
            ]
            for field_name, filename, content_type, body in self.files
            )
        # Flatten the list and add closing boundary marker,
        # then return CR+LF separated data
        flattened = list(itertools.chain(*parts))
        flattened.append('--' + self.boundary + '--')
        flattened.append('')
        return '\r\n'.join(flattened)
class __DoNotRedirectHandler(urllib2.HTTPRedirectHandler):
    """urllib2 handler that suppresses automatic following of redirects."""
    def http_error_301(self, req, fp, code, msg, headers):
        # Return the 301 response itself instead of following it.
        return fp
    def http_error_302(self, req, fp, code, msg, headers):
        # Record the absolute redirect target on the response, then return it
        # unfollowed so the caller can decide what to do.
        fp.url = urlparse.urljoin(req.get_full_url(), headers['Location'])
        return fp
def wepawet_submit_file(file, analysis_opts, user=None, passwd=None):
    """Upload a local file to the wepawet service for analysis.

    file          -- path of the file to submit
    analysis_opts -- AnalysisOptions carrying referer/proxy/extra headers
    user, passwd  -- optional service credentials
    Returns the raw XML response body from the server.
    """
    form = __MultiPartForm()
    form.add_field('resource_type', 'js')
    if user:
        form.add_field('user', user)
    if passwd:
        form.add_field('passwd', passwd)
    if analysis_opts.referer:
        form.add_field('referer', analysis_opts.referer)
    if analysis_opts.proxy:
        form.add_field("proxy", analysis_opts.proxy)
    if len(analysis_opts.addtl_headers) > 0:
        form.add_field("addtl_headers", ADDTL_HDRS_SEP.join(analysis_opts.addtl_headers))
    # Close the upload handle promptly instead of leaking it until GC
    # (add_file reads the whole file eagerly, so closing here is safe).
    with open(file) as upload:
        form.add_file('file', os.path.basename(file), fileHandle=upload)
    body = str(form)
    req = urllib2.Request('http://' + SERVER + SUBMIT_PATH)
    req.add_header('Content-type', form.get_content_type())
    req.add_header('Content-length', len(body))
    req.add_data(body)
    response = urllib2.urlopen(req)
    res = response.read()
    return res
def wepawet_submit(url, user=None, passwd=None, analysis_opts=None):
    """Submit a URL to the wepawet service for analysis.

    url           -- resource to analyze
    user, passwd  -- optional service credentials
    analysis_opts -- optional AnalysisOptions; when given, its referer, proxy
                     and additional headers are forwarded (this replaces the
                     previously commented-out dead code; trailing keyword
                     keeps the signature backward compatible)
    Returns the raw XML response body from the server.
    """
    p = {
        'resource_type': 'js',
        'url': url
    }
    if user:
        p['user'] = user
    if passwd:
        p['passwd'] = passwd
    if analysis_opts is not None:
        if analysis_opts.referer:
            p['referer'] = analysis_opts.referer
        if analysis_opts.proxy:
            p["proxy"] = analysis_opts.proxy
        if len(analysis_opts.addtl_headers) > 0:
            p["addtl_headers"] = ADDTL_HDRS_SEP.join(analysis_opts.addtl_headers)
    params = urllib.urlencode(p)
    req = urllib2.Request('http://' + SERVER + SUBMIT_PATH, params)
    response = urllib2.urlopen(req)
    res = response.read()
    return res
def wepawet_query(task_id):
    """Poll the analysis status of a previously submitted task by its hash."""
    query = {
        'resource_type': 'js',
        'hash': task_id
    }
    target = 'http://' + SERVER + QUERY_PATH + '?' + urllib.urlencode(query)
    return urllib2.urlopen(urllib2.Request(target)).read()
def wepawet_domain(domain):
    """Ask the service whether the given domain has already been analyzed."""
    query = {
        'resource_type': 'js',
        'domain': domain
    }
    target = 'http://' + SERVER + DOMAIN_PATH + '?' + urllib.urlencode(query)
    return urllib2.urlopen(urllib2.Request(target)).read()
def wepawet_url(url):
    """Ask the service whether the given URL has already been analyzed."""
    query = {
        'resource_type': 'js',
        'url': url
    }
    target = 'http://' + SERVER + URL_PATH + '?' + urllib.urlencode(query)
    return urllib2.urlopen(urllib2.Request(target)).read()
def usage(cmd):
    """Print the command-line help for this script to stdout.

    Bug fix: the `cmd` parameter was previously ignored and the literal word
    "cmd" was printed; it is now interpolated into the usage line.
    """
    sys.stdout.write("""Usage: %s OPTIONS
  -C,--credentials USER:PASSWD use the given credentials
  -d,--domain DOMAIN           query if DOMAIN has been analyzed
  -h,--help                    print this message and exit
  -H,--header                  additional browser's header
  -p,--socks-proxy-host        proxy host
  -P,--socks-proxy-port        proxy port
  -q,--query TASK_ID           query the status of a request
  -r,--referer URL             use URL as the initial referer
  -s,--submit URL              submit URL for analysis
  -S,--server SERVER           analysis server
  -u,--url URL                 query if URL has been analyzed
  -v,--verbose                 be verbose
  -x,--browser-proxy           browser's proxy
""" % cmd)
class AnalysisOptions:
    """Per-invocation analysis options gathered from the command line.

    Bug fix: these were previously mutable *class* attributes, so the
    `addtl_headers` list was shared by every instance (and main() appends to
    it in place). They are now set per instance in __init__; attribute names
    are unchanged, so all existing callers keep working.
    """
    def __init__(self):
        self.addtl_headers = []   # extra browser headers (-H), joined later
        self.proxy = None         # browser-side proxy (-x)
        self.referer = None       # initial referer URL (-r)
def main(argv=sys.argv):
    """Parse command-line options and dispatch to the wepawet API helpers.

    Returns 0 on success, 1 on usage errors. Fixes applied:
      * getopt long option "browser-proxy" now declares "=" since -x takes
        an argument;
      * defaulting the SOCKS host assigned a dead variable (socks_host)
        instead of socks_proxy_host, so None was passed to __init_socks;
      * wepawet_submit was called with four positional arguments although it
        only accepts (url, user, passwd) -- a guaranteed TypeError.
    """
    global SERVER
    try:
        opts, args = getopt.getopt(argv[1:],
                                   "C:d:hH:p:P:q:r:s:S:u:vx:",
                                   ["credentials=", "domain=", "help", "header=",
                                    "proxy-host=", "proxy-port=",
                                    "query=", "referer=", "submit=", "server=", "url=",
                                    "verbose", "browser-proxy="])
    except getopt.GetoptError as e:
        print(str(e))
        usage(argv[0])
        return 1
    action = None
    resource = None
    task_id = None
    verbose = False
    socks_proxy_host = socks_proxy_port = None
    user = passwd = None
    analysis_opts = AnalysisOptions()
    for o, a in opts:
        if o in ('-C', '--credentials'):
            try:
                user, passwd = a.split(":", 1)
            except ValueError:
                sys.stderr.write("Invalid credentials format (USER:PASSWD)\n")
                return 1
        elif o in ('-d', '--domain'):
            action = 'domain'
            resource = a
        elif o in ('-h', '--help'):
            usage(argv[0])
            return 0
        elif o in ('-H', '--header'):
            analysis_opts.addtl_headers.append(a)
        elif o in ('-p', '--proxy-host'):
            socks_proxy_host = a
        elif o in ('-P', '--proxy-port'):
            socks_proxy_port = int(a)
        elif o in ('-q', '--query'):
            action = 'query'
            task_id = a
        elif o in ('-r', '--referer'):
            analysis_opts.referer = a
        elif o in ('-s', '--submit'):
            action = 'submit'
            resource = a
        elif o in ('-S', '--server'):
            SERVER = a
        elif o in ('-u', '--url'):
            action = 'url'
            resource = a
        elif o in ('-v', '--verbose'):
            verbose = True
        elif o in ('-x', '--browser-proxy'):
            analysis_opts.proxy = a
    if action is None:
        usage(argv[0])
        return 1
    if resource is not None and task_id is not None:
        usage(argv[0])
        return 1
    if socks_proxy_port or socks_proxy_host:
        if not socks_proxy_port:
            socks_proxy_port = 8888
        if not socks_proxy_host:
            # BUG FIX: previously assigned the unused name 'socks_host'
            socks_proxy_host = "localhost"
        __init_socks(socks_proxy_host, socks_proxy_port)
        httplib.HTTPConnection.debuglevel = 1
    if action == 'submit':
        if os.path.exists(resource):
            r = wepawet_submit_file(resource, analysis_opts, user, passwd)
        else:
            # BUG FIX: wepawet_submit takes (url, user=None, passwd=None);
            # passing analysis_opts as a fourth positional arg raised TypeError.
            r = wepawet_submit(resource, user, passwd)
    elif action == 'query':
        r = wepawet_query(task_id)
    elif action == 'domain':
        r = wepawet_domain(resource)
    elif action == 'url':
        r = wepawet_url(resource)
    print(r)
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == "__main__":
    sys.exit(main())
| |
#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['BaseProcess', 'current_process', 'active_children']
#
# Imports
#
import os
import sys
import signal
import itertools
from _weakrefset import WeakSet
#
#
#
# Record the working directory at import time (os.getcwd() raises OSError if
# the current directory has been deleted, in which case None is recorded).
try:
    ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
    ORIGINAL_DIR = None
#
# Public functions
#
def current_process():
    '''
    Return process object representing the current process
    '''
    # _current_process is rebound to the child's process object inside
    # BaseProcess._bootstrap(), so this is correct in parent and child alike.
    return _current_process
def active_children():
    '''
    Return list of process objects corresponding to live child processes
    '''
    # Reap already-finished children first so only live ones are returned.
    _cleanup()
    return list(_children)
#
#
#
def _cleanup():
    # check for processes which have finished; iterate over a copy since we
    # mutate the set while scanning
    for p in list(_children):
        # poll() returns a non-None exit code once the child has terminated
        if p._popen.poll() is not None:
            _children.discard(p)
#
# The `Process` class
#
class BaseProcess(object):
    '''
    Process objects represent activity that is run in a separate process

    The class is analogous to `threading.Thread`
    '''
    def _Popen(self):
        # Replaced by concrete Process subclasses with a platform-specific
        # Popen factory; start() calls it to actually spawn the child.
        raise NotImplementedError

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
                 *, daemon=None):
        assert group is None, 'group argument must be None for now'
        # _identity is the chain of child counters from the main process,
        # e.g. (2, 1) == first child of the second child of the main process.
        count = next(_process_counter)
        self._identity = _current_process._identity + (count,)
        self._config = _current_process._config.copy()
        self._parent_pid = os.getpid()
        self._popen = None
        self._target = target
        # args/kwargs are copied immediately, so the mutable kwargs={} default
        # is never shared or mutated.
        self._args = tuple(args)
        self._kwargs = dict(kwargs)
        self._name = name or type(self).__name__ + '-' + \
                     ':'.join(str(i) for i in self._identity)
        if daemon is not None:
            self.daemon = daemon
        _dangling.add(self)

    def run(self):
        '''
        Method to be run in sub-process; can be overridden in sub-class
        '''
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        '''
        Start child process
        '''
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(), \
               'can only start a process object created by current process'
        assert not _current_process._config.get('daemon'), \
               'daemonic processes are not allowed to have children'
        # Reap any finished children before adding a new one.
        _cleanup()
        self._popen = self._Popen(self)
        self._sentinel = self._popen.sentinel
        _children.add(self)

    def terminate(self):
        '''
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        '''
        self._popen.terminate()

    def join(self, timeout=None):
        '''
        Wait until child process terminates
        '''
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'
        res = self._popen.wait(timeout)
        # wait() returns None on timeout; only discard once truly finished.
        if res is not None:
            _children.discard(self)

    def is_alive(self):
        '''
        Return whether process is alive
        '''
        if self is _current_process:
            return True
        assert self._parent_pid == os.getpid(), 'can only test a child process'
        if self._popen is None:
            return False
        self._popen.poll()
        return self._popen.returncode is None

    @property
    def name(self):
        '''
        Return the process name (settable; must be a string)
        '''
        return self._name

    @name.setter
    def name(self, name):
        assert isinstance(name, str), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        '''
        Return whether process is a daemon
        '''
        return self._config.get('daemon', False)

    @daemon.setter
    def daemon(self, daemonic):
        '''
        Set whether process is a daemon
        '''
        assert self._popen is None, 'process has already started'
        self._config['daemon'] = daemonic

    @property
    def authkey(self):
        '''
        Return authorization key of process
        '''
        return self._config['authkey']

    @authkey.setter
    def authkey(self, authkey):
        '''
        Set authorization key of process
        '''
        self._config['authkey'] = AuthenticationString(authkey)

    @property
    def exitcode(self):
        '''
        Return exit code of process or `None` if it has yet to stop
        '''
        # Not started yet: self._popen is None, so None is returned.
        if self._popen is None:
            return self._popen
        return self._popen.poll()

    @property
    def ident(self):
        '''
        Return identifier (PID) of process or `None` if it has yet to start
        '''
        if self is _current_process:
            return os.getpid()
        else:
            return self._popen and self._popen.pid

    pid = ident

    @property
    def sentinel(self):
        '''
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        '''
        try:
            # set by start(); absent until the process has been started
            return self._sentinel
        except AttributeError:
            raise ValueError("process not started")

    def __repr__(self):
        if self is _current_process:
            status = 'started'
        elif self._parent_pid != os.getpid():
            # a process object inherited by a child cannot be queried there
            status = 'unknown'
        elif self._popen is None:
            status = 'initial'
        else:
            if self._popen.poll() is not None:
                status = self.exitcode
            else:
                status = 'started'
        if type(status) is int:
            if status == 0:
                status = 'stopped'
            else:
                # negative codes are signal deaths; map them to SIG* names
                status = 'stopped[%s]' % _exitcode_to_name.get(status, status)
        return '<%s(%s, %s%s)>' % (type(self).__name__, self._name,
                                   status, self.daemon and ' daemon' or '')

    # Internal: executed in the child after fork/spawn; returns the exit code.
    def _bootstrap(self):
        from . import util, context
        global _current_process, _process_counter, _children
        try:
            # NOTE(review): self._start_method is expected to be attached by
            # the spawning machinery before bootstrap; it is not set in
            # __init__ -- confirm against the popen implementations.
            if self._start_method is not None:
                context._force_start_method(self._start_method)
            # Reset child-local bookkeeping inherited from the parent.
            _process_counter = itertools.count(1)
            _children = set()
            util._close_stdin()
            old_process = _current_process
            _current_process = self
            try:
                util._finalizer_registry.clear()
                util._run_after_forkers()
            finally:
                # delay finalization of the old process object until after
                # _run_after_forkers() is executed
                del old_process
            util.info('child process calling self.run()')
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit as e:
            # mirror the interpreter's SystemExit handling: no args -> 1,
            # int arg -> that code, anything else -> print it and exit 1
            if not e.args:
                exitcode = 1
            elif isinstance(e.args[0], int):
                exitcode = e.args[0]
            else:
                sys.stderr.write(str(e.args[0]) + '\n')
                exitcode = 1
        except:
            exitcode = 1
            import traceback
            sys.stderr.write('Process %s:\n' % self.name)
            traceback.print_exc()
        finally:
            util.info('process exiting with exitcode %d' % exitcode)
            sys.stdout.flush()
            sys.stderr.flush()
        return exitcode
#
# We subclass bytes to avoid accidental transmission of auth keys over network
#
class AuthenticationString(bytes):
    # Pickling is only permitted while a child process is being spawned, so
    # an auth key cannot accidentally be serialized and sent elsewhere.
    def __reduce__(self):
        from .context import get_spawning_popen
        if get_spawning_popen() is None:
            raise TypeError(
                'Pickling an AuthenticationString object is '
                'disallowed for security reasons'
                )
        return AuthenticationString, (bytes(self),)
#
# Create object representing the main process
#
class _MainProcess(BaseProcess):
    # Process object standing in for the interpreter's original process;
    # it has no parent and is never started via _Popen.
    def __init__(self):
        self._identity = ()
        self._name = 'MainProcess'
        self._parent_pid = None
        self._popen = None
        self._config = {'authkey': AuthenticationString(os.urandom(32)),
                        'semprefix': '/mp'}
        # Note that some versions of FreeBSD only allow named
        # semaphores to have names of up to 14 characters.  Therefore
        # we choose a short prefix.
        #
        # On MacOSX in a sandbox it may be necessary to use a
        # different prefix -- see #19478.
        #
        # Everything in self._config will be inherited by descendant
        # processes.
_current_process = _MainProcess()
_process_counter = itertools.count(1)
_children = set()
# The class is only needed to create the singleton above.
del _MainProcess
#
# Give names to some return codes
#
_exitcode_to_name = {}
for name, signum in list(signal.__dict__.items()):
    # Children killed by a signal exit with -signum; skip names containing
    # '_' (SIG_DFL, SIG_IGN, SIGRTMIN, ...) so only plain SIG* names map.
    if name[:3]=='SIG' and '_' not in name:
        _exitcode_to_name[-signum] = name
# For debug and leak testing
_dangling = WeakSet()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a diagonal matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
__all__ = ["LinearOperatorDiag",]
class LinearOperatorDiag(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] square diagonal matrix.
This operator acts like a [batch] diagonal matrix `A` with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorDiag` is initialized with a (batch) vector.
```python
# Create a 2 x 2 diagonal linear operator.
diag = [1., -1.]
operator = LinearOperatorDiag(diag)
operator.to_dense()
==> [[1., 0.]
[0., -1.]]
operator.shape
==> [2, 2]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [2, 4] Tensor
operator.matmul(x)
==> Shape [2, 4] Tensor
# Create a [2, 3] batch of 4 x 4 linear operators.
diag = tf.random_normal(shape=[2, 3, 4])
operator = LinearOperatorDiag(diag)
# Create a shape [2, 1, 4, 2] vector. Note that this shape is compatible
  # since the batch dimensions, [2, 1], are broadcast to
# operator.batch_shape = [2, 3].
y = tf.random_normal(shape=[2, 1, 4, 2])
x = operator.solve(y)
==> operator.matmul(x) = y
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Performance
Suppose `operator` is a `LinearOperatorDiag` of shape `[N, N]`,
and `x.shape = [N, R]`. Then
* `operator.matmul(x)` involves `N * R` multiplications.
* `operator.solve(x)` involves `N` divisions and `N * R` multiplications.
* `operator.determinant()` involves a size `N` `reduce_prod`.
If instead `operator` and `x` have shape `[B1,...,Bb, N, N]` and
`[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
             diag,
             is_non_singular=None,
             is_self_adjoint=None,
             is_positive_definite=None,
             is_square=None,
             name="LinearOperatorDiag"):
  r"""Initialize a `LinearOperatorDiag`.

  Args:
    diag: Shape `[B1,...,Bb, N]` `Tensor` with `b >= 0` `N >= 0`.
      The diagonal of the operator. Allowed dtypes: `float32`, `float64`,
      `complex64`, `complex128`.
    is_non_singular: Expect that this operator is non-singular.
    is_self_adjoint: Expect that this operator is equal to its hermitian
      transpose. If `diag.dtype` is real, this is auto-set to `True`.
    is_positive_definite: Expect that this operator is positive definite,
      meaning the quadratic form `x^H A x` has positive real part for all
      nonzero `x`. Note that we do not require the operator to be
      self-adjoint to be positive-definite. See:
      https://en.wikipedia.org/wiki/Positive-definite_matrix\
          #Extension_for_non_symmetric_matrices
    is_square: Expect that this operator acts like square [batch] matrices.
    name: A name for this `LinearOperator`.

  Raises:
    TypeError: If `diag.dtype` is not an allowed type.
    ValueError: If `diag.dtype` is real, and `is_self_adjoint` is not `True`.
  """
  with ops.name_scope(name, values=[diag]):
    self._diag = ops.convert_to_tensor(diag, name="diag")
    self._check_diag(self._diag)  # raises TypeError/ValueError on bad input
    # Check and auto-set hints: a real diagonal matrix always equals its
    # Hermitian transpose, so is_self_adjoint=False is contradictory and
    # None is upgraded to True.
    if not self._diag.dtype.is_complex:
      if is_self_adjoint is False:
        raise ValueError("A real diagonal operator is always self adjoint.")
      else:
        is_self_adjoint = True
    # A diagonal operator built from a length-N diagonal is N x N by
    # construction; only the square case is representable here.
    if is_square is False:
      raise ValueError("Only square diagonal operators currently supported.")
    is_square = True
    super(LinearOperatorDiag, self).__init__(
        dtype=self._diag.dtype,
        graph_parents=[self._diag],
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name)
def _check_diag(self, diag):
  """Graph-construction-time validation of the `diag` argument.

  Raises:
    TypeError: if `diag` has a dtype other than the four supported ones.
    ValueError: if `diag` is statically known to be a scalar.
  """
  allowed_dtypes = [
      dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
  if diag.dtype not in allowed_dtypes:
    raise TypeError(
        "Argument diag must have dtype in %s. Found: %s"
        % (allowed_dtypes, diag.dtype))
  # Only check rank when it is statically known; unknown rank passes.
  ndims = diag.get_shape().ndims
  if ndims is not None and ndims < 1:
    raise ValueError("Argument diag must have at least 1 dimension. "
                     "Found: %s" % diag)
def _shape(self):
  """Static operator shape: diag shape with the last dim repeated.

  E.g. a diag of shape [5, 3] yields an operator shape [5, 3, 3].
  """
  diag_shape = self._diag.get_shape()
  return diag_shape.concatenate(diag_shape[-1:])
def _shape_tensor(self):
  """Dynamic operator shape: append N (the trailing diag dim) once more."""
  diag_shape = array_ops.shape(self._diag)
  # [B1,...,Bb, N] -> [B1,...,Bb, N, N]
  return array_ops.concat((diag_shape, [diag_shape[-1]]), 0)
def _assert_non_singular(self):
    """Returns an op asserting every diagonal entry has nonzero modulus.

    A diagonal matrix is singular iff some diagonal entry is exactly zero.
    """
    return linear_operator_util.assert_no_entries_with_modulus_zero(
        self._diag,
        message="Singular operator: Diagonal contained zero values.")
def _assert_positive_definite(self):
  """Returns an op asserting Re(diag) > 0 elementwise.

  For a diagonal operator, positive real parts of all diagonal entries
  are checked; the message distinguishes complex and real dtypes.
  """
  message = (
      "Diagonal operator had diagonal entries with non-positive real part, "
      "thus was not positive definite."
  ) if self.dtype.is_complex else (
      "Real diagonal operator had non-positive diagonal entries, "
      "thus was not positive definite.")
  return check_ops.assert_positive(
      math_ops.real(self._diag),
      message=message)
def _assert_self_adjoint(self):
    """Returns an op asserting the diagonal is real (zero imaginary part).

    A diagonal matrix equals its Hermitian transpose iff its entries are
    real.
    """
    return linear_operator_util.assert_zero_imag_part(
        self._diag,
        message=(
            "This diagonal operator contained non-zero imaginary values. "
            " Thus it was not self-adjoint."))
def _matmul(self, x, adjoint=False, adjoint_arg=False):
  """Diagonal matmul: scale each row of `x` by the matching diag entry."""
  if adjoint_arg:
    x = linalg.adjoint(x)
  # Adjoint of a diagonal matrix is elementwise conjugation.
  scale = math_ops.conj(self._diag) if adjoint else self._diag
  # Size-1 trailing axis broadcasts [..., N] against [..., N, R].
  return array_ops.expand_dims(scale, -1) * x
def _determinant(self):
    # det(diag(d)) = prod(d), reduced over the last (diagonal) axis.
    return math_ops.reduce_prod(self._diag, reduction_indices=[-1])
def _log_abs_determinant(self):
    # log|det| = sum_i log|d_i|, reduced over the last (diagonal) axis;
    # abs() makes this well-defined for complex and negative entries.
    return math_ops.reduce_sum(
        math_ops.log(math_ops.abs(self._diag)), reduction_indices=[-1])
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
  """Diagonal solve: divide each row of `rhs` by the matching diag entry."""
  if adjoint_arg:
    rhs = linalg.adjoint(rhs)
  scale = math_ops.conj(self._diag) if adjoint else self._diag
  # Multiply by the elementwise reciprocal, broadcast over the last axis.
  return rhs * array_ops.expand_dims(1. / scale, -1)
def _to_dense(self):
    # Materialize as a dense [batch] matrix: `diag` on the main diagonal,
    # zeros elsewhere.
    return array_ops.matrix_diag(self._diag)
def _diag_part(self):
  """The diagonal is stored directly, so return it as-is."""
  return self._diag
def _add_to_tensor(self, x):
  """Return `x + diag(self._diag)` by rewriting the diagonal of `x`."""
  updated_diag = self._diag + array_ops.matrix_diag_part(x)
  return array_ops.matrix_set_diag(x, updated_diag)
@property
def diag(self):
    """The `Tensor` holding the diagonal, shape `[B1,...,Bb, N]`."""
    return self._diag
| |
import ply.lex as lex
# Accumulated analysis results: one formatted string per token or per
# lexical error (filled in by prueba() / t_error()).
resultado_lexema = []

# Reserved words of the C++-like mini-language.
reservada = (
    # Reserved words
    'INCLUDE',
    'USING',
    'NAMESPACE',
    'STD',
    'COUT',
    'CIN',
    'GET',
    'CADENA',
    'RETURN',
    'VOID',
    'INT',
    'ENDL',
)

# Full token list: reserved words plus literals, operators and punctuation.
tokens = reservada + (
    'IDENTIFICADOR',
    'ENTERO',
    'ASIGNAR',
    'SUMA',
    'RESTA',
    'MULT',
    'DIV',
    'POTENCIA',
    'MODULO',
    'MINUSMINUS',
    'PLUSPLUS',
    # Conditionals
    'SI',
    'SINO',
    # Loops
    'MIENTRAS',
    'PARA',
    # Logical / relational operators
    'AND',
    'OR',
    'NOT',
    'MENORQUE',
    'MENORIGUAL',
    'MAYORQUE',
    'MAYORIGUAL',
    'IGUAL',
    'DISTINTO',
    # Symbols
    'NUMERAL',
    'PARIZQ',
    'PARDER',
    'CORIZQ',
    'CORDER',
    'LLAIZQ',
    'LLADER',
    # Others
    'PUNTOCOMA',
    'COMA',
    'COMDOB',
    # NOTE(review): the names/comments below look swapped relative to the
    # rules: t_MAYORDER matches '<<' and t_MAYORIZQ matches '>>' — confirm.
    'MAYORDER',   # >>
    'MAYORIZQ',   # <<
)

# Simple single-regex token rules. PLY sorts string rules by decreasing
# regex length, so multi-character rules (e.g. '--') are tried before
# single-character ones (e.g. '-').
t_SUMA = r'\+'
t_RESTA = r'-'
t_MINUSMINUS = r'\-\-'
# t_PUNTO = r'\.'
t_MULT = r'\*'
t_DIV = r'/'
t_MODULO = r'\%'
# NOTE(review): PLY compiles rules with re.VERBOSE, so the spaces in this
# alternation are ignored; it matches '**' or '^'.
t_POTENCIA = r'(\*{2} | \^)'
t_ASIGNAR = r'='
# Logical operators
t_AND = r'\&\&'
t_OR = r'\|{2}'
t_NOT = r'\!'
t_MENORQUE = r'<'
t_MAYORQUE = r'>'
t_PUNTOCOMA = ';'
t_COMA = r','
t_PARIZQ = r'\('
t_PARDER = r'\)'
t_CORIZQ = r'\['
t_CORDER = r'\]'
t_LLAIZQ = r'{'
t_LLADER = r'}'
t_COMDOB = r'\"'
# Reserved-word rules. PLY tries function rules in definition order,
# before any of the single-regex string rules above.
# NOTE(review): these patterns carry no word-boundary anchors, so a longer
# identifier such as "included" is split into INCLUDE plus a remainder —
# confirm whether that is the intended behavior.
def t_INCLUDE(t):
    r'include'
    return t

def t_USING(t):
    r'using'
    return t

def t_NAMESPACE(t):
    r'namespace'
    return t

def t_STD(t):
    r'std'
    return t

def t_COUT(t):
    r'cout'
    return t

def t_CIN(t):
    r'cin'
    return t

def t_GET(t):
    r'get'
    return t

def t_ENDL(t):
    r'endl'
    return t

# SINO must come before SI would matter for string rules; as function
# rules they simply match the literal keywords 'else' / 'if'.
def t_SINO(t):
    r'else'
    return t

def t_SI(t):
    r'if'
    return t

def t_RETURN(t):
    r'return'
    return t

def t_VOID(t):
    r'void'
    return t

def t_MIENTRAS(t):
    r'while'
    return t

def t_PARA(t):
    r'for'
    return t
def t_ENTERO(t):
    r'\d+'
    # Integer literal: convert the matched digit run to an int so the
    # token carries a numeric value instead of a string.
    t.value = int(t.value)
    return t
def t_IDENTIFICADOR(t):
    r'\w+(_\d\w)*'
    # Identifier rule. NOTE(review): \w+ is greedy and already matches
    # letters, digits and underscores, so the (_\d\w)* suffix group looks
    # redundant — confirm the intended identifier grammar.
    return t
def t_CADENA(t):
    r'\"?(\w+ \ *\w*\d* \ *)\"?'
    # String-literal rule. NOTE(review): because PLY compiles rules with
    # re.VERBOSE, only the escaped '\ ' sequences match literal spaces and
    # the quotes are optional — this likely does not match general quoted
    # strings; confirm against sample inputs.
    return t
# Multi-character operator and symbol rules. As function rules these are
# tried before the single-character string rules, so '<=' wins over '<',
# '==' over '=', '++' over '+', etc.
def t_NUMERAL(t):
    r'\#'
    return t

def t_PLUSPLUS(t):
    r'\+\+'
    return t

def t_MENORIGUAL(t):
    r'<='
    return t

def t_MAYORIGUAL(t):
    r'>='
    return t

def t_IGUAL(t):
    r'=='
    return t

# NOTE(review): token name MAYORDER but pattern '<<' (stream insertion);
# see the matching note in the `tokens` tuple above.
def t_MAYORDER(t):
    r'<<'
    return t

def t_MAYORIZQ(t):
    r'>>'
    return t

def t_DISTINTO(t):
    r'!='
    return t
def t_newline(t):
    r'\n+'
    # Track line numbers; one increment per newline consumed. No token is
    # returned, so newlines are discarded.
    t.lexer.lineno += len(t.value)
def t_comments(t):
    r'/\*(.|\n)*?\*/'
    # Multi-line /* ... */ comment (non-greedy so it stops at the first
    # closing */). Keep line numbering accurate across the comment body.
    t.lexer.lineno += t.value.count('\n')
    print("Comentario de multiple linea")
def t_comments_ONELine(t):
    r'//[^\n]*'
    # Single-line // comment. Fix: the old pattern r'\/\/(.)*\n' required a
    # trailing newline, so a comment on the last line of input (no '\n')
    # never matched and was reported character-by-character as errors.
    # The newline is intentionally NOT consumed here; t_newline consumes it
    # and performs the lineno increment, so line counting is unchanged.
    print("Comentario de una linea")
# Characters skipped silently between tokens: spaces and tabs.
t_ignore = ' \t'

def t_error(t):
    """Record an invalid character in the shared results list and skip it."""
    global resultado_lexema
    estado = "** Token no valido en la Linea {:4} Valor {:16} Posicion {:4}".format(str(t.lineno), str(t.value),
                                                                                   str(t.lexpos))
    resultado_lexema.append(estado)
    # Skip exactly one character and let the lexer resynchronize.
    t.lexer.skip(1)
# Input test driver
def prueba(data):
    """Tokenize *data* and return the shared list of formatted results.

    Rebuilds the lexer, clears the module-level result list, then appends
    one formatted line per token. Note the returned list is the shared
    global, so a later call clears it again.
    """
    global resultado_lexema
    analizador = lex.lex()
    analizador.input(data)
    resultado_lexema.clear()
    tok = analizador.token()
    while tok is not None:
        estado = "Linea {:4} Tipo {:16} Valor {:16} Posicion {:4}".format(
            str(tok.lineno), str(tok.type), str(tok.value), str(tok.lexpos))
        resultado_lexema.append(estado)
        tok = analizador.token()
    return resultado_lexema
# Instantiate a module-level lexer so importers can use it directly.
analizador = lex.lex()

if __name__ == '__main__':
    # Simple REPL: tokenize each entered line and dump the accumulated
    # results (tokens and errors).
    while True:
        data = input("ingrese: ")
        prueba(data)
        print(resultado_lexema)
| |
###############################################################################
# Copyright 2012-2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import subprocess
import datetime
import os
import re
import pprint
import hashlib
from ipf.dt import localtzoffset
from ipf.error import StepError
from ipf.log import LogDirectoryWatcher
from . import computing_manager
from . import service
#from . import computing_share
from . import execution_environment
from ipf.step import Step
import json
#from xml.dom.minidom import getDOMImplementation
from ipf.data import Data, Representation
from .entity import *
from .service import *
from .endpoint import *
from ipf.sysinfo import ResourceName
from ipf.ipfinfo import IPFInformation, IPFInformationJson, IPFInformationTxt
#######################################################################################################################
class AbstractService(Data):
    """Data document holding the services discovered for one resource."""
    def __init__(self, id, ipfinfo):
        Data.__init__(self, id)
        self.services = []   # service.Service instances collected via add()
        self.handles = []    # NOTE(review): never populated in this file — confirm use
        self.ipfinfo = ipfinfo  # publisher metadata (list of IPFInformation)
    def add(self, serv):
        """Append one discovered service to this document."""
        self.services.append(serv)
class AbstractServiceStep(Step):
    """Step that scans $SERVICEPATH for service-description module files and
    publishes an AbstractService document describing every service found.
    """

    def __init__(self):
        Step.__init__(self)
        self.requires = [IPFInformation, ResourceName]
        self.produces = [AbstractService]
        self.services = []

    def run(self):
        """Walk every directory in $SERVICEPATH and emit an AbstractService.

        Raises:
            StepError: if the SERVICEPATH environment variable is unset.
        """
        self.resource_name = self._getInput(ResourceName).resource_name
        self.ipfinfo = [self._getInput(IPFInformation)]
        servlist = AbstractService(self.resource_name, self.ipfinfo)
        service_paths = []
        try:
            paths = os.environ["SERVICEPATH"]
            service_paths.extend(paths.split(":"))
        except KeyError:
            raise StepError("didn't find environment variable SERVICEPATH")
        for path in service_paths:
            try:
                packages = os.listdir(path)
            except OSError:
                # Unreadable or missing directory: skip it silently.
                continue
            for name in packages:
                # Skip hidden files and editor backups.
                if name.startswith("."):
                    continue
                if name.endswith("~"):
                    continue
                if name.endswith(".lua"):
                    # Fix: previously passed the unbound local `serv` here,
                    # raising NameError on the first .lua module; both
                    # branches must accumulate into `servlist`.
                    self._addService(os.path.join(path, name), path, servlist)
                else:
                    self.info("calling addmodule w/ version")
                    self._addService(os.path.join(path, name), path, servlist)
        self._output(servlist)

    def _addService(self, path, name, servlist):
        """Parse one module file and append a populated Service to servlist.

        Args:
            path: full path of the module file to parse.
            name: directory the file was found in (unused, kept for
                interface compatibility).
            servlist: AbstractService accumulator.
        """
        serv = service.Service()
        try:
            with open(path) as modfile:
                text = modfile.read()
        except IOError as e:
            self.warning("%s" % e)
            return
        m = re.search("Name = ([^\ ]+)\n", text)
        if m is not None:
            serv.Name = m.group(1).strip()
        else:
            self.debug("no name in "+path)
        # Fix: this lookup previously re-used the "Name = ..." pattern
        # (copy/paste), so serv.Type was always set to the module's Name.
        m = re.search("Type = ([^\ ]+)\n", text)
        if m is not None:
            serv.Type = m.group(1).strip()
        else:
            self.debug("no type in "+path)
        m = re.search("Version = ([^\ ]+)\n", text)
        if m is not None:
            serv.Version = m.group(1).strip()
        else:
            self.debug("no Version in "+path)
        m = re.search("Endpoint = ([^\ ]+)\ *\n", text)
        if m is not None:
            serv.Endpoint = m.group(1).strip()
        else:
            self.debug("no endpoint in "+path)
            serv.Endpoint = ""
        # Fix: re.findall returns a list (never None and without .group()),
        # so the old `is not None` test was always true and the .group(1)
        # branch could never work. Lower-case all capabilities and either
        # extend an existing list or install a new one.
        caps = re.findall("Capability = ([^\ ]+)\n", text)
        if caps:
            lowered = [cap.lower() for cap in caps]
            if serv.Capability:
                serv.Capability.extend(lowered)
            else:
                serv.Capability = lowered
        else:
            self.debug("no Capability in "+path)
        # NOTE(review): SupportStatus and QualityLevel both write
        # serv.QualityLevel (the later one wins) — confirm this is intended.
        m = re.search("SupportStatus = ([^\ ]+)\n", text)
        if m is not None:
            serv.QualityLevel = m.group(1).strip()
        else:
            self.debug("no support status in "+path)
        m = re.search("QualityLevel = ([^\ ]+)\n", text)
        if m is not None:
            serv.QualityLevel = m.group(1).strip()
        else:
            self.debug("no qualitylevel in "+path)
        m = re.search("Keywords = ([^\ ]+)\n", text)
        if m is not None:
            serv.Extension["Keywords"] = list(
                map(str.strip, m.group(1).split(",")))
        else:
            self.debug("no keywords in "+path)
        # Arbitrary "Extensions.<key> = <value>" lines become Extension entries.
        for match in re.finditer("Extensions.(.*?) = (.*?)\n", text):
            serv.Extension[match.group(1).strip()] = match.group(2).strip()
        # Map the primary capability's top-level namespace to a GLUE2
        # service type. Fix: a missing/empty capability list previously
        # raised IndexError on Capability[0]; now it falls back to
        # UntypedService.
        primary = serv.Capability[0].split(".")[0] if serv.Capability else None
        ServiceType = {
            "data": "StorageService",
            "information": "InformationService",
            "executionmanagement": "ComputingService",
            "login": "LoginService",
        }.get(primary, "UntypedService")
        serv.resource_name = self.resource_name
        # A hash of the endpoint keeps IDs unique when one service name has
        # several endpoints.
        endpointhash = ''
        if (serv.Endpoint != ''):
            endpointhashobject = hashlib.md5(str(serv.Endpoint).encode('utf-8'))
            endpointhash = "-"+endpointhashobject.hexdigest()
        serv.ID = "urn:glue2:%s:%s-%s%s" % (ServiceType,
                                            serv.Name, self.resource_name, endpointhash)
        serv.ServiceType = ServiceType
        servlist.add(serv)
#######################################################################################################################
class AbstractServiceOgfJson(EntityOgfJson):
    """OGF GLUE2 JSON representation of a single Service entity."""
    data_cls = Service

    def __init__(self, data):
        EntityOgfJson.__init__(self, data)

    def get(self):
        """Serialize the JSON document as a pretty-printed string."""
        return json.dumps(self.toJson(), sort_keys=True, indent=4)

    def toJson(self):
        """Return a dict of this Service's GLUE2 attributes and associations."""
        # Start from the Entity-level attributes. Fix: removed the dead
        # `doc = {}` store that was immediately overwritten by this call.
        doc = EntityOgfJson.toJson(self)
        # Optional attributes are emitted only when present/non-empty.
        if len(self.data.Capability) > 0:
            doc["Capability"] = self.data.Capability
        if self.data.Type is not None:
            doc["Type"] = self.data.Type
        if self.data.QualityLevel is not None:
            doc["QualityLevel"] = self.data.QualityLevel
        if len(self.data.StatusInfo) > 0:
            doc["StatusInfo"] = self.data.StatusInfo
        if self.data.Complexity is not None:
            doc["Complexity"] = self.data.Complexity
        # Cross-entity links; Contact/Location/Service IDs are always emitted.
        associations = {}
        if len(self.data.EndpointID) > 0:
            associations["EndpointID"] = self.data.EndpointID
        if len(self.data.ShareID) > 0:
            associations["ShareID"] = self.data.ShareID
        if len(self.data.ManagerID) > 0:
            associations["ManagerID"] = self.data.ManagerID
        associations["ContactID"] = self.data.ContactID
        associations["LocationID"] = self.data.LocationID
        associations["ServiceID"] = self.data.ServiceID
        doc["Associations"] = associations
        return doc
class ASOgfJson(Representation):
    """JSON representation of a whole AbstractService document.

    Emits publisher info plus per-service-type lists, and synthesizes one
    GLUE2 Endpoint document per service.
    """
    data_cls = AbstractService

    def __init__(self, data):
        Representation.__init__(
            self, Representation.MIME_APPLICATION_JSON, data)

    def get(self):
        """Serialize the JSON document as a pretty-printed string."""
        return json.dumps(self.toJson(), sort_keys=True, indent=4)

    def toJson(self):
        """Build the full document: publisher info, service buckets, endpoints."""
        doc = {}
        doc["PublisherInfo"] = [IPFInformationJson(
            ipfinfo).toJson() for ipfinfo in self.data.ipfinfo]
        # Pre-create the known service-type buckets so they appear (possibly
        # empty) in every output document.
        doc["StorageService"] = []
        doc["ComputingService"] = []
        doc["LoginService"] = []
        doc["InformationService"] = []
        doc["Endpoint"] = []
        for serv in self.data.services:
            if serv is not None:
                # Derive an Endpoint document from the service's fields.
                endpoint = Endpoint()
                endpoint.URL = serv.Endpoint
                endpoint.InterfaceName = serv.Type
                endpoint.InterfaceVersion = serv.Version
                endpoint.Name = serv.Name
                # Hash suffix keeps endpoint IDs unique when one service name
                # has several endpoints.
                endpointhash = ''
                if (serv.Endpoint != ''):
                    endpointhashobject = hashlib.md5(str(serv.Endpoint).encode('utf-8'))
                    endpointhash = "-"+endpointhashobject.hexdigest()
                endpoint.ID = "urn:glue2:Endpoint:%s-%s-%s%s" % (
                    serv.Version, serv.Name, serv.resource_name, endpointhash)
                endpoint.ServiceID = serv.ID
                endpoint.QualityLevel = serv.QualityLevel
                # NOTE(review): mutates the shared Service object so its JSON
                # links back to this endpoint — confirm intended side effect.
                serv.EndpointID = endpoint.ID
                # Unknown service types get a bucket created on demand.
                if serv.ServiceType not in doc:
                    doc[serv.ServiceType] = []
                doc[serv.ServiceType].append(
                    AbstractServiceOgfJson(serv).toJson())
                doc["Endpoint"].append(EndpointOgfJson(endpoint).toJson())
        return doc
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(josh11b): Forked from contrib/eager/python to test OptimizerV2 the same way
# OptimizerV1 is tested. This file should be removed once the fork is resolved.
import functools
import os
import six
from tensorflow.contrib.optimizer_v2 import adam
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import template
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as core_saver
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpointable import graph_view
from tensorflow.python.training.checkpointable import tracking
from tensorflow.python.training.checkpointable import util
class NonLayerCheckpointable(tracking.AutoCheckpointable):
  """A dependency-tracked object that is not a Keras Layer.

  Used to verify that non-Layer dependencies of a Model are checkpointed.
  """

  def __init__(self):
    super(NonLayerCheckpointable, self).__init__()
    # A scalar variable tracked as a checkpoint dependency of this object.
    self.a_variable = util.add_variable(
        self, name="a_variable", shape=[])
# pylint: disable=not-callable
class MyModel(training.Model):
  """A concrete Model for testing."""

  def __init__(self):
    super(MyModel, self).__init__()
    # Attribute names matter: the tests assert on checkpoint keys derived
    # from "_named_dense" and "_second".
    self._named_dense = core.Dense(1, use_bias=True)
    self._second = core.Dense(1, use_bias=False)
    # We can still track Checkpointables which aren't Layers.
    self._non_layer = NonLayerCheckpointable()

  def call(self, values):
    # Two stacked Dense layers: values -> _named_dense -> _second.
    ret = self._second(self._named_dense(values))
    return ret
class _MirroringSaveable(
    core_saver.BaseSaverBuilder.ResourceVariableSaveable):
  """SaveableObject that saves one variable and restores into two."""

  def __init__(self, primary_variable, mirrored_variable, name):
    self._primary_variable = primary_variable
    self._mirrored_variable = mirrored_variable
    # Only the primary variable's value is written to the checkpoint.
    super(_MirroringSaveable, self).__init__(
        self._primary_variable, "", name)

  def restore(self, restored_tensors, restored_shapes):
    """Restore the same value into both variables."""
    tensor, = restored_tensors
    return control_flow_ops.group(
        self._primary_variable.assign(tensor),
        self._mirrored_variable.assign(tensor))
class CheckpointingTests(test.TestCase):
  """Object-based checkpointing tests exercising the forked OptimizerV2 Adam."""

  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def testNamingWithOptimizer(self):
    # Verifies the object-graph checkpoint keys generated for a model, its
    # optimizer (slot and non-slot variables) and the global step.
    input_value = constant_op.constant([[3.]])
    model = MyModel()
    # A nuisance Model using the same optimizer. Its slot variables should not
    # go in the checkpoint, since it is never depended on.
    other_model = MyModel()
    optimizer = adam.AdamOptimizer(0.001)
    optimizer_step = training_util.get_or_create_global_step()
    root_checkpointable = util.Checkpoint(
        optimizer=optimizer, model=model, optimizer_step=optimizer_step)
    if context.executing_eagerly():
      optimizer.minimize(
          lambda: model(input_value),
          global_step=optimizer_step)
      optimizer.minimize(
          lambda: other_model(input_value),
          global_step=optimizer_step)
    else:
      train_op = optimizer.minimize(
          model(input_value), global_step=optimizer_step)
      optimizer.minimize(
          other_model(input_value),
          global_step=optimizer_step)
      self.evaluate(util.gather_initializers(
          root_checkpointable))
      self.evaluate(train_op)
    named_variables, serialized_graph, _ = graph_view.ObjectGraphView(
        root_checkpointable).serialize_object_graph()
    expected_checkpoint_names = (
        # Created in the root node, so no prefix.
        "optimizer_step",
        "model/_second/kernel",
        "model/_named_dense/kernel",
        "model/_named_dense/bias",
        # non-Layer dependency of the model
        "model/_non_layer/a_variable",
        # The optimizer creates two non-slot variables
        "optimizer/beta1_power",
        "optimizer/beta2_power",
        # Slot variables
        "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m",
        "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v",
        "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m",
        "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v",
        "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m",
        "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v",
    )
    suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
    expected_checkpoint_names = [
        name + suffix for name in expected_checkpoint_names]
    # The optimizer and Dense layers also save get_config() JSON
    expected_checkpoint_names.extend([
        "model/_second/.ATTRIBUTES/OBJECT_CONFIG_JSON",
        "model/_named_dense/.ATTRIBUTES/OBJECT_CONFIG_JSON"
    ])
    named_variables = {v.name: v for v in named_variables}
    six.assertCountEqual(self, expected_checkpoint_names,
                         named_variables.keys())
    # Check that we've mapped to the right variable objects (not exhaustive)
    self.assertEqual(
        "global_step",
        named_variables["optimizer_step" + suffix].full_name)
    self.assertEqual(
        "my_model/dense_1/kernel",
        named_variables["model/_second/kernel" + suffix].full_name)
    self.assertEqual(
        "my_model/dense/kernel",
        named_variables["model/_named_dense/kernel" + suffix].full_name)
    self.assertEqual(
        "beta1_power",
        named_variables["optimizer/beta1_power" + suffix].full_name)
    self.assertEqual(
        "beta2_power",
        named_variables["optimizer/beta2_power" + suffix].full_name)
    # Spot check the generated protocol buffers.
    self.assertEqual("optimizer",
                     serialized_graph.nodes[0].children[1].local_name)
    optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[
        1].node_id]
    self.assertEqual("beta1_power", optimizer_node.children[0].local_name)
    self.assertEqual(
        "beta1_power", serialized_graph.nodes[optimizer_node.children[0]
                                              .node_id].attributes[0].full_name)
    self.assertEqual(
        "my_model/dense/kernel",
        serialized_graph.nodes[optimizer_node.slot_variables[0]
                               .original_variable_node_id]
        .attributes[0].full_name)
    # We strip off the :0 suffix, as variable.name-based saving does.
    self.assertEqual(
        "my_model/dense/kernel/Adam",
        serialized_graph.nodes[optimizer_node.slot_variables[0]
                               .slot_variable_node_id]
        .attributes[0].full_name)
    self.assertEqual(
        "my_model/dense/kernel/Adam:0",
        optimizer.get_slot(
            var=model._named_dense.kernel,
            name="m").name)
    self.assertEqual(
        "model/_named_dense/kernel" + suffix,
        serialized_graph.nodes[
            optimizer_node.slot_variables[0]
            .original_variable_node_id].attributes[0].checkpoint_key)
    self.assertEqual("m", optimizer_node.slot_variables[0].slot_name)
    self.assertEqual(
        "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix,
        serialized_graph.nodes[
            optimizer_node.slot_variables[0]
            .slot_variable_node_id].attributes[0].checkpoint_key)
@test_util.run_in_graph_and_eager_modes
def testSaveRestore(self):
  """Saves, then restores both immediately and deferred (restore-on-create)."""
  model = MyModel()
  optimizer = adam.AdamOptimizer(0.001)
  root_checkpointable = util.Checkpoint(
      optimizer=optimizer, model=model)
  input_value = constant_op.constant([[3.]])
  if context.executing_eagerly():
    optimizer.minimize(
        lambda: model(input_value))
  else:
    train_op = optimizer.minimize(model(input_value))
    # TODO(allenl): Make initialization more pleasant when graph building.
    root_checkpointable.save_counter  # pylint: disable=pointless-statement
    self.evaluate(util.gather_initializers(
        root_checkpointable))
    self.evaluate(train_op)
  prefix = os.path.join(self.get_temp_dir(), "ckpt")
  # Write known sentinel values, save, then clobber them so the restore is
  # observable.
  self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.]))
  m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m")
  self.evaluate(state_ops.assign(m_bias_slot, [1.5]))
  save_path = root_checkpointable.save(file_prefix=prefix)
  self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.]))
  self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3))
  optimizer_variables = self.evaluate(optimizer.variables())
  self.evaluate(state_ops.assign(m_bias_slot, [-2.]))
  # Immediate restoration
  status = root_checkpointable.restore(save_path=save_path).assert_consumed()
  status.run_restore_ops()
  self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1]))
  self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter))
  self.assertAllEqual([1.5], self.evaluate(m_bias_slot))
  if not context.executing_eagerly():
    return  # Restore-on-create is only supported when executing eagerly
  on_create_model = MyModel()
  on_create_optimizer = adam.AdamOptimizer(
      0.001,
      # Preserve beta_1_power and beta_2_power when applying gradients
      # so we can test that they've been restored correctly.
      beta1=1.0,
      beta2=1.0)
  on_create_root = util.Checkpoint(
      optimizer=on_create_optimizer, model=on_create_model)
  # Deferred restoration
  status = on_create_root.restore(save_path=save_path)
  on_create_model(constant_op.constant([[3.]]))  # create variables
  self.assertAllEqual(1, self.evaluate(on_create_root.save_counter))
  self.assertAllEqual([42.],
                      self.evaluate(
                          on_create_model._named_dense.variables[1]))
  on_create_m_bias_slot = on_create_optimizer.get_slot(
      on_create_model._named_dense.variables[1], "m")
  # Optimizer slot variables are created when the original variable is
  # restored.
  self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot))
  self.assertAllEqual(optimizer_variables[2:],
                      self.evaluate(on_create_optimizer.variables()))
  dummy_var = resource_variable_ops.ResourceVariable([1.])
  on_create_optimizer.minimize(loss=dummy_var.read_value)
  status.assert_consumed()
  beta_1_power, beta_2_power = on_create_optimizer._get_beta_accumulators()
  self.assertAllEqual(optimizer_variables[0], self.evaluate(beta_1_power))
  self.assertAllEqual(optimizer_variables[1], self.evaluate(beta_2_power))
# TODO(allenl): Debug garbage created by this test in python3.
def testDeferredRestorationUsageEager(self):
  """An idiomatic eager execution example."""
  num_training_steps = 10
  checkpoint_directory = self.get_temp_dir()
  checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
  # Three "runs" of the same program: each restores the latest checkpoint
  # (a no-op on the first pass), trains, then saves.
  for training_continuation in range(3):
    model = MyModel()
    optimizer = adam.AdamOptimizer(0.001)
    root = util.Checkpoint(
        optimizer=optimizer, model=model,
        optimizer_step=training_util.get_or_create_global_step())
    root.restore(checkpoint_management.latest_checkpoint(
        checkpoint_directory))
    for _ in range(num_training_steps):
      # TODO(allenl): Use a Dataset and serialize/checkpoint it.
      input_value = constant_op.constant([[3.]])
      optimizer.minimize(
          lambda: model(input_value),  # pylint: disable=cell-var-from-loop
          global_step=root.optimizer_step)
    root.save(file_prefix=checkpoint_prefix)
    # The global step must have accumulated across continuations.
    self.assertEqual((training_continuation + 1) * num_training_steps,
                     root.optimizer_step.numpy())
def testUsageGraph(self):
  """Expected usage when graph building."""
  with context.graph_mode():
    num_training_steps = 10
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    # Three simulated program runs, each in a fresh graph/session.
    for training_continuation in range(3):
      with ops.Graph().as_default():
        model = MyModel()
        optimizer = adam.AdamOptimizer(0.001)
        root = util.Checkpoint(
            optimizer=optimizer, model=model,
            global_step=training_util.get_or_create_global_step())
        input_value = constant_op.constant([[3.]])
        train_op = optimizer.minimize(
            model(input_value),
            global_step=root.global_step)
        checkpoint_path = checkpoint_management.latest_checkpoint(
            checkpoint_directory)
        with self.session(graph=ops.get_default_graph()) as session:
          status = root.restore(save_path=checkpoint_path)
          status.initialize_or_restore(session=session)
          if checkpoint_path is None:
            # First run: nothing to restore, so the status cannot be
            # fully consumed.
            self.assertEqual(0, training_continuation)
            with self.assertRaises(AssertionError):
              status.assert_consumed()
          else:
            status.assert_consumed()
          for _ in range(num_training_steps):
            session.run(train_op)
          root.save(file_prefix=checkpoint_prefix, session=session)
          # Step and save counter accumulate across continuations.
          self.assertEqual((training_continuation + 1) * num_training_steps,
                           session.run(root.global_step))
          self.assertEqual(training_continuation + 1,
                           session.run(root.save_counter))
@test_util.run_in_graph_and_eager_modes
def testAgnosticUsage(self):
  """Graph/eager agnostic usage."""
  # Does create garbage when executing eagerly due to ops.Graph() creation.
  num_training_steps = 10
  checkpoint_directory = self.get_temp_dir()
  checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
  for training_continuation in range(3):
    with ops.Graph().as_default(), self.test_session(
        graph=ops.get_default_graph()), test_util.device(use_gpu=True):
      model = MyModel()
      optimizer = adam.AdamOptimizer(0.001)
      root = util.Checkpoint(
          optimizer=optimizer, model=model,
          global_step=training_util.get_or_create_global_step())
      checkpoint_path = checkpoint_management.latest_checkpoint(
          checkpoint_directory)
      status = root.restore(save_path=checkpoint_path)
      input_value = constant_op.constant([[3.]])
      # The same train_fn works in both modes: in graph mode the op is
      # built once and the partial just evaluates it.
      train_fn = functools.partial(
          optimizer.minimize,
          functools.partial(model, input_value),
          global_step=root.global_step)
      if not context.executing_eagerly():
        train_fn = functools.partial(self.evaluate, train_fn())
      status.initialize_or_restore()
      for _ in range(num_training_steps):
        train_fn()
      root.save(file_prefix=checkpoint_prefix)
      self.assertEqual((training_continuation + 1) * num_training_steps,
                       self.evaluate(root.global_step))
      self.assertEqual(training_continuation + 1,
                       self.evaluate(root.save_counter))
# pylint: disable=cell-var-from-loop
@test_util.run_in_graph_and_eager_modes
def testWithDefun(self):
  # Same save/restore cycle as testAgnosticUsage, but the forward pass is
  # wrapped in function.defun to exercise checkpointing through defuns.
  num_training_steps = 2
  checkpoint_directory = self.get_temp_dir()
  checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
  for training_continuation in range(3):
    with ops.Graph().as_default(), self.test_session(
        graph=ops.get_default_graph()), test_util.device(use_gpu=True):
      model = MyModel()
      # Don't actually train so we can test variable values
      optimizer = adam.AdamOptimizer(0.)
      root = util.Checkpoint(
          optimizer=optimizer, model=model,
          global_step=training_util.get_or_create_global_step())
      checkpoint_path = checkpoint_management.latest_checkpoint(
          checkpoint_directory)
      status = root.restore(save_path=checkpoint_path)
      def train_fn():
        @function.defun
        def _call_model(x):
          return model(x)
        with backprop.GradientTape() as tape:
          loss = _call_model(constant_op.constant([[3.]]))
        gradients = tape.gradient(loss, model.variables)
        return optimizer.apply_gradients(zip(gradients, model.variables),
                                         global_step=root.global_step)
      if not context.executing_eagerly():
        train_fn = functools.partial(
            self.evaluate, train_fn())
      status.initialize_or_restore()
      for _ in range(num_training_steps):
        train_fn()
      if training_continuation > 0:
        # The sentinel written on the first continuation must survive the
        # (zero-learning-rate) training above.
        status.assert_consumed()
        self.assertAllClose([[42.]], self.evaluate(model.variables[0]))
      else:
        self.evaluate(model.variables[0].assign([[42.]]))
      root.save(file_prefix=checkpoint_prefix)
      self.assertEqual((training_continuation + 1) * num_training_steps,
                       self.evaluate(root.global_step))
      self.assertEqual(training_continuation + 1,
                       self.evaluate(root.save_counter))
# pylint: enable=cell-var-from-loop
def testAnonymousVarsInInit(self):
  # Variables created directly in a Model's __init__ (not via Layers)
  # must still be tracked and checkpointable.

  class Model(training.Model):

    def __init__(self):
      super(Model, self).__init__()
      self.w = resource_variable_ops.ResourceVariable(0.0)
      self.b = resource_variable_ops.ResourceVariable(0.0)
      self.vars = [self.w, self.b]

    def call(self, x):
      return x * self.w + self.b

  with context.eager_mode():
    model = Model()
    optimizer = adam.AdamOptimizer(learning_rate=0.05)
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    checkpoint = util.Checkpoint(
        model=model, optimizer=optimizer)
    # Save before and after one optimization step; both must succeed.
    for _ in range(2):
      checkpoint.save(checkpoint_prefix)
      with backprop.GradientTape() as tape:
        loss = (constant_op.constant(1.)
                - model(constant_op.constant(1.))) ** 2
      grad = tape.gradient(loss, model.vars)
      optimizer.apply_gradients(
          [(g, v) for g, v in zip(grad, model.vars)])
@test_util.run_in_graph_and_eager_modes
def testDeferredSlotRestoration(self):
    """Slot variables saved in a checkpoint must be restorable even when the
    optimizer (and therefore the slots) is attached to the object graph only
    after restore() has been called."""
    checkpoint_directory = self.get_temp_dir()
    root = util.Checkpoint()
    root.var = util.add_variable(
        root, name="var", initializer=0.)
    optimizer = adam.AdamOptimizer(0.1)
    if context.executing_eagerly():
        optimizer.minimize(root.var.read_value)
    else:
        train_op = optimizer.minimize(root.var)
        # Note that `optimizer` has not been added as a dependency of
        # `root`. Create a one-off grouping so that slot variables for `root.var`
        # get initialized too.
        self.evaluate(util.gather_initializers(
            util.Checkpoint(root=root, optimizer=optimizer)))
        self.evaluate(train_op)
    self.evaluate(state_ops.assign(root.var, 12.))
    # First checkpoint: optimizer not tracked, so no slot variables saved.
    no_slots_path = root.save(os.path.join(checkpoint_directory, "no_slots"))
    root.optimizer = optimizer
    self.evaluate(state_ops.assign(root.var, 13.))
    self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
                                   14.))
    # Second checkpoint: optimizer tracked, slot "m" saved with value 14.
    slots_path = root.save(os.path.join(checkpoint_directory, "with_slots"))
    new_root = util.Checkpoint()
    # Load the slot-containing checkpoint (deferred), then immediately overwrite
    # the non-slot variable (also deferred).
    slot_status = new_root.restore(slots_path)
    no_slot_status = new_root.restore(no_slots_path)
    with self.assertRaises(AssertionError):
        no_slot_status.assert_consumed()
    new_root.var = util.add_variable(
        new_root, name="var", shape=[])
    no_slot_status.assert_consumed()
    no_slot_status.run_restore_ops()
    # The later restore wins for the shared variable: value 12, not 13.
    self.assertEqual(12., self.evaluate(new_root.var))
    new_root.optimizer = adam.AdamOptimizer(0.1)
    # Non-slot optimizer state (beta1_power) is still unrestored at this point.
    with self.assertRaisesRegexp(AssertionError, "beta1_power"):
        slot_status.assert_consumed()
    self.assertEqual(12., self.evaluate(new_root.var))
    if context.executing_eagerly():
        # Slot variables are only created with restoring initializers when
        # executing eagerly.
        self.assertEqual(14., self.evaluate(
            new_root.optimizer.get_slot(name="m", var=new_root.var)))
    else:
        self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
                      None)
    if context.executing_eagerly():
        new_root.optimizer.minimize(new_root.var.read_value)
    else:
        train_op = new_root.optimizer.minimize(new_root.var)
        # The slot variable now exists; restore() didn't create it, but we should
        # now have a restore op for it.
        slot_status.run_restore_ops()
        self.assertEqual(14., self.evaluate(
            new_root.optimizer.get_slot(name="m", var=new_root.var)))
        self.evaluate(train_op)
    slot_status.assert_consumed()
def testManySavesGraph(self):
    """Saves after the first should not modify the graph."""
    with context.graph_mode():
        graph = ops.Graph()
        with graph.as_default(), self.session(graph):
            prefix = os.path.join(self.get_temp_dir(), "ckpt")
            # Build a tiny checkpointable object graph: one variable plus
            # an Adam optimizer (which adds slot variables).
            obj = util.Checkpoint()
            obj.var = variable_scope.get_variable(name="v", initializer=0.)
            obj.opt = adam.AdamOptimizer(0.1)
            obj.opt.minimize(obj.var.read_value())
            self.evaluate(util.gather_initializers(obj))
            # First save may add save/restore ops; snapshot the graph after it.
            obj.save(prefix)
            ops_after_first_save = graph.get_operations()
            # A second save must reuse those ops rather than adding new ones.
            obj.save(prefix)
            self.assertEqual(ops_after_first_save, graph.get_operations())
def testManyRestoresGraph(self):
    """Restores after the first should not modify the graph."""
    with context.graph_mode():
        graph = ops.Graph()
        with graph.as_default(), self.session(graph):
            prefix = os.path.join(self.get_temp_dir(), "ckpt")
            # Same tiny object graph as the save test: variable + Adam slots.
            obj = util.Checkpoint()
            obj.var = variable_scope.get_variable(name="v", initializer=0.)
            obj.opt = adam.AdamOptimizer(0.1)
            obj.opt.minimize(obj.var.read_value())
            self.evaluate(util.gather_initializers(obj))
            ckpt_path = obj.save(prefix)
            # First restore may build restore ops; snapshot afterwards.
            obj.restore(ckpt_path)
            ops_after_first_restore = graph.get_operations()
            # A second restore must not grow the graph.
            obj.restore(ckpt_path)
            self.assertEqual(ops_after_first_restore, graph.get_operations())
def testMultipleGraphsNonSlotVariables(self):
    """One optimizer object shared across two graphs: saving/restoring in the
    second graph must not disturb the optimizer state in the first."""
    with context.graph_mode():
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        optimizer = adam.AdamOptimizer(0.001)
        # Construct a model in one graph
        first_graph = ops.Graph()
        first_session = session_lib.Session(graph=first_graph)
        with first_graph.as_default(), first_session.as_default():
            first_variable = resource_variable_ops.ResourceVariable([1.])
            first_root_checkpointable = util.Checkpoint(
                optimizer=optimizer, variable=first_variable)
            train_op = optimizer.minimize(first_variable.read_value)
            self.evaluate(util.gather_initializers(
                first_root_checkpointable))
            self.evaluate(train_op)
            # Known sentinel values: variable=1, slot m=2, beta1_power=3.
            self.evaluate(first_variable.assign([1.]))
            self.evaluate(optimizer.get_slot(
                var=first_variable, name="m").assign([2.]))
            beta_1_power, _ = optimizer._get_beta_accumulators()
            self.evaluate(beta_1_power.assign(3.))
        # Save and load in a second graph
        second_graph = ops.Graph()
        with second_graph.as_default(), session_lib.Session(graph=second_graph):
            second_variable = resource_variable_ops.ResourceVariable([1.])
            second_root_checkpointable = util.Checkpoint(
                optimizer=optimizer, variable=second_variable)
            train_op = optimizer.minimize(second_variable.read_value)
            second_root_checkpointable.restore(None).initialize_or_restore()
            self.evaluate(train_op)
            # Second graph's sentinels: variable=4, slot m=5, beta1_power=6.
            self.evaluate(second_variable.assign([4.]))
            self.evaluate(optimizer.get_slot(
                var=second_variable, name="m").assign([5.]))
            beta_1_power, _ = optimizer._get_beta_accumulators()
            self.evaluate(beta_1_power.assign(6.))
            save_path = second_root_checkpointable.save(checkpoint_prefix)
            # Clobber the values, then restore and check they come back.
            self.evaluate(second_variable.assign([7.]))
            self.evaluate(optimizer.get_slot(
                var=second_variable, name="m").assign([8.]))
            beta_1_power, _ = optimizer._get_beta_accumulators()
            self.assertAllEqual(6., self.evaluate(beta_1_power))
            status = second_root_checkpointable.restore(save_path)
            status.assert_consumed().run_restore_ops()
            self.assertAllEqual([4.], self.evaluate(second_variable))
            self.assertAllEqual([5.], self.evaluate(optimizer.get_slot(
                var=second_variable, name="m")))
            beta_1_power, _ = optimizer._get_beta_accumulators()
            self.assertAllEqual(6., self.evaluate(beta_1_power))
        # Check that the first graph is unmolested
        with first_graph.as_default(), first_session.as_default():
            self.assertAllEqual([1.], self.evaluate(first_variable))
            self.assertAllEqual([2.], self.evaluate(optimizer.get_slot(
                var=first_variable, name="m")))
            beta_1_power, _ = optimizer._get_beta_accumulators()
            self.assertAllEqual(3., self.evaluate(beta_1_power))
class TemplateTests(test.TestCase):
    """Object-based checkpointing of variables created inside tf templates."""

    @test_util.run_in_graph_and_eager_modes
    def test_checkpointable_save_restore(self):

        def _templated():
            # Two resource variables created inside the template; both should
            # become checkpoint dependencies of the template object.
            v = variable_scope.get_variable(
                "v", shape=[1], initializer=init_ops.zeros_initializer(),
                use_resource=True)
            v2 = variable_scope.get_variable(
                "v2", shape=[1], initializer=init_ops.zeros_initializer(),
                use_resource=True)
            return v, v + 1., v2

        save_template = template.make_template("s1", _templated)
        v1_save, _, v2_save = save_template()
        optimizer = adam.AdamOptimizer(0.0)
        save_root = util.Checkpoint(
            my_template=save_template, optimizer=optimizer)
        optimizer.minimize(v1_save.read_value)
        self.evaluate([v.initializer for v in optimizer.variables()])
        # Known values to verify after restore: v=12, v2=14.
        self.evaluate(v1_save.assign([12.]))
        self.evaluate(v2_save.assign([14.]))
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        save_path = save_root.save(checkpoint_prefix)
        # A second, independent template ("s2") restores from the checkpoint
        # written by the first; restore() is called before the variables exist.
        load_template = template.make_template("s2", _templated)
        load_optimizer = adam.AdamOptimizer(0.0)
        load_root = util.Checkpoint(
            my_template=load_template, optimizer=load_optimizer)
        status = load_root.restore(save_path)
        var, var_plus_one, var2 = load_template()
        load_optimizer.minimize(var.read_value)
        # Exactly the two variables, tracked in creation order.
        self.assertEqual(2, len(load_template._checkpoint_dependencies))
        self.assertEqual("v", load_template._checkpoint_dependencies[0].name)
        self.assertEqual("v2", load_template._checkpoint_dependencies[1].name)
        status.assert_consumed().run_restore_ops()
        self.assertAllEqual([12.], self.evaluate(var))
        self.assertAllEqual([13.], self.evaluate(var_plus_one))
        self.assertAllEqual([14.], self.evaluate(var2))
class CheckpointCompatibilityTests(test.TestCase):
    """Cross-compatibility: name-based vs object-based checkpoints, and
    graph-mode vs eager-mode saving/loading."""

    def _initialized_model(self):
        """Build a Checkpoint(model, optimizer, step), run one train step and
        write sentinel values (bias=1, slot m=2, beta1_power=3)."""
        input_value = constant_op.constant([[3.]])
        model = MyModel()
        optimizer = adam.AdamOptimizer(0.001)
        optimizer_step = training_util.get_or_create_global_step()
        root_checkpointable = util.Checkpoint(
            optimizer=optimizer, model=model, optimizer_step=optimizer_step)
        train_op = optimizer.minimize(
            functools.partial(model, input_value),
            global_step=optimizer_step)
        self.evaluate(util.gather_initializers(
            root_checkpointable))
        self.evaluate(train_op)
        # A regular variable, a slot variable, and a non-slot Optimizer variable
        # with known values to check when loading.
        self.evaluate(model._named_dense.bias.assign([1.]))
        self.evaluate(optimizer.get_slot(
            var=model._named_dense.bias, name="m").assign([2.]))
        beta_1_power, _ = optimizer._get_beta_accumulators()
        self.evaluate(beta_1_power.assign(3.))
        return root_checkpointable

    def _set_sentinels(self, root_checkpointable):
        # Overwrite the known values so that a successful restore is
        # detectable by _check_sentinels.
        self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.]))
        self.evaluate(
            root_checkpointable.optimizer.get_slot(
                var=root_checkpointable.model._named_dense.bias, name="m")
            .assign([102.]))
        beta_1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
        self.evaluate(beta_1_power.assign(103.))

    def _check_sentinels(self, root_checkpointable):
        # Assert the values written by _initialized_model are in place.
        self.assertAllEqual(
            [1.], self.evaluate(root_checkpointable.model._named_dense.bias))
        self.assertAllEqual([2.], self.evaluate(
            root_checkpointable.optimizer.get_slot(
                var=root_checkpointable.model._named_dense.bias, name="m")))
        beta_1_power, _ = root_checkpointable.optimizer._get_beta_accumulators()
        self.assertAllEqual(3., self.evaluate(beta_1_power))

    def _write_name_based_checkpoint(self):
        """Save the initialized model with the legacy name-based Saver and
        return the resulting checkpoint path."""
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        with context.graph_mode():
            save_graph = ops.Graph()
            with save_graph.as_default(), self.test_session(
                    graph=save_graph) as session:
                root = self._initialized_model()
                name_saver = core_saver.Saver()
                return name_saver.save(
                    sess=session, save_path=checkpoint_prefix,
                    global_step=root.optimizer_step)

    @test_util.run_in_graph_and_eager_modes
    def testLoadFromNameBasedSaver(self):
        """Save a name-based checkpoint, load it using the object-based API."""
        with test_util.device(use_gpu=True):
            save_path = self._write_name_based_checkpoint()
            root = self._initialized_model()
            self._set_sentinels(root)
            with self.assertRaises(AssertionError):
                self._check_sentinels(root)
            object_saver = util.CheckpointableSaver(graph_view.ObjectGraphView(root))
            self._set_sentinels(root)
            status = object_saver.restore(save_path)
            if context.executing_eagerly():
                self._check_sentinels(root)
            if context.executing_eagerly():
                with self.assertRaisesRegexp(AssertionError, "OBJECT_CONFIG_JSON"):
                    status.assert_consumed()
            else:
                # When graph building, we haven't read any keys, so we don't know
                # whether the restore will be complete.
                with self.assertRaisesRegexp(AssertionError, "not restored"):
                    status.assert_consumed()
            status.run_restore_ops()
            self._check_sentinels(root)
            self._set_sentinels(root)
            status = object_saver.restore(save_path)
            status.initialize_or_restore()
            self._check_sentinels(root)
    # TODO(allenl): Test for the core name-based saver loading object-based
    # checkpoints once object-based checkpointing is in core.

    def testSaveGraphLoadEager(self):
        """Checkpoint written while graph building must load in eager mode."""
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        with context.graph_mode():
            save_graph = ops.Graph()
            with save_graph.as_default(), self.test_session(
                    graph=save_graph) as session:
                root = self._initialized_model()
                save_path = root.save(
                    session=session, file_prefix=checkpoint_prefix)
        with context.eager_mode():
            root = self._initialized_model()
            self._set_sentinels(root)
            root.restore(save_path).assert_consumed()
            self._check_sentinels(root)

    def testSaveEagerLoadGraph(self):
        """Checkpoint written eagerly must load while graph building."""
        checkpoint_directory = self.get_temp_dir()
        checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
        with context.eager_mode():
            root = self._initialized_model()
            save_path = root.save(file_prefix=checkpoint_prefix)
        with context.graph_mode():
            save_graph = ops.Graph()
            with save_graph.as_default(), self.test_session(graph=save_graph):
                root = self._initialized_model()
                self._set_sentinels(root)
                root.restore(save_path).assert_consumed().run_restore_ops()
                self._check_sentinels(root)
if __name__ == "__main__":
    # Run every test case in this file under the TF test runner.
    test.main()
| |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/widgetbase.py
__version__=''' $Id$ '''
__doc__='''Base class for user-defined graphical widgets'''
import string
from reportlab.graphics import shapes
from reportlab import rl_config
from reportlab.lib import colors
from reportlab.lib.validators import *
from reportlab.lib.attrmap import *
class PropHolder:
    '''Base for property holders'''

    # Mapping of public attribute name -> AttrMapValue validator.  Subclasses
    # override this; None disables verification entirely.
    _attrMap = None

    def verify(self):
        """If the _attrMap attribute is not None, this
        checks all expected attributes are present; no
        unwanted attributes are present; and (if a
        checking function is found) checks each
        attribute has a valid value. Either succeeds
        or raises an informative exception.
        """
        if self._attrMap is not None:
            # Pass 1: no unexpected public (non-underscore) attributes.
            for key in self.__dict__.keys():
                if key[0] <> '_':
                    msg = "Unexpected attribute %s found in %s" % (key, self)
                    assert self._attrMap.has_key(key), msg
            # Pass 2: every declared attribute is present and validates.
            for (attr, metavalue) in self._attrMap.items():
                msg = "Missing attribute %s from %s" % (attr, self)
                assert hasattr(self, attr), msg
                value = getattr(self, attr)
                args = (value, attr, self.__class__.__name__)
                assert metavalue.validate(value), "Invalid value %s for attribute %s in class %s" % args

    if rl_config.shapeChecking:
        """This adds the ability to check every attribute assignment
        as it is made. It slows down shapes but is a big help when
        developing. It does not get defined if rl_config.shapeChecking = 0.
        """
        def __setattr__(self, name, value):
            """By default we verify. This could be off
            in some parallel base classes."""
            # validateSetattr checks the value against _attrMap (when set)
            # before storing it on the instance.
            validateSetattr(self,name,value)

    def getProperties(self,recur=1):
        """Returns a list of all properties which can be edited and
        which are not marked as private. This may include 'child
        widgets' or 'primitive shapes'. You are free to override
        this and provide alternative implementations; the default
        one simply returns everything without a leading underscore.
        """
        from reportlab.lib.validators import isValidChild
        # TODO when we need it, but not before -
        # expose sequence contents?
        props = {}
        for name in self.__dict__.keys():
            if name[0:1] <> '_':
                component = getattr(self, name)
                if recur and isValidChild(component):
                    # child object, get its properties too
                    childProps = component.getProperties(recur=recur)
                    for (childKey, childValue) in childProps.items():
                        #key might be something indexed like '[2].fillColor'
                        #or simple like 'fillColor'; in the former case we
                        #don't need a '.' between me and my child.
                        if childKey[0] == '[':
                            props['%s%s' % (name, childKey)] = childValue
                        else:
                            props['%s.%s' % (name, childKey)] = childValue
                else:
                    props[name] = component
        return props

    def setProperties(self, propDict):
        """Permits bulk setting of properties. These may include
        child objects e.g. "chart.legend.width = 200".

        All assignments will be validated by the object as if they
        were set individually in python code.

        All properties of a top-level object are guaranteed to be
        set before any of the children, which may be helpful to
        widget designers.
        """
        childPropDicts = {}
        for (name, value) in propDict.items():
            # Split off the first dotted component only; deeper paths are
            # handled by the child's own recursive setProperties call.
            parts = string.split(name, '.', 1)
            if len(parts) == 1:
                #simple attribute, set it now
                setattr(self, name, value)
            else:
                (childName, remains) = parts
                try:
                    childPropDicts[childName][remains] = value
                except KeyError:
                    childPropDicts[childName] = {remains: value}
        # now assign to children
        for (childName, childPropDict) in childPropDicts.items():
            child = getattr(self, childName)
            child.setProperties(childPropDict)

    def dumpProperties(self, prefix=""):
        """Convenience. Lists them on standard output. You
        may provide a prefix - mostly helps to generate code
        samples for documentation.
        """
        propList = self.getProperties().items()
        propList.sort()
        if prefix:
            prefix = prefix + '.'
        for (name, value) in propList:
            print '%s%s = %s' % (prefix, name, value)
class Widget(PropHolder, shapes.UserNode):
    """Base for all user-defined widgets. Keep as simple as possible. Does
    not inherit from Shape so that we can rewrite shapes without breaking
    widgets and vice versa."""

    def _setKeywords(self,**kw):
        # Set only keywords that are not already present, so callers can
        # supply defaults without clobbering explicit settings.
        for k,v in kw.items():
            if not self.__dict__.has_key(k):
                setattr(self,k,v)

    def draw(self):
        # Abstract: each concrete widget must return a drawable node.
        msg = "draw() must be implemented for each Widget!"
        raise shapes.NotImplementedError, msg

    def demo(self):
        # Abstract: each concrete widget should provide a demo drawing.
        msg = "demo() must be implemented for each Widget!"
        raise shapes.NotImplementedError, msg

    def provideNode(self):
        # UserNode protocol hook: the renderer asks for a concrete node.
        return self.draw()

    def getBounds(self):
        "Return outer boundary as x1,y1,x2,y2. Can be overridden for efficiency"
        return self.draw().getBounds()
class ScaleWidget(Widget):
    '''Contents with a scale and offset'''
    _attrMap = AttrMap(
        x = AttrMapValue(isNumber,desc="x offset"),
        y = AttrMapValue(isNumber,desc="y offset"),
        scale = AttrMapValue(isNumber,desc="scale"),
        contents = AttrMapValue(None,desc="Contained drawable elements"),
        )

    def __init__(self,x=0,y=0,scale=1.0,contents=None):
        self.x = x
        self.y = y
        # Normalise contents to a fresh list: empty/None -> [], a single
        # element -> one-item list, any sequence -> copied list.
        if not contents:
            self.contents = []
        elif isinstance(contents,(tuple,list)):
            self.contents = list(contents)
        else:
            self.contents = [contents]
        self.scale = scale

    def draw(self):
        '''Wrap the contents in a Group carrying the scale/translate matrix.'''
        matrix = (self.scale,0,0,self.scale,self.x,self.y)
        return shapes.Group(transform=matrix,*self.contents)
# Module-level cache mapping a held property class -> its generated WKlass
# wrapper subclass, shared by all TypedPropertyCollection instances.
_ItemWrapper={}
class TypedPropertyCollection(PropHolder):
    """A container with properties for objects of the same kind.

    This makes it easy to create lists of objects. You initialize
    it with a class of what it is to contain, and that is all you
    can add to it. You can assign properties to the collection
    as a whole, or to a numeric index within it; if so it creates
    a new child object to hold that data.

    So:
        wedges = TypedPropertyCollection(WedgeProperties)
        wedges.strokeWidth = 2                # applies to all
        wedges.strokeColor = colors.red       # applies to all
        wedges[3].strokeColor = colors.blue   # only to one

    The last line should be taken as a prescription of how to
    create wedge no. 3 if one is needed; no error is raised if
    there are only two data points.

    We try and make sensible use of tuple indeces.
        line[(3,x)] is backed by line[(3,)], line[3] & line
    """

    def __init__(self, exampleClass):
        #give it same validation rules as what it holds
        # Bypass __setattr__ (which validates against _attrMap) by writing
        # straight into __dict__.
        self.__dict__['_value'] = exampleClass()
        self.__dict__['_children'] = {}

    def wKlassFactory(self,Klass):
        # Build a subclass of Klass whose attribute lookup falls back first
        # to the backing child entries, then to the parent collection.
        class WKlass(Klass):
            def __getattr__(self,name):
                try:
                    return self.__class__.__bases__[0].__getattr__(self,name)
                except:
                    i = self._index
                    if i:
                        c = self._parent._children
                        if c.has_key(i) and c[i].__dict__.has_key(name):
                            return getattr(c[i],name)
                        elif len(i)==1:
                            # tuple index (n,) falls back to plain index n
                            i = i[0]
                            if c.has_key(i) and c[i].__dict__.has_key(name):
                                return getattr(c[i],name)
                    return getattr(self._parent,name)
        return WKlass

    def __getitem__(self, index):
        """Return the child for `index`, creating (and caching) it lazily."""
        try:
            return self._children[index]
        except KeyError:
            Klass = self._value.__class__
            # Reuse the generated wrapper class per held type (module cache).
            if _ItemWrapper.has_key(Klass):
                WKlass = _ItemWrapper[Klass]
            else:
                _ItemWrapper[Klass] = WKlass = self.wKlassFactory(Klass)

            child = WKlass()
            child._parent = self
            if type(index) in (type(()),type([])):
                index = tuple(index)
                if len(index)>1:
                    # e.g. (3, x): back the child with the (3,) prefix
                    child._index = tuple(index[:-1])
                else:
                    child._index = None
            else:
                child._index = None
            # Strip any attrMap-declared defaults the wrapper instance got,
            # so lookups fall through __getattr__ to the parent instead.
            for i in filter(lambda x,K=child.__dict__.keys(): x in K,child._attrMap.keys()):
                del child.__dict__[i]

            self._children[index] = child
            return child

    def has_key(self,key):
        # Normalise sequence keys to tuples, matching __getitem__.
        if type(key) in (type(()),type([])): key = tuple(key)
        return self._children.has_key(key)

    def __setitem__(self, key, value):
        # NOTE(review): the value is type-checked but apparently never stored
        # in _children — confirm whether dropping the assignment is intended.
        msg = "This collection can only hold objects of type %s" % self._value.__class__.__name__
        assert isinstance(value, self._value.__class__), msg

    def __len__(self):
        return len(self._children.keys())

    def getProperties(self,recur=1):
        # return any children which are defined and whatever
        # differs from the parent
        props = {}

        for (key, value) in self._value.getProperties(recur=recur).items():
            props['%s' % key] = value

        for idx in self._children.keys():
            childProps = self._children[idx].getProperties(recur=recur)
            for (key, value) in childProps.items():
                if not hasattr(self,key) or getattr(self, key)<>value:
                    newKey = '[%s].%s' % (idx, key)
                    props[newKey] = value
        return props

    def setVector(self,**kw):
        # Assign value[i] of each keyword sequence to child i.
        for name, value in kw.items():
            for i in xrange(len(value)):
                setattr(self[i],name,value[i])

    def __getattr__(self,name):
        # Undefined attributes delegate to the shared example instance.
        return getattr(self._value,name)

    def __setattr__(self,name,value):
        # Attribute writes go to the shared example instance (collection-wide).
        return setattr(self._value,name,value)
## No longer needed!
class StyleProperties(PropHolder):
    """A container class for attributes used in charts and legends.

    Attributes contained can be those for any graphical element
    (shape?) in the ReportLab graphics package. The idea for this
    container class is to be useful in combination with legends
    and/or the individual appearance of data series in charts.

    A legend could be as simple as a wrapper around a list of style
    properties, where the 'desc' attribute contains a descriptive
    string and the rest could be used by the legend e.g. to draw
    something like a color swatch. The graphical presentation of
    the legend would be its own business, though.

    A chart could be inspecting a legend or, more directly, a list
    of style properties to pick individual attributes that it knows
    about in order to render a particular row of the data. A bar
    chart e.g. could simply use 'strokeColor' and 'fillColor' for
    drawing the bars while a line chart could also use additional
    ones like strokeWidth.
    """

    # Validators for the supported stroke/fill attributes plus a free-form
    # 'desc' string.
    _attrMap = AttrMap(
        strokeWidth = AttrMapValue(isNumber),
        strokeLineCap = AttrMapValue(isNumber),
        strokeLineJoin = AttrMapValue(isNumber),
        strokeMiterLimit = AttrMapValue(None),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone),
        strokeOpacity = AttrMapValue(isNumber),
        strokeColor = AttrMapValue(isColorOrNone),
        fillColor = AttrMapValue(isColorOrNone),
        desc = AttrMapValue(isString),
        )

    def __init__(self, **kwargs):
        "Initialize with attributes if any."
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __setattr__(self, name, value):
        "Verify attribute name and value, before setting it."
        validateSetattr(self,name,value)
class TwoCircles(Widget):
    """Minimal example widget: two fixed red circles side by side."""

    def __init__(self):
        self.leftCircle = shapes.Circle(100,100,20, fillColor=colors.red)
        self.rightCircle = shapes.Circle(300,100,20, fillColor=colors.red)

    def draw(self):
        return shapes.Group(self.leftCircle, self.rightCircle)
class Face(Widget):
    """Widget drawing a stylised face with two eyes.

    Exposes position, size, colours and a 'mood' property and hides
    all other drawing details.
    """
    _attrMap = AttrMap(
        x = AttrMapValue(isNumber),
        y = AttrMapValue(isNumber),
        size = AttrMapValue(isNumber),
        skinColor = AttrMapValue(isColorOrNone),
        eyeColor = AttrMapValue(isColorOrNone),
        mood = AttrMapValue(OneOf('happy','sad','ok')),
        )

    def __init__(self):
        self.x = 10
        self.y = 10
        self.size = 80
        self.skinColor = None
        self.eyeColor = colors.blue
        self.mood = 'happy'

    def demo(self):
        pass

    def draw(self):
        s = self.size  # abbreviate as we will use this a lot
        g = shapes.Group()
        g.transform = [1,0,0,1,self.x, self.y]

        # head outline
        g.add(shapes.Circle(s * 0.5, s * 0.5, s * 0.5, fillColor=self.skinColor))

        # eyes: white ball plus coloured pupil, left then right
        for eyeX in (s * 0.35, s * 0.65):
            g.add(shapes.Circle(eyeX, s * 0.65, s * 0.1, fillColor=colors.white))
            g.add(shapes.Circle(eyeX, s * 0.65, s * 0.05, fillColor=self.eyeColor))

        # nose
        g.add(shapes.Polygon(
            points=[s * 0.5, s * 0.6, s * 0.4, s * 0.3, s * 0.6, s * 0.3],
            fillColor=None))

        # mouth: corners curve up for 'happy', down for 'sad', flat for 'ok'
        offset = {'happy': -0.05, 'sad': +0.05}.get(self.mood, 0)
        mouthY = s * (0.2 + offset)
        g.add(shapes.Polygon(
            points = [
                s * 0.3, s * 0.2,  # left of mouth
                s * 0.7, s * 0.2,  # right of mouth
                s * 0.6, mouthY,   # the bit going up or down
                s * 0.4, mouthY    # the bit going up or down
                ],
            fillColor = colors.pink,
            strokeColor = colors.red,
            strokeWidth = s * 0.03
            ))

        return g
class TwoFaces(Widget):
    """Example composite widget: a happy face next to a sad one."""

    def __init__(self):
        happy = Face()
        happy.mood = "happy"
        sad = Face()
        sad.x = 100
        sad.mood = "sad"
        self.faceOne = happy
        self.faceTwo = sad

    def draw(self):
        """Just return a group"""
        return shapes.Group(self.faceOne, self.faceTwo)

    def demo(self):
        """The default case already looks good enough,
        no implementation needed here"""
        pass
class Sizer(Widget):
    "Container to show size of all enclosed objects"
    _attrMap = AttrMap(BASE=shapes.SolidShape,
        contents = AttrMapValue(isListOfShapes,desc="Contained drawable elements"),
        )

    def __init__(self, *elements):
        self.contents = []
        self.fillColor = colors.cyan
        self.strokeColor = colors.magenta
        for element in elements:
            self.add(element)

    def _addNamedNode(self,name,node):
        'if name is not None add an attribute pointing to node and add to the attrMap'
        if not name:
            return
        if name not in self._attrMap.keys():
            # register the name so attribute validation accepts it
            self._attrMap[name] = AttrMapValue(isValidChild)
        setattr(self, name, node)

    def add(self, node, name=None):
        """Appends non-None child node to the 'contents' attribute. In addition,
        if a name is provided, it is subsequently accessible by name
        """
        # propagates properties down
        if node is None:
            return
        assert isValidChild(node), "Can only add Shape or UserNode objects to a Group"
        self.contents.append(node)
        self._addNamedNode(name,node)

    def getBounds(self):
        """Union of the child bounds, or all zeros when empty."""
        if not self.contents:
            return (0,0,0,0)
        return shapes.getRectsBounds([child.getBounds() for child in self.contents])

    def draw(self):
        """Render a filled rectangle covering the bounds, then the children."""
        group = shapes.Group()
        x1, y1, x2, y2 = self.getBounds()
        group.add(shapes.Rect(
            x = x1,
            y = y1,
            width = x2-x1,
            height = y2-y1,
            fillColor = self.fillColor,
            strokeColor = self.strokeColor
            ))
        for child in self.contents:
            group.add(child)
        return group
def test():
    """Ad-hoc demo: exercises TypedPropertyCollection, TwoCircles and Face,
    writing sample PDF files into the current directory."""
    from reportlab.graphics.charts.piecharts import WedgeProperties
    wedges = TypedPropertyCollection(WedgeProperties)
    wedges.fillColor = colors.red
    wedges.setVector(fillColor=(colors.blue,colors.green,colors.white))
    # size of the wrapper-class cache after exercising the collection
    print len(_ItemWrapper)

    d = shapes.Drawing(400, 200)
    tc = TwoCircles()
    d.add(tc)
    import renderPDF
    renderPDF.drawToFile(d, 'sample_widget.pdf', 'A Sample Widget')
    print 'saved sample_widget.pdf'

    d = shapes.Drawing(400, 200)
    f = Face()
    f.skinColor = colors.yellow
    f.mood = "sad"
    d.add(f, name='theFace')
    print 'drawing 1 properties:'
    d.dumpProperties()
    renderPDF.drawToFile(d, 'face.pdf', 'A Sample Widget')
    print 'saved face.pdf'

    # expandUserNodes resolves widgets into primitive shapes; the copy
    # should render identically.
    d2 = d.expandUserNodes()
    renderPDF.drawToFile(d2, 'face_copy.pdf', 'An expanded drawing')
    print 'saved face_copy.pdf'
    print 'drawing 2 properties:'
    d2.dumpProperties()
if __name__=='__main__':
    # Generate the sample PDFs when run as a script.
    test()
| |
""" NIC tests for VM """
import signal
import sys
import time
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.codes import PASS
from marvin.lib.base import (Account,
ServiceOffering,
Network,
VirtualMachine,
NetworkOffering)
from marvin.lib.common import (get_zone,
get_template,
get_domain)
from marvin.lib.utils import validateList
from nose.plugins.attrib import attr
class TestNic(cloudstackTestCase):
    """Verifies add / set-default / remove NIC operations on a virtual machine."""

    def setUp(self):
        """Create the account, offerings and the two networks the test needs.

        Everything created is inserted at the front of ``self.cleanup`` so
        tearDown deletes resources in reverse creation order.
        """
        self.cleanup = []

        # Renamed the handler parameter from `signal` to `sig` so it no
        # longer shadows the imported `signal` module.
        def signal_handler(sig, frame):
            # Ctrl-C tears down whatever was created so far instead of
            # leaking test resources, then exits cleanly.
            self.tearDown()
            sys.exit(0)

        # assign the signal handler immediately
        signal.signal(signal.SIGINT, signal_handler)
        self.hypervisor = self.testClient.getHypervisorInfo()
        try:
            self.apiclient = self.testClient.getApiClient()
            self.dbclient = self.testClient.getDbConnection()
            self.services = self.testClient.getParsedTestDataConfig()

            # Get Zone, Domain and templates
            domain = get_domain(self.apiclient)
            self.zone = get_zone(
                self.apiclient,
                self.testClient.getZoneForTests()
            )

            # if local storage is enabled, alter the offerings to use
            # localstorage
            if self.zone.localstorageenabled:
                self.services["service_offerings"][
                    "tiny"]["storagetype"] = 'local'

            template = get_template(
                self.apiclient,
                self.zone.id,
                self.services["ostype"]
            )
            # Set Zones and disk offerings
            self.services["small"]["zoneid"] = self.zone.id
            self.services["small"]["template"] = template.id
            self.services["iso1"]["zoneid"] = self.zone.id
            self.services["network"]["zoneid"] = self.zone.id

            # Create Account, VMs, NAT Rules etc
            self.account = Account.create(
                self.apiclient,
                self.services["account"],
                domainid=domain.id
            )
            self.cleanup.insert(0, self.account)

            self.service_offering = ServiceOffering.create(
                self.apiclient,
                self.services["service_offerings"]["tiny"]
            )
            self.cleanup.insert(0, self.service_offering)

            ####################
            # Network offering (isolated)
            self.network_offering = NetworkOffering.create(
                self.apiclient,
                self.services["network_offering"],
            )
            self.cleanup.insert(0, self.network_offering)
            self.network_offering.update(
                self.apiclient,
                state='Enabled')  # Enable Network offering
            self.services["network"][
                "networkoffering"] = self.network_offering.id

            # Second (shared) network offering for the NIC we will hot-add.
            self.network_offering_shared = NetworkOffering.create(
                self.apiclient,
                self.services["network_offering_shared"],
            )
            self.cleanup.insert(0, self.network_offering_shared)
            self.network_offering_shared.update(
                self.apiclient,
                state='Enabled')  # Enable Network offering
            self.services["network2"][
                "networkoffering"] = self.network_offering_shared.id

            ################
            # Test Networks: the VM boots on test_network; test_network2 is
            # added as a second NIC during the test.
            self.test_network = Network.create(
                self.apiclient,
                self.services["network"],
                self.account.name,
                self.account.domainid,
            )
            self.cleanup.insert(0, self.test_network)
            self.test_network2 = Network.create(
                self.apiclient,
                self.services["network2"],
                self.account.name,
                self.account.domainid,
                zoneid=self.services["network"]["zoneid"]
            )
            self.cleanup.insert(0, self.test_network2)
        except Exception as ex:
            # NOTE(review): setup failures are only logged here; they will
            # surface later as missing attributes inside the test itself.
            self.debug("Exception during NIC test SETUP!: " + str(ex))

    @attr(
        tags=[
            "smoke",
            "advanced",
            "advancedns"],
        required_hardware="true")
    def test_01_nic(self):
        # TODO: SIMENH: add validation
        """Test to add and update added nic to a virtual machine"""
        hypervisorIsVmware = False
        # On VMware, remove-NIC requires VMware Tools in the guest (5.5+).
        isVmwareToolInstalled = False
        if self.hypervisor.lower() == "vmware":
            hypervisorIsVmware = True

        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.services["small"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            networkids=[self.test_network.id],
            mode=self.zone.networktype if hypervisorIsVmware else "default"
        )
        self.cleanup.insert(0, self.virtual_machine)
        vms = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id
        )
        self.assertEqual(
            validateList(vms)[0],
            PASS,
            "vms list validation failed")
        vm_response = vms[0]

        self.assertEqual(
            len(vm_response.nic),
            1,
            "Verify we only start with one nic"
        )
        self.assertEqual(
            vm_response.nic[0].isdefault,
            True,
            "Verify initial adapter is set to default"
        )
        existing_nic_ip = vm_response.nic[0].ipaddress
        existing_nic_id = vm_response.nic[0].id

        # Hot-plug a second NIC on the shared network.
        self.virtual_machine.add_nic(
            self.apiclient,
            self.test_network2.id)

        list_vm_response = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id
        )

        self.assertEqual(
            len(list_vm_response[0].nic),
            2,
            "Verify we have 2 NIC's now"
        )

        # If hypervisor is Vmware, then check if
        # the vmware tools are installed and the process is running
        # Vmware tools are necessary for remove nic operations (vmware 5.5+)
        if hypervisorIsVmware:
            sshClient = self.virtual_machine.get_ssh_client()
            result = str(
                sshClient.execute("service vmware-tools status")).lower()
            self.debug("and result is: %s" % result)
            if "running" in result:
                isVmwareToolInstalled = True

        goForUnplugOperation = True
        # If Vmware tools are not installed in case of vmware hypervisor
        # then don't go further for unplug operation (remove nic) as it won't
        # be supported
        if hypervisorIsVmware and not isVmwareToolInstalled:
            goForUnplugOperation = False

        if goForUnplugOperation:
            # Identify the freshly-added NIC by its (new) IP address.
            new_nic_id = ""
            for nc in list_vm_response[0].nic:
                if nc.ipaddress != existing_nic_ip:
                    new_nic_id = nc.id

            self.virtual_machine.update_default_nic(self.apiclient, new_nic_id)

            time.sleep(5)

            list_vm_response = VirtualMachine.list(
                self.apiclient,
                id=self.virtual_machine.id
            )

            # iterate as we don't know for sure what order our NIC's will be
            # returned to us.
            for nc in list_vm_response[0].nic:
                if nc.ipaddress == existing_nic_ip:
                    self.assertEqual(
                        nc.isdefault,
                        False,
                        "Verify initial adapter is NOT set to default"
                    )
                else:
                    self.assertEqual(
                        nc.isdefault,
                        True,
                        "Verify second adapter is set to default"
                    )

            # Removing the current default NIC must be rejected.
            with self.assertRaises(Exception):
                self.virtual_machine.remove_nic(self.apiclient, new_nic_id)

            # Switch the default back, then the second NIC can be removed.
            self.virtual_machine.update_default_nic(
                self.apiclient,
                existing_nic_id)

            time.sleep(5)

            self.virtual_machine.remove_nic(self.apiclient, new_nic_id)

            time.sleep(5)

            list_vm_response = VirtualMachine.list(
                self.apiclient,
                id=self.virtual_machine.id
            )

            self.assertEqual(
                len(list_vm_response[0].nic),
                1,
                # Fixed typo in the failure message ("signle" -> "single").
                "Verify we are back to a single NIC"
            )
        return

    def tearDown(self):
        """Best-effort deletion of everything registered in self.cleanup.

        Failures are logged, never raised, so one broken resource does not
        prevent the remaining ones from being cleaned up.
        """
        try:
            for obj in self.cleanup:
                try:
                    obj.delete(self.apiclient)
                    time.sleep(10)
                except Exception as ex:
                    self.debug(
                        "Error deleting: " +
                        str(obj) +
                        ", exception: " +
                        str(ex))
        except Exception as e:
            self.debug("Warning! Exception in tearDown: %s" % e)
| |
"""
.. codeauthor:: Kevin Kennedy <protonyx@users.noreply.github.com>
"""
import labtronyx
import time
import re
class d_3441XA(labtronyx.DriverBase):
    """
    Driver for Agilent 34410A and 34411A Digital Multimeter
    """
    author = 'KKENNEDY'
    version = '1.0'
    deviceType = 'Multimeter'
    compatibleInterfaces = ['VISA']
    compatibleInstruments = {
        'Agilent': ['34410A', '34411A', 'L4411A']
    }

    @classmethod
    def VISA_validResource(cls, identity):
        """
        Return True when the ``*IDN?`` identity tuple identifies a supported
        Agilent multimeter.

        :param identity: identity fields, vendor at index 0, model at index 1
        :rtype: bool
        """
        return identity[0].upper() == 'AGILENT TECHNOLOGIES' and identity[1] in cls.compatibleInstruments['Agilent']

    # Human-readable measurement mode -> SCPI CONFigure mnemonic
    VALID_MODES = {
        'Capacitance': 'CAP',
        'Continuity': 'CONT',
        'AC Current': 'CURR:AC',
        'DC Current': 'CURR',
        'Diode': 'DIOD',
        'Frequency': 'FREQ',
        'Resistance': 'RES',
        '4-wire Resistance': 'FRES',
        'Period': 'PER',
        'Temperature': 'TEMP',
        'AC Voltage': 'VOLT:AC',
        'DC Voltage': 'VOLT'}

    # Human-readable trigger source -> SCPI TRIGger:SOURce mnemonic
    VALID_TRIGGER_SOURCES = {
        'Continual': 'IMM',
        'Bus': 'BUS',
        'External': 'EXT'
    }

    # Error codes reported by SYST:ERR?.
    # BUG FIX: the original dict literal repeated the keys -313, -314, -315
    # and -330, so Python silently kept only the last message for each; the
    # duplicate variants are merged into a single message per code here.
    ERROR_CODES = {
        0: "No error",
        # Execution Errors
        -102: "Syntax error",
        -103: "Invalid separator",
        -113: "Undefined header",
        -123: "Numeric overflow",
        -151: "Invalid string data",
        -213: "INIT ignored",
        -222: "Data out of range",
        -224: "Illegal parameter value: ranges must be positive",
        -230: "Data stale",
        -231: "Internal software error",
        -292: "Referenced name does not exist",
        -313: "Calibration memory lost; memory corruption detected or firmware revision change",
        -314: "Save/recall memory lost; memory corruption detected or firmware revision change",
        -315: "Configuration memory lost; memory corruption detected or firmware revision change",
        -330: "Self-test failed",
        -350: "Error queue overflow",
        -410: "Query INTERRUPTED",
        -420: "Query UNTERMINATED",
        # Instrument Errors
        201: "Memory lost: stored state",
        202: "Memory lost: power-on state",
        203: "Memory lost: stored readings",
        221: "Settings conflict: calculate limit state forced off",
        223: "Settings conflict: trig source changed to IMM",
        251: "Unsupported temperature transducer type",
        263: "Not able to execute while instrument is measuring",
        291: "Not able to recall state: it is empty",
        305: "Not able to perform requested operation",
        311: "Not able to specify resolution with Auto range",
        514: "Not allowed",
        521: "Communications: input buffer overflow",
        522: "Communications: output buffer overflow",
        532: "Not able to achieve requested resolution",
        540: "Cannot use overload as math reference",
        550: "Not able to execute command in local mode",
        624: "Unable to sense line frequency"}

    def open(self):
        """Driver start-up hook: cache the instrument's current mode in self._mode."""
        self._mode = ''
        self.getMode()

    def close(self):
        """Driver shutdown hook: hand the front-panel display back to the user."""
        self.enableFrontPanel()

    def getProperties(self):
        """
        Report static driver properties.

        Driver property keys:

        * validModes
        * validTriggerSources
        * errorCodes

        :return: dict
        """
        return dict(
            deviceVendor='Agilent Technologies',
            validModes=self.VALID_MODES,
            validTriggerSources=self.VALID_TRIGGER_SOURCES,
            errorCodes=self.ERROR_CODES
        )

    def trigger(self):
        """
        Used in conjunction with the Trigger Source to trigger the instrument from the remote interface. After setting
        the trigger source, you must place the multimeter in the "wait-for-trigger" state by calling
        :func:`waitForTrigger`.
        """
        self.write("*TRG")

    def waitForTrigger(self):
        """
        Change the state of the triggering system from "idle" to "wait-for-trigger". Measurements will begin when the
        specified trigger conditions are satisfied. Will also clear the previous set of readings from memory.
        """
        self.write("INIT")
        self.checkForError()

    def self_test(self):
        """
        Run the self-test suite

        +========+================================+
        | Test # | Test Name                      |
        +========+================================+
        | 600    | Front Panel Communications     |
        | 601    | Front Panel All On Test        |
        | 602    | A/D Feedback Test              |
        | 603    | Fine A/D Test                  |
        | 604    | Fine A/D Linearity             |
        | 605    | A/D & FE Measure Zero          |
        | 606    | Input Amplifier x100 Zero Test |
        | 607    | Input Amplifier x10 Zero Test  |
        | 608    | Input Amplifier x1 Zero Test   |
        | 609    | Input Leakage Test             |
        | 610    | Input Amplifier x10 Gain Test  |
        | 611    | Input Amplifier x1 Gain Test   |
        | 612    | Ohms 500nA Current Source      |
        | 613    | DC High Voltage Divider Test   |
        | 614    | Ohms 5uA Current Source Test   |
        | 615    | Ohms 10uA Current Source       |
        | 616    | Ohms 100uA to 200 Ohm Shunt    |
        | 617    | Ohms 1mA to 2 Ohm Shunt        |
        | 618    | High Current Shunt Test        |
        | 619    | AC 0.1VAC Zero Test            |
        | 620    | Precharge Amplifier Gain Test  |
        | 621    | Precharge Offset Range Test    |
        | 622    | FPGA Ping Test                 |
        +--------+--------------------------------+

        :return:
        """
        # NOTE(review): "*TST?" is a query but is sent with write(), so the
        # pass/fail result is left in the instrument's output buffer and may
        # later surface as a -410 "Query INTERRUPTED" error — confirm intent.
        self.write("*TST?")
        self.checkForError()

    def checkForError(self):
        """
        Query the device for errors. Raises an exception if an error was registered on the device

        :raises labtronyx.DeviceError: when at least one error is queued
        """
        errors = self.getErrors()

        if len(errors) == 1:
            code, msg = errors[0]
            raise labtronyx.DeviceError(msg.strip('"'))
        elif len(errors) > 1:
            raise labtronyx.DeviceError("Multiple errors")

    def getError(self):
        """
        Get the last recorded error from the instrument

        :return: [error code, error message]
        """
        err = self.query('SYST:ERR?')
        # BUG FIX: split on the FIRST comma only; the quoted message portion
        # may itself contain commas, which would break the two-value unpack
        # performed by getErrors().
        return err.split(',', 1)

    def getErrors(self):
        """
        Retrieve any queued errors on the instrument

        :return: list of (code, message) tuples
        """
        errors = []

        # Drain the error queue until code 0 ("No error") is returned
        while True:
            err_num, err_msg = self.getError()
            if float(err_num) == 0:
                break
            else:
                errors.append((err_num, err_msg,))

        return errors

    def enableFrontPanel(self):
        """
        Enables the front panel display if it was previously disabled.
        """
        self.write("DISP 1")
        self.write("DISP:WIND1:TEXT:CLEAR")
        self.write("DISP:WIND2:TEXT:CLEAR")

    def disableFrontPanel(self):
        """
        Disables the front panel display. Display can be re-enabled by calling
        `enableFrontPanel` or pressing the `LOCAL` button on the instrument.

        .. note:

           When the front panel is disabled, the instrument runs faster
        """
        self.write("DISP 0")

    def frontPanelText(self, text_top, text_bottom):
        """
        Set the text on the front panel of the instrument. The top line is limited to 12 characters, the bottom line to
        18 characters. You can use letters (A-Z), numbers (0-9), and special characters like "@", "%", "*", etc.
        Use "#" character to display a degree symbol.

        :param text_top: Top text (up to 12 characters, truncated if longer)
        :type text_top: str
        :param text_bottom: Bottom text (up to 18 characters, truncated if longer)
        :type text_bottom: str
        """
        if len(text_top) > 12:
            text_top = text_top[0:12]
        if len(text_bottom) > 18:
            text_bottom = text_bottom[0:18]

        if len(text_top) > 0:
            self.write('DISP:WIND1:TEXT "%s"' % text_top)
        if len(text_bottom) > 0:
            self.write('DISP:WIND2:TEXT "%s"' % text_bottom)

    def setMode(self, func):
        """
        Set the configuration mode

        Valid modes:

        * 'AC Voltage'
        * 'DC Voltage'
        * 'Resistance'
        * '4-wire Resistance'
        * 'AC Current'
        * 'DC Current'
        * 'Frequency'
        * 'Period'
        * 'Diode'
        * 'Continuity'
        * 'Capacitance
        * 'Temperature'

        :param func: Configuration mode (friendly name or SCPI mnemonic)
        :type func: str
        :raises ValueError: if func is not a recognized mode
        :raises RuntimeError: if the instrument did not accept the mode
        """
        # Accept either the friendly name or the raw SCPI mnemonic
        func = self.VALID_MODES.get(func, func)
        if func not in self.VALID_MODES.values():
            raise ValueError("Invalid Mode")

        self.write("CONF:{0}".format(func))

        # Read the mode back to verify the instrument accepted it
        self.getMode()
        if self._mode.upper() != func.upper():
            raise RuntimeError('Set value failed verification')

    def getMode(self):
        """
        Get the current operating mode

        :returns: str -- friendly mode name, or 'Unknown' if unrecognized
        """
        mode = self.query("CONF?")
        # CONF? returns the mode followed by comma-separated range/resolution
        # fields; extract just the leading mode mnemonic with a regex.
        re_mode = re.compile(r'([A-Z:]+)\s?([A-Z0-9,+\-.]*)')
        re_srch = re.search(re_mode, mode)
        if re_srch is not None:
            self._mode = re_srch.group(1)

        for desc, code in self.VALID_MODES.items():
            if self._mode == code:
                return desc

        return 'Unknown'

    def getRange(self):
        """
        Get the range for the measurement.

        :returns: float
        """
        # BUG FIX: was "self.func", an attribute that is never defined; the
        # cached SCPI mode mnemonic lives in self._mode (set by getMode).
        # Also convert to float to honor the documented return type.
        return float(self.query("SENS:%s:RANGE?" % self._mode))

    def setRange(self, new_range):
        """
        Set the range for the measurement. The range is selected by specifying
        the expected reading as an absolute value. The instrument will then
        go to the most ideal range that will accommodate the expected reading

        Possible value ranges:

        * 'AUTO'
        * DC Voltage: 0 to 1000 Volts
        * AC Voltage: 0 to 750 Volts
        * Current: 0 to 20 Amps
        * Resistance: 0 to 20e6 ohms
        * Frequency or Period: 0 to 1010 Volts

        :param new_range: Measurement Range ('AUTO' or expected reading)
        :type new_range: str
        """
        if str(new_range).upper() == 'AUTO':
            self.write('SENS:%s:RANGE:AUTO ON' % self._mode)
        else:
            self.write('SENS:%s:RANGE:AUTO OFF' % self._mode)
            # BUG FIX: the requested range was previously never sent to the
            # instrument, so manual ranging silently kept the old range.
            self.write('SENS:%s:RANGE %G' % (self._mode, float(new_range)))

    def getMeasurement(self):
        """
        Get the last available reading from the instrument. This command does
        not trigger a measurement if trigger source is not set to `IMMEDIATE`.

        :returns: float, or list of floats when the instrument returns
                  multiple samples; None if three attempts all fail to parse
        """
        # Attempt three times to get a measurement
        for _attempt in range(3):
            try:
                # Initiate a measurement
                self.write("INIT")
                time.sleep(0.01)

                data = str(self.query("FETC?"))
                if ',' in data:
                    # Materialize as a list so Python 3 callers receive a
                    # sequence rather than a lazy map object.
                    return list(map(float, data.split(',')))
                else:
                    return float(data)

            except ValueError:
                # Malformed response, try again
                pass

    def setIntegrationRate(self, value):
        """
        Set the integration period (measurement speed) for the basic measurement
        functions (except frequency and period). Expressed as a factor of the
        power line frequency (PLC = Power Line Cycles).

        Valid values: 0.006, 0.02, 0.06, 0.2, 1, 2, 10, 100

        Value of 'DEF' sets the integration rate to 1 PLC

        .. note:

           A rate of 1 would result in 16.67 ms integration period (Assuming
           60 hz power line frequency.

        :param value: Integration rate
        :type value: float or str
        """
        # BUG FIX: was "self.func" (undefined); use the cached mode mnemonic.
        self.write("SENS:%s:NPLC %s" % (self._mode, str(value)))

    def getIntegrationRate(self):
        """
        Get the integration period (measurement speed). Expressed as a factor
        of the power line frequency.

        :returns: float
        """
        # BUG FIX: was "self.func" (undefined); use the cached mode mnemonic
        # and the same explicit SENS subsystem form as setIntegrationRate.
        return float(self.query("SENS:{0}:NPLC?".format(self._mode)))

    def setTriggerCount(self, count):
        """
        This command selects the number of triggers that will be accepted by
        the meter before returning to the "idle" trigger state.

        A value of '0' will set the multimeter into continuous trigger mode.

        :param count: Number of triggers
        :type count: int
        """
        self.write("TRIG:COUN %i" % int(count))

    def setTriggerDelay(self, delay=None):
        """
        This command sets the delay between the trigger signal and the first
        measurement. This may be useful in applications where you want to allow
        the input to settle before taking a reading or for pacing a burst of
        readings. The programmed trigger delay overrides the default trigger
        delay that the instrument automatically adds.

        If delay is not provided, the automatic trigger delay is enabled

        Note::

           The Continuity and Diode test functions ignore the trigger delay
           setting

        :param delay: Trigger delay (in seconds), or None for automatic delay
        :type delay: float
        """
        if delay is None:
            self.write("TRIG:DEL:AUTO ON")
        else:
            self.write("TRIG:DEL:AUTO OFF")
            self.write("TRIG:DEL %f" % float(delay))

    def setTriggerSource(self, source):
        """
        Set the trigger source for a measurement.

        Valid values:

        * `IMMEDIATE`: Internal continuous trigger
        * `BUS`: Triggered via USB/RS-232 Interface
        * `EXTERNAL`: Triggered via the 'Ext Trig Input' BNC connector

        For the EXTernal source, the instrument will accept a hardware trigger
        applied to the rear-panel Ext Trig Input BNC connector. The instrument
        takes one reading, or the specified number of readings (sample count),
        each time a TTL pulse (low-true for slope = negative) is received. If
        the instrument receives an external trigger before it is ready to accept
        one, it will buffer one trigger.

        :param source: Trigger source (SCPI mnemonic: IMM, BUS or EXT)
        :type source: str
        :raises ValueError: if source is not a valid trigger source
        """
        if source in self.VALID_TRIGGER_SOURCES.values():
            self.write("TRIG:SOUR %s" % source)
        else:
            raise ValueError('Invalid trigger source')

    def setSampleCount(self, samples):
        """
        Set the number of readings (samples) the multimeter will take per trigger.

        When the sample source is `Immediate`, the trigger delay value is used to determine how far apart the samples
        are to be taken. In `Timer` mode, the sample timer value is used.

        :param samples: Number of samples
        :type samples: int
        """
        self.write("SAMP:COUN %s" % samples)
        self.checkForError()

    def getSampleCount(self):
        """
        Get the number of readings (samples) the multimeter will take per trigger.

        :return: Number of samples (int)
        """
        return int(self.query("SAMP:COUN?"))
| |
import datetime
import pandas as pd
from odmtools.odmdata import *
import os
import sys
def build_db(engine):
    """Create every ODM table known to the declarative Base on `engine`."""
    Base.metadata.create_all(engine)
# Create DB objects #
def add_bulk_data_values(session, series, dvs_size):
    """
    Load up exampleData.csv into a series' datavalues field.

    Only the first `dvs_size` rows of the CSV are converted into DataValue
    records, attached to `series` and committed.

    :param session: SQLAlchemy session used to persist the values
    :param series: Series supplying the foreign keys (site, variable, ...)
    :param dvs_size: number of rows to load; must be in (0, 10000]
    :return: the FULL DataFrame read from the CSV (not truncated to dvs_size)
    :raises ValueError: if dvs_size is out of range
    """
    # Validate with an explicit raise: `assert` is stripped under `python -O`,
    # so it must not be used for input validation.
    if not 0 < dvs_size <= 10000:
        raise ValueError(
            "dvs_size must be in the range (0, 10000], got %r" % (dvs_size,))

    path = os.path.dirname(os.path.realpath(__file__))
    filepath = os.path.join(path, 'example_files', 'exampleData.csv')
    df = pd.read_csv(filepath)
    # Convert the timestamp columns to python datetime objects so the ODM
    # date fields receive datetime.datetime instances, not numpy datetime64.
    df['LocalDateTime'] = pd.to_datetime(df['LocalDateTime']).astype(datetime.datetime)
    df['DateTimeUTC'] = pd.to_datetime(df['DateTimeUTC']).astype(datetime.datetime)

    dvs = []
    for record in df.to_dict('records')[:dvs_size]:
        dv = DataValue()
        dv.data_value = record['DataValue']
        dv.local_date_time = record['LocalDateTime']
        dv.utc_offset = record['UTCOffset']
        dv.date_time_utc = record['DateTimeUTC']
        # Foreign keys come from the owning series, not from the CSV
        dv.site_id = series.site_id
        dv.variable_id = series.variable_id
        dv.censor_code = record['CensorCode']
        dv.method_id = series.method_id
        dv.source_id = series.source_id
        dv.quality_control_level_id = series.quality_control_level_id
        dvs.append(dv)

    series.data_values = dvs
    session.add_all(dvs)
    session.commit()
    return df
def add_series_bulk_data(session, dvs_size=50):
    """
    Build and persist a Series whose data values come from exampleData.csv.

    :param session: SQLAlchemy session used to persist the objects
    :param dvs_size: number of CSV rows to load as data values
    :return: the committed Series
    """
    site = add_site(session)
    var = add_variable(session)
    qcl = add_qcl(session)
    method = add_method(session)
    source = add_source(session)

    series = Series()
    series.site = site
    series.site_code = site.code
    series.variable = var
    series.variable_code = var.code
    series.method = method
    series.source = source
    series.quality_control_level_id = qcl.id

    df = add_bulk_data_values(session, series, dvs_size)
    # BUG FIX: only the first `dvs_size` rows were stored as data values, but
    # the date range was previously derived from the ENTIRE CSV, so the
    # series' begin/end could lie outside its actual data. Restrict the
    # range computation to the stored slice.
    stored = df.iloc[:dvs_size]
    local_times = sorted(stored['LocalDateTime'])
    series.begin_date_time = local_times[0]
    series.end_date_time = local_times[-1]
    utc_times = sorted(stored['DateTimeUTC'])
    series.begin_date_time_utc = utc_times[0]
    series.end_date_time_utc = utc_times[-1]
    # Consistency with add_series(): record how many values were stored.
    series.value_count = len(series.data_values)

    session.add(series)
    session.commit()
    return series
# Create Series objects
def add_series(session):
    """
    Build and persist a Series populated with 10 synthetic data values.

    :param session: SQLAlchemy session used to persist the objects
    :return: the committed Series
    """
    site = add_site(session)
    var = add_variable(session)
    qcl = add_qcl(session)
    method = add_method(session)
    source = add_source(session)

    series = Series()
    series.site = site
    series.site_code = site.code
    series.variable = var
    series.variable_code = var.code
    series.method = method
    series.source = source
    series.quality_control_level_id = qcl.id

    dvs = add_data_values(session, series)
    # BUG FIX: the old code assumed dvs was sorted oldest-first and used
    # dvs[0]/dvs[-1], but add_data_values generated dates newest-first, so
    # begin and end were swapped. min/max is correct regardless of order.
    local_times = [dv.local_date_time for dv in dvs]
    utc_times = [dv.date_time_utc for dv in dvs]
    series.begin_date_time = min(local_times)
    series.end_date_time = max(local_times)
    series.begin_date_time_utc = min(utc_times)
    series.end_date_time_utc = max(utc_times)
    series.value_count = len(dvs)

    session.add(series)
    session.commit()
    return series
def add_data_values(session, series):
    """
    Attach 10 synthetic daily DataValues to `series` and commit them.

    Values are returned in chronological order (oldest first), one per day
    over the last 10 days, so callers may treat dvs[0]/dvs[-1] as the
    begin/end of the series.

    :param session: SQLAlchemy session used to persist the values
    :param series: Series supplying the foreign keys
    :return: list of the committed DataValue objects, oldest first
    """
    # Hoist now() so every value shares one consistent base timestamp
    # (the old per-iteration call drifted by microseconds per value).
    now = datetime.datetime.now()
    dvs = []
    # BUG FIX: iterate the day offsets in descending order so the list is
    # chronologically ascending; the old loop produced newest-first values.
    for day_offset in range(9, -1, -1):
        dv = DataValue()
        dv.data_value = day_offset
        dv.local_date_time = now - datetime.timedelta(days=day_offset)
        dv.utc_offset = 0
        dv.date_time_utc = dv.local_date_time
        dv.site_id = series.site_id
        dv.variable_id = series.variable_id
        dv.censor_code = "NC"
        dv.method_id = series.method_id
        dv.source_id = series.source_id
        dv.quality_control_level_id = series.quality_control_level_id
        dvs.append(dv)

    series.data_values = dvs
    session.add_all(dvs)
    session.commit()
    return dvs
def _saved(session, obj):
    """Add `obj` to `session`, commit, and return it."""
    session.add(obj)
    session.commit()
    return obj


def add_site(session):
    """Create and persist a test Site linked to a fresh SpatialReference."""
    ref = add_spatial_reference(session)
    site = Site("ABC123", "Test Site")
    site.latitude = 10.0
    site.longitude = 10.0
    site.lat_long_datum_id = ref.id
    site.local_projection_id = ref.id
    site.elevation_m = 1000
    site.local_x = 10.0
    site.local_y = 10.0
    return _saved(session, site)


def add_variable(session):
    """Create and persist a test Variable tied to a fresh Unit."""
    unit = add_unit(session)
    variable = Variable()
    variable.code = "ABC123"
    variable.name = "Test Variable"
    variable.speciation = "Test"
    variable.variable_unit_id = unit.id
    variable.sample_medium = "Test Medium"
    variable.value_type = "Test Val Type"
    variable.is_regular = True
    variable.time_support = 3.14
    variable.time_unit_id = unit.id
    variable.data_type = "Test Data Type"
    variable.general_category = "Test Category"
    variable.no_data_value = -2000.0
    return _saved(session, variable)


def add_method(session):
    """Create and persist a test Method."""
    method = Method()
    method.description = "This is a test"
    return _saved(session, method)


def add_qcl(session):
    """Create and persist a test QualityControlLevel."""
    qcl = QualityControlLevel()
    qcl.code = "ABC123"
    qcl.definition = "This is a test"
    qcl.explanation = "A test is a thing that tests code"
    return _saved(session, qcl)


def add_source(session):
    """Create and persist a test Source linked to fresh ISOMetadata."""
    source = Source()
    source.organization = "Test Organization"
    source.description = "This is a test"
    source.contact_name = "Test Name"
    source.phone = "555-1234"
    source.email = "source@example.com"
    source.address = "123 Test Street"
    source.city = "Metropolis"
    source.state = "NY"
    source.zip_code = "12345"
    source.citation = "Test Citation"
    source.iso_metadata_id = add_iso_metadata(session).id
    return _saved(session, source)


def add_iso_metadata(session):
    """Create and persist test ISOMetadata."""
    iso = ISOMetadata()
    iso.topic_category = "Test Topic"
    iso.title = "Test Title"
    iso.abstract = "Test Abstract"
    iso.profile_version = "1.0.0.0rc4"
    return _saved(session, iso)


def add_spatial_reference(session):
    """Create and persist a test SpatialReference."""
    spatial_ref = SpatialReference()
    spatial_ref.srs_name = "This is a test"
    return _saved(session, spatial_ref)
# Create CVs #
def _test_cv(session, cv_class):
    """Instantiate `cv_class` with the standard test term/definition,
    persist it, and return it."""
    cv = cv_class()
    cv.term = "Test"
    cv.definition = "This is a test"
    session.add(cv)
    session.commit()
    return cv


def add_vertical_datum_cv(session):
    """Create and persist a test VerticalDatumCV."""
    return _test_cv(session, VerticalDatumCV)


def add_lab_method(session):
    """Create and persist a LabMethod populated with placeholder values."""
    lab_method = LabMethod()
    for attr, value in (('name', "Test Lab"),
                        ('organization', "Test Org"),
                        ('method_name', "Test Method"),
                        ('method_description', "Test Description"),
                        ('method_link', "Test Link")):
        setattr(lab_method, attr, value)
    session.add(lab_method)
    session.commit()
    return lab_method


def add_sample(session, lab_method_id):
    """Create and persist a test Sample bound to `lab_method_id`."""
    sample = Sample()
    sample.type = "Test"
    sample.lab_sample_code = "ABC123"
    sample.lab_method_id = lab_method_id
    session.add(sample)
    session.commit()
    return sample


def add_site_type_cv(session):
    """Create and persist a test SiteTypeCV."""
    return _test_cv(session, SiteTypeCV)


def add_variable_name_cv(session):
    """Create and persist a test VariableNameCV."""
    return _test_cv(session, VariableNameCV)


def add_unit(session):
    """Create and persist a test Unit."""
    unit = Unit()
    for attr, value in (('name', "Test"),
                        ('type', "Test"),
                        ('abbreviation', "T")):
        setattr(unit, attr, value)
    session.add(unit)
    session.commit()
    return unit


def add_offset_type_cv(session, unit_id):
    """Create and persist a test OffsetType bound to `unit_id`."""
    offset = OffsetType()
    offset.unit_id = unit_id
    offset.description = "This is a test"
    session.add(offset)
    session.commit()
    return offset


def add_speciation_cv(session):
    """Create and persist a test SpeciationCV."""
    return _test_cv(session, SpeciationCV)


def add_sample_medium_cv(session):
    """Create and persist a test SampleMediumCV."""
    return _test_cv(session, SampleMediumCV)


def add_value_type_cv(session):
    """Create and persist a test ValueTypeCV."""
    return _test_cv(session, ValueTypeCV)


def add_data_type_cv(session):
    """Create and persist a test DataTypeCV."""
    return _test_cv(session, DataTypeCV)


def add_general_category_cv(session):
    """Create and persist a test GeneralCategoryCV."""
    return _test_cv(session, GeneralCategoryCV)


def add_censor_code_cv(session):
    """Create and persist a test CensorCodeCV."""
    return _test_cv(session, CensorCodeCV)


def add_sample_type_cv(session):
    """Create and persist a test SampleTypeCV."""
    return _test_cv(session, SampleTypeCV)


def add_version(session):
    """Create and persist a test ODMVersion."""
    version = ODMVersion()
    version.version_number = "1.0.0.0.1alpha"
    session.add(version)
    session.commit()
    return version
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/TestScript) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class TestScript(domainresource.DomainResource):
    """ Describes a set of tests.

    A structured set of tests against a FHIR server or client implementation to
    determine compliance against the FHIR specification.
    """

    resource_type = "TestScript"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.contact = None        # Contact details for the publisher (list of ContactDetail)
        self.copyright = None      # Use and/or publishing restrictions (str)
        self.date = None           # Date last changed (FHIRDate, str in JSON)
        self.description = None    # Natural language description of the test script (str)
        self.destination = None    # Abstract destination/receiver servers (list of TestScriptDestination)
        self.experimental = None   # For testing purposes, not real usage (bool)
        self.fixture = None        # Fixtures referenced by uri (list of TestScriptFixture)
        self.identifier = None     # Additional identifier for the test script (Identifier)
        self.jurisdiction = None   # Intended jurisdiction, if applicable (list of CodeableConcept)
        self.metadata = None       # Required server capabilities (TestScriptMetadata)
        self.name = None           # Computer-friendly name (str)
        self.origin = None         # Abstract client/sender servers (list of TestScriptOrigin)
        self.profile = None        # Validation profile references (list of FHIRReference)
        self.publisher = None      # Name of the publisher, organization or individual (str)
        self.purpose = None        # Why this test script is defined (str)
        self.setup = None          # Required setup operations before tests run (TestScriptSetup)
        self.status = None         # draft | active | retired | unknown (str)
        self.teardown = None       # Required clean up steps (TestScriptTeardown)
        self.test = None           # Tests in this script (list of TestScriptTest)
        self.title = None          # Human-friendly name (str)
        self.url = None            # Canonical identifier, a globally unique URI (str)
        self.useContext = None     # Contexts the content is intended to support (list of UsageContext)
        self.variable = None       # Placeholders for evaluated elements (list of TestScriptVariable)
        self.version = None        # Business version of the test script (str)

        super(TestScript, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Property tuples: (name, json_name, type, is_list, of_many, not_optional)
        properties = super(TestScript, self).elementProperties()
        properties.extend([
            ("contact", "contact", contactdetail.ContactDetail, True, None, False),
            ("copyright", "copyright", str, False, None, False),
            ("date", "date", fhirdate.FHIRDate, False, None, False),
            ("description", "description", str, False, None, False),
            ("destination", "destination", TestScriptDestination, True, None, False),
            ("experimental", "experimental", bool, False, None, False),
            ("fixture", "fixture", TestScriptFixture, True, None, False),
            ("identifier", "identifier", identifier.Identifier, False, None, False),
            ("jurisdiction", "jurisdiction", codeableconcept.CodeableConcept, True, None, False),
            ("metadata", "metadata", TestScriptMetadata, False, None, False),
            ("name", "name", str, False, None, True),
            ("origin", "origin", TestScriptOrigin, True, None, False),
            ("profile", "profile", fhirreference.FHIRReference, True, None, False),
            ("publisher", "publisher", str, False, None, False),
            ("purpose", "purpose", str, False, None, False),
            ("setup", "setup", TestScriptSetup, False, None, False),
            ("status", "status", str, False, None, True),
            ("teardown", "teardown", TestScriptTeardown, False, None, False),
            ("test", "test", TestScriptTest, True, None, False),
            ("title", "title", str, False, None, False),
            ("url", "url", str, False, None, True),
            ("useContext", "useContext", usagecontext.UsageContext, True, None, False),
            ("variable", "variable", TestScriptVariable, True, None, False),
            ("version", "version", str, False, None, False),
        ])
        return properties
from . import backboneelement
class TestScriptDestination(backboneelement.BackboneElement):
    """ An abstract server representing a destination or receiver in a message
    exchange.

    An abstract server used in operations within this test script in the
    destination element.
    """

    resource_type = "TestScriptDestination"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.index = None    # Index of the abstract destination server, starting at 1 (int)
        self.profile = None  # FHIR-Server | FHIR-SDC-FormManager | FHIR-SDC-FormReceiver | FHIR-SDC-FormProcessor (Coding)

        super(TestScriptDestination, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Property tuples: (name, json_name, type, is_list, of_many, not_optional)
        properties = super(TestScriptDestination, self).elementProperties()
        properties.extend([
            ("index", "index", int, False, None, True),
            ("profile", "profile", coding.Coding, False, None, True),
        ])
        return properties
class TestScriptFixture(backboneelement.BackboneElement):
    """ Fixture in the test script - by reference (uri).

    Fixture in the test script - by reference (uri). All fixtures are required
    for the test script to execute.
    """

    resource_type = "TestScriptFixture"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.autocreate = None  # Whether to implicitly create the fixture during setup (bool)
        self.autodelete = None  # Whether to implicitly delete the fixture during teardown (bool)
        self.resource = None    # Reference of the resource (FHIRReference)

        super(TestScriptFixture, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Property tuples: (name, json_name, type, is_list, of_many, not_optional)
        properties = super(TestScriptFixture, self).elementProperties()
        properties.extend([
            ("autocreate", "autocreate", bool, False, None, True),
            ("autodelete", "autodelete", bool, False, None, True),
            ("resource", "resource", fhirreference.FHIRReference, False, None, False),
        ])
        return properties
class TestScriptMetadata(backboneelement.BackboneElement):
    """ Required capability that is assumed to function correctly on the FHIR
    server being tested.

    The required capability must exist and are assumed to function correctly on
    the FHIR server being tested.
    """

    resource_type = "TestScriptMetadata"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.capability = None  # Assumed server capabilities (list of TestScriptMetadataCapability)
        self.link = None        # Links to the FHIR specification (list of TestScriptMetadataLink)

        super(TestScriptMetadata, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Property tuples: (name, json_name, type, is_list, of_many, not_optional)
        properties = super(TestScriptMetadata, self).elementProperties()
        properties.extend([
            ("capability", "capability", TestScriptMetadataCapability, True, None, True),
            ("link", "link", TestScriptMetadataLink, True, None, False),
        ])
        return properties
class TestScriptMetadataCapability(backboneelement.BackboneElement):
    """ Capabilities that are assumed to function correctly on the FHIR server
    being tested.

    Capabilities that must exist and are assumed to function correctly on the
    FHIR server being tested.
    """

    resource_type = "TestScriptMetadataCapability"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.capabilities = None  # Required Capability Statement (str)
        self.description = None   # The expected capabilities of the server (str)
        self.destination = None   # Which server these requirements apply to (int)
        self.link = None          # Links to the FHIR specification (list of str)
        self.origin = None        # Which origin server these requirements apply to (list of int)
        self.required = None      # Are the capabilities required? (bool)
        self.validated = None     # Are the capabilities validated? (bool)

        super(TestScriptMetadataCapability, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Property tuples: (name, json_name, type, is_list, of_many, not_optional)
        properties = super(TestScriptMetadataCapability, self).elementProperties()
        properties.extend([
            ("capabilities", "capabilities", str, False, None, True),
            ("description", "description", str, False, None, False),
            ("destination", "destination", int, False, None, False),
            ("link", "link", str, True, None, False),
            ("origin", "origin", int, True, None, False),
            ("required", "required", bool, False, None, True),
            ("validated", "validated", bool, False, None, True),
        ])
        return properties
class TestScriptMetadataLink(backboneelement.BackboneElement):
    """ Links to the FHIR specification.

    A link to the FHIR specification that this test is covering.
    """

    resource_type = "TestScriptMetadataLink"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.description = None  # Short description (str)
        self.url = None          # URL to the specification (str)

        super(TestScriptMetadataLink, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Property tuples: (name, json_name, type, is_list, of_many, not_optional)
        properties = super(TestScriptMetadataLink, self).elementProperties()
        properties.extend([
            ("description", "description", str, False, None, False),
            ("url", "url", str, False, None, True),
        ])
        return properties
class TestScriptOrigin(backboneelement.BackboneElement):
    """An abstract server representing a client or sender in a message exchange.

    An abstract server used in operations within this test script in the
    origin element.
    """

    resource_type = "TestScriptOrigin"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.index = None  # Index of the abstract origin server, starting at 1. Type `int`.
        self.profile = None  # FHIR-Client | FHIR-SDC-FormFiller. Type `Coding` (dict in JSON).
        super(TestScriptOrigin, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptOrigin, self).elementProperties()
        props += [
            ("index", "index", int, False, None, True),
            ("profile", "profile", coding.Coding, False, None, True),
        ]
        return props
class TestScriptSetup(backboneelement.BackboneElement):
    """A series of required setup operations before tests are executed."""

    resource_type = "TestScriptSetup"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.action = None  # Setup operations/asserts. List of `TestScriptSetupAction` (dicts in JSON).
        super(TestScriptSetup, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptSetup, self).elementProperties()
        props += [
            ("action", "action", TestScriptSetupAction, True, None, True),
        ]
        return props
class TestScriptSetupAction(backboneelement.BackboneElement):
    """A setup operation or assert to perform.

    Action would contain either an operation or an assertion.
    """

    resource_type = "TestScriptSetupAction"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # `assert` is a Python keyword, hence the `assert_fhir` attribute name
        # mapped to the JSON key "assert" in elementProperties().
        self.assert_fhir = None  # The assertion to perform. Type `TestScriptSetupActionAssert`.
        self.operation = None  # The setup operation to perform. Type `TestScriptSetupActionOperation`.
        super(TestScriptSetupAction, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptSetupAction, self).elementProperties()
        props += [
            ("assert_fhir", "assert", TestScriptSetupActionAssert, False, None, False),
            ("operation", "operation", TestScriptSetupActionOperation, False, None, False),
        ]
        return props
class TestScriptSetupActionAssert(backboneelement.BackboneElement):
    """The assertion to perform.

    Evaluates the results of previous operations to determine if the server
    under test behaves appropriately.
    """

    resource_type = "TestScriptSetupActionAssert"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.compareToSourceExpression = None  # FHIRPath expression evaluated against the source fixture. `str`.
        self.compareToSourceId = None  # Id of the source fixture to be evaluated. `str`.
        self.compareToSourcePath = None  # XPath/JSONPath evaluated against the source fixture. `str`.
        self.contentType = None  # Mime type to compare against the 'Content-Type' header. `str`.
        self.description = None  # Tracking/reporting assertion description. `str`.
        self.direction = None  # response | request. `str`.
        self.expression = None  # The FHIRPath expression to be evaluated. `str`.
        self.headerField = None  # HTTP header field name. `str`.
        self.label = None  # Tracking/logging assertion label. `str`.
        self.minimumId = None  # Fixture Id of minimum content resource. `str`.
        self.navigationLinks = None  # Perform validation on navigation links? `bool`.
        self.operator = None  # equals | notEquals | in | notIn | greaterThan | lessThan |
        #   empty | notEmpty | contains | notContains | eval. `str`.
        self.path = None  # XPath or JSONPath expression. `str`.
        self.requestMethod = None  # delete | get | options | patch | post | put | head. `str`.
        self.requestURL = None  # Request URL comparison value. `str`.
        self.resource = None  # Resource type. `str`.
        self.response = None  # okay | created | noContent | notModified | bad | forbidden |
        #   notFound | methodNotAllowed | conflict | gone | preconditionFailed | unprocessable. `str`.
        self.responseCode = None  # HTTP response code to test. `str`.
        self.sourceId = None  # Fixture Id of source expression or headerField. `str`.
        self.validateProfileId = None  # Profile Id of validation profile reference. `str`.
        self.value = None  # The value to compare to. `str`.
        self.warningOnly = None  # Will this assert produce a warning only on error? `bool`.
        super(TestScriptSetupActionAssert, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptSetupActionAssert, self).elementProperties()
        props += [
            ("compareToSourceExpression", "compareToSourceExpression", str, False, None, False),
            ("compareToSourceId", "compareToSourceId", str, False, None, False),
            ("compareToSourcePath", "compareToSourcePath", str, False, None, False),
            ("contentType", "contentType", str, False, None, False),
            ("description", "description", str, False, None, False),
            ("direction", "direction", str, False, None, False),
            ("expression", "expression", str, False, None, False),
            ("headerField", "headerField", str, False, None, False),
            ("label", "label", str, False, None, False),
            ("minimumId", "minimumId", str, False, None, False),
            ("navigationLinks", "navigationLinks", bool, False, None, False),
            ("operator", "operator", str, False, None, False),
            ("path", "path", str, False, None, False),
            ("requestMethod", "requestMethod", str, False, None, False),
            ("requestURL", "requestURL", str, False, None, False),
            ("resource", "resource", str, False, None, False),
            ("response", "response", str, False, None, False),
            ("responseCode", "responseCode", str, False, None, False),
            ("sourceId", "sourceId", str, False, None, False),
            ("validateProfileId", "validateProfileId", str, False, None, False),
            ("value", "value", str, False, None, False),
            ("warningOnly", "warningOnly", bool, False, None, True),
        ]
        return props
class TestScriptSetupActionOperation(backboneelement.BackboneElement):
    """The setup operation to perform.

    The operation to perform.
    """

    resource_type = "TestScriptSetupActionOperation"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.accept = None  # Mime type to accept in the response payload, with charset etc. `str`.
        self.contentType = None  # Mime type of the request payload contents, with charset etc. `str`.
        self.description = None  # Tracking/reporting operation description. `str`.
        self.destination = None  # Server responding to the request. `int`.
        self.encodeRequestUrl = None  # Whether or not to send the request url in encoded format. `bool`.
        self.label = None  # Tracking/logging operation label. `str`.
        self.method = None  # delete | get | options | patch | post | put | head. `str`.
        self.origin = None  # Server initiating the request. `int`.
        self.params = None  # Explicitly defined path parameters. `str`.
        self.requestHeader = None  # HTTP headers to send. List of
        #   `TestScriptSetupActionOperationRequestHeader` (dicts in JSON).
        self.requestId = None  # Fixture Id of mapped request. `str`.
        self.resource = None  # Resource type. `str`.
        self.responseId = None  # Fixture Id of mapped response. `str`.
        self.sourceId = None  # Fixture Id of body for PUT and POST requests. `str`.
        self.targetId = None  # Id of fixture used for extracting the [id], [type], and [vid]
        #   for GET requests. `str`.
        self.type = None  # The operation code type that will be executed. `Coding` (dict in JSON).
        self.url = None  # Request URL. `str`.
        super(TestScriptSetupActionOperation, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptSetupActionOperation, self).elementProperties()
        props += [
            ("accept", "accept", str, False, None, False),
            ("contentType", "contentType", str, False, None, False),
            ("description", "description", str, False, None, False),
            ("destination", "destination", int, False, None, False),
            ("encodeRequestUrl", "encodeRequestUrl", bool, False, None, True),
            ("label", "label", str, False, None, False),
            ("method", "method", str, False, None, False),
            ("origin", "origin", int, False, None, False),
            ("params", "params", str, False, None, False),
            ("requestHeader", "requestHeader", TestScriptSetupActionOperationRequestHeader, True, None, False),
            ("requestId", "requestId", str, False, None, False),
            ("resource", "resource", str, False, None, False),
            ("responseId", "responseId", str, False, None, False),
            ("sourceId", "sourceId", str, False, None, False),
            ("targetId", "targetId", str, False, None, False),
            ("type", "type", coding.Coding, False, None, False),
            ("url", "url", str, False, None, False),
        ]
        return props
class TestScriptSetupActionOperationRequestHeader(backboneelement.BackboneElement):
    """Each operation can have one or more header elements.

    Header elements would be used to set HTTP headers.
    """

    resource_type = "TestScriptSetupActionOperationRequestHeader"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.field = None  # HTTP header field name. Type `str`.
        self.value = None  # HTTP headerfield value. Type `str`.
        super(TestScriptSetupActionOperationRequestHeader, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptSetupActionOperationRequestHeader, self).elementProperties()
        props += [
            ("field", "field", str, False, None, True),
            ("value", "value", str, False, None, True),
        ]
        return props
class TestScriptTeardown(backboneelement.BackboneElement):
    """A series of required clean up steps.

    A series of operations required to clean up after all the tests are
    executed (successfully or otherwise).
    """

    resource_type = "TestScriptTeardown"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.action = None  # Teardown operations. List of `TestScriptTeardownAction` (dicts in JSON).
        super(TestScriptTeardown, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptTeardown, self).elementProperties()
        props += [
            ("action", "action", TestScriptTeardownAction, True, None, True),
        ]
        return props
class TestScriptTeardownAction(backboneelement.BackboneElement):
    """One or more teardown operations to perform.

    The teardown action will only contain an operation.
    """

    resource_type = "TestScriptTeardownAction"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Teardown reuses the setup operation type.
        self.operation = None  # The teardown operation to perform. Type `TestScriptSetupActionOperation`.
        super(TestScriptTeardownAction, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptTeardownAction, self).elementProperties()
        props += [
            ("operation", "operation", TestScriptSetupActionOperation, False, None, True),
        ]
        return props
class TestScriptTest(backboneelement.BackboneElement):
    """A test in this script."""

    resource_type = "TestScriptTest"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.action = None  # Test operations/asserts. List of `TestScriptTestAction` (dicts in JSON).
        self.description = None  # Tracking/reporting short description of the test. Type `str`.
        self.name = None  # Tracking/logging name of this test. Type `str`.
        super(TestScriptTest, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptTest, self).elementProperties()
        props += [
            ("action", "action", TestScriptTestAction, True, None, True),
            ("description", "description", str, False, None, False),
            ("name", "name", str, False, None, False),
        ]
        return props
class TestScriptTestAction(backboneelement.BackboneElement):
    """A test operation or assert to perform.

    Action would contain either an operation or an assertion.
    """

    resource_type = "TestScriptTestAction"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        # Test actions reuse the setup assert/operation types; `assert_fhir`
        # maps to the JSON key "assert" (a Python keyword).
        self.assert_fhir = None  # The setup assertion to perform. Type `TestScriptSetupActionAssert`.
        self.operation = None  # The setup operation to perform. Type `TestScriptSetupActionOperation`.
        super(TestScriptTestAction, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptTestAction, self).elementProperties()
        props += [
            ("assert_fhir", "assert", TestScriptSetupActionAssert, False, None, False),
            ("operation", "operation", TestScriptSetupActionOperation, False, None, False),
        ]
        return props
class TestScriptVariable(backboneelement.BackboneElement):
    """Placeholder for evaluated elements.

    Variable is set based either on element value in response body or on
    header field value in the response headers.
    """

    resource_type = "TestScriptVariable"

    def __init__(self, jsondict=None, strict=True):
        """Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """
        self.defaultValue = None  # Default, hard-coded, or user-defined value for this variable. `str`.
        self.description = None  # Natural language description of the variable. `str`.
        self.expression = None  # The FHIRPath expression against the fixture body. `str`.
        self.headerField = None  # HTTP header field name for source. `str`.
        self.hint = None  # Hint help text for default value to enter. `str`.
        self.name = None  # Descriptive name for this variable. `str`.
        self.path = None  # XPath or JSONPath against the fixture body. `str`.
        self.sourceId = None  # Fixture Id of source expression or headerField within this variable. `str`.
        super(TestScriptVariable, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        """Return the element property tuples, appended to the base class's."""
        props = super(TestScriptVariable, self).elementProperties()
        props += [
            ("defaultValue", "defaultValue", str, False, None, False),
            ("description", "description", str, False, None, False),
            ("expression", "expression", str, False, None, False),
            ("headerField", "headerField", str, False, None, False),
            ("hint", "hint", str, False, None, False),
            ("name", "name", str, False, None, True),
            ("path", "path", str, False, None, False),
            ("sourceId", "sourceId", str, False, None, False),
        ]
        return props
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
try:
from . import contactdetail
except ImportError:
contactdetail = sys.modules[__package__ + '.contactdetail']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import usagecontext
except ImportError:
usagecontext = sys.modules[__package__ + '.usagecontext']
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various high level TF models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.learn.python.learn.ops import losses_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
def linear_regression_zero_init(x, y):
    """Linear regression subgraph with zero-value initial weights and bias.

    Args:
      x: tensor or placeholder for input features.
      y: tensor or placeholder for target.

    Returns:
      Predictions and loss tensors.
    """
    # Zero mean and zero stddev make the initializer deterministic at 0.
    zero = 0.0
    return linear_regression(x, y, init_mean=zero, init_stddev=zero)
def logistic_regression_zero_init(x, y):
    """Logistic regression subgraph with zero-value initial weights and bias.

    Args:
      x: tensor or placeholder for input features.
      y: tensor or placeholder for target.

    Returns:
      Predictions and loss tensors.
    """
    # Zero mean and zero stddev make the initializer deterministic at 0.
    zero = 0.0
    return logistic_regression(x, y, init_mean=zero, init_stddev=zero)
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
    """Creates linear regression TensorFlow subgraph.

    Args:
      x: tensor or placeholder for input features.
      y: tensor or placeholder for target.
      init_mean: the mean value to use for initialization.
      init_stddev: the standard deviation to use for initialization.

    Returns:
      Predictions and loss tensors.

    Side effects:
      The variables linear_regression.weights and linear_regression.bias are
      initialized as follows.  If init_mean is not None, initialization is a
      random normal with the given init_mean and init_stddev (set both to 0.0
      for a zero initialization in convex use cases).  If init_mean is None,
      the variable scope's default (uniform unit scaling) initializer is used.
    """
    with vs.variable_scope('linear_regression'):
        scope = vs.get_variable_scope().name
        logging_ops.histogram_summary('%s.x' % scope, x)
        logging_ops.histogram_summary('%s.y' % scope, y)
        dtype = x.dtype.base_dtype
        target_shape = y.get_shape()
        # A rank-1 target is treated as a single output column.
        out_dim = 1 if len(target_shape) == 1 else target_shape[1]
        in_dim = x.get_shape()[1]
        # Set up the requested initialization.
        if init_mean is None:
            w = vs.get_variable('weights', [in_dim, out_dim], dtype=dtype)
            b = vs.get_variable('bias', [out_dim], dtype=dtype)
        else:
            initializer = init_ops.random_normal_initializer(
                init_mean, init_stddev, dtype=dtype)
            w = vs.get_variable(
                'weights', [in_dim, out_dim], initializer=initializer, dtype=dtype)
            b = vs.get_variable(
                'bias', [out_dim], initializer=initializer, dtype=dtype)
        logging_ops.histogram_summary('%s.weights' % scope, w)
        logging_ops.histogram_summary('%s.bias' % scope, b)
        return losses_ops.mean_squared_error_regressor(x, y, w, b)
def logistic_regression(x,
                        y,
                        class_weight=None,
                        init_mean=None,
                        init_stddev=1.0):
  """Creates logistic regression TensorFlow subgraph.

  Args:
    x: tensor or placeholder for input features,
       shape should be [batch_size, n_features].
    y: tensor or placeholder for target,
       shape should be [batch_size, n_classes].
    class_weight: tensor, [n_classes], where for each class
                  it has weight of the class. If not provided
                  will check if graph contains tensor `class_weight:0`.
                  If that is not provided either all ones are used.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard devation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables logistic_regression.weights and logistic_regression.bias
    are initialized as follows.  If init_mean is not None, then
    initialization will be done using a random normal initializer with the
    given init_mean and init_stddev.  (These may be set to 0.0 each if a
    zero initialization is desirable for convex use cases.)  If init_mean
    is None, then the variable scope's default initializer is used.
  """
  with vs.variable_scope('logistic_regression'):
    scope_name = vs.get_variable_scope().name
    logging_ops.histogram_summary('%s.x' % scope_name, x)
    logging_ops.histogram_summary('%s.y' % scope_name, y)
    dtype = x.dtype.base_dtype
    # Set up the requested initialization.
    if init_mean is None:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], y.get_shape()[-1]], dtype=dtype)
      bias = vs.get_variable('bias', [y.get_shape()[-1]], dtype=dtype)
    else:
      weights = vs.get_variable('weights',
                                [x.get_shape()[1], y.get_shape()[-1]],
                                initializer=init_ops.random_normal_initializer(
                                    init_mean, init_stddev, dtype=dtype),
                                dtype=dtype)
      bias = vs.get_variable('bias', [y.get_shape()[-1]],
                             initializer=init_ops.random_normal_initializer(
                                 init_mean, init_stddev, dtype=dtype),
                             dtype=dtype)
    logging_ops.histogram_summary('%s.weights' % scope_name, weights)
    logging_ops.histogram_summary('%s.bias' % scope_name, bias)
    # If no class weight provided, try to retrieve one from a pre-defined
    # tensor name in the graph.
    # BUG FIX: the original used `if not class_weight:`, which applies Python
    # truthiness to the argument.  When a tf.Tensor is actually passed, that
    # raises TypeError ("Using a tf.Tensor as a Python bool is not allowed").
    # Comparing to None tests exactly the documented "not provided" case.
    if class_weight is None:
      try:
        class_weight = ops.get_default_graph().get_tensor_by_name(
            'class_weight:0')
      except KeyError:
        pass

    return losses_ops.softmax_classifier(x,
                                         y,
                                         weights,
                                         bias,
                                         class_weight=class_weight)
## This will be in TensorFlow 0.7.
## TODO(ilblackdragon): Clean this up when it's released
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply
reverses the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = array_ops_.pack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops_.unpack(s_reversed)
return result
def bidirectional_rnn(cell_fw,
                      cell_bw,
                      inputs,
                      initial_state_fw=None,
                      initial_state_bw=None,
                      dtype=None,
                      sequence_length=None,
                      scope=None):
    """Creates a bidirectional recurrent neural network.

    Similar to the unidirectional case (rnn) but takes input and builds
    independent forward and backward RNNs whose final outputs are
    depth-concatenated, so the output has the format
    [time][batch][cell_fw.output_size + cell_bw.output_size].  The input_size
    of the forward and backward cell must match.  The initial state for both
    directions is zero by default (but can be set optionally) and no
    intermediate states are ever returned -- the network is fully unrolled for
    the given (passed in) length(s) of the sequence(s) or completely unrolled
    if length(s) is not given.

    Args:
      cell_fw: An instance of RNNCell, to be used for forward direction.
      cell_bw: An instance of RNNCell, to be used for backward direction.
      inputs: A length T list of inputs, each a tensor of shape
              [batch_size, cell.input_size].
      initial_state_fw: (optional) An initial state for the forward RNN.
                        This must be a tensor of appropriate type and shape
                        [batch_size x cell.state_size].
      initial_state_bw: (optional) Same as for initial_state_fw.
      dtype: (optional) The data type for the initial state. Required if
             either of the initial states are not provided.
      sequence_length: (optional) An int64 vector (tensor) of size
                       [batch_size], containing the actual lengths for each of
                       the sequences.
      scope: VariableScope for the created subgraph; defaults to "BiRNN"

    Returns:
      A pair (outputs, state) where:
        outputs is a length T list of outputs (one for each input), which
          are depth-concatenated forward and backward outputs
        state is the concatenated final state of the forward and backward RNN

    Raises:
      TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
      ValueError: If inputs is None or an empty list.
    """
    # Validate arguments up front; order of checks matches the docstring.
    if not isinstance(cell_fw, nn.rnn_cell.RNNCell):
        raise TypeError('cell_fw must be an instance of RNNCell')
    if not isinstance(cell_bw, nn.rnn_cell.RNNCell):
        raise TypeError('cell_bw must be an instance of RNNCell')
    if not isinstance(inputs, list):
        raise TypeError('inputs must be a list')
    if not inputs:
        raise ValueError('inputs must not be empty')

    base_scope = scope or 'BiRNN'

    # Forward direction: a plain unrolled RNN over the inputs.
    with vs.variable_scope(base_scope + '_FW'):
        output_fw, state_fw = nn.rnn(cell_fw, inputs, initial_state_fw, dtype,
                                     sequence_length)

    # Backward direction: run over the time-reversed inputs, then flip the
    # outputs back so they line up with the forward direction.
    with vs.variable_scope(base_scope + '_BW'):
        reversed_outputs, state_bw = nn.rnn(
            cell_bw, _reverse_seq(inputs, sequence_length),
            initial_state_bw, dtype, sequence_length)
    output_bw = _reverse_seq(reversed_outputs, sequence_length)

    # Depth-concatenate the per-timestep forward/backward outputs.
    outputs = []
    for fw, bw in zip(output_fw, output_bw):
        outputs.append(array_ops_.concat(1, [fw, bw]))
    return outputs, array_ops_.concat(1, [state_fw, state_bw])
# End of TensorFlow 0.7
def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional,
                  target_predictor_fn, sequence_length, initial_state,
                  attn_length, attn_size, attn_vec_size):
    """Returns a function that creates a RNN TensorFlow subgraph.

    Args:
      rnn_size: The size for rnn cell, e.g. size of your word embeddings.
      cell_type: The type of rnn cell, including rnn, gru, and lstm.
      num_layers: The number of layers of the rnn model.
      input_op_fn: Function that will transform the input tensor, such as
                   creating word embeddings, byte list, etc. This takes
                   an argument `x` for input and returns transformed `x`.
      bidirectional: boolean, Whether this is a bidirectional rnn.
      target_predictor_fn: Function that will predict target from input
                           features. This can be logistic regression, linear
                           regression or any other model, that takes `x`, `y`
                           and returns predictions and loss tensors.
      sequence_length: If sequence_length is provided, dynamic calculation is
        performed. This saves computational time when unrolling past max
        sequence length. Required for bidirectional RNNs.
      initial_state: An initial state for the RNN. This must be a tensor of
        appropriate type and shape [batch_size x cell.state_size].
      attn_length: integer, the size of attention vector attached to rnn cells.
      attn_size: integer, the size of an attention window attached to rnn
        cells.
      attn_vec_size: integer, the number of convolutional features calculated
        on attention state and the size of the hidden layer built from base
        cell state.

    Returns:
      A function that creates the subgraph.
    """

    def rnn_estimator(x, y):
        """RNN estimator with target predictor function on top."""
        x = input_op_fn(x)

        # Dispatch table of supported cell constructors.
        # TODO: state_is_tuple=False is deprecated
        cell_factories = {
            'rnn': nn.rnn_cell.BasicRNNCell,
            'gru': nn.rnn_cell.GRUCell,
            'lstm': functools.partial(
                nn.rnn_cell.BasicLSTMCell, state_is_tuple=False),
        }
        if cell_type not in cell_factories:
            raise ValueError('cell_type {} is not supported. '.format(cell_type))
        cell_fn = cell_factories[cell_type]

        def _build_stack():
            """Build one (optionally attention-wrapped) multi-layer cell stack."""
            cell = cell_fn(rnn_size)
            if attn_length is not None:
                cell = contrib_rnn.AttentionCellWrapper(
                    cell, attn_length=attn_length, attn_size=attn_size,
                    attn_vec_size=attn_vec_size, state_is_tuple=False)
            return nn.rnn_cell.MultiRNNCell([cell] * num_layers,
                                            state_is_tuple=False)

        if bidirectional:
            # Independent forward and backward stacks.
            fw_stack = _build_stack()
            bw_stack = _build_stack()
            # pylint: disable=unexpected-keyword-arg, no-value-for-parameter
            _, encoding = bidirectional_rnn(fw_stack,
                                            bw_stack,
                                            x,
                                            dtype=dtypes.float32,
                                            sequence_length=sequence_length,
                                            initial_state_fw=initial_state,
                                            initial_state_bw=initial_state)
        else:
            _, encoding = nn.rnn(_build_stack(),
                                 x,
                                 dtype=dtypes.float32,
                                 sequence_length=sequence_length,
                                 initial_state=initial_state)
        return target_predictor_fn(encoding, y)

    return rnn_estimator
| |
from __future__ import absolute_import
from __future__ import unicode_literals
import flask
from flask import request
from keg.web import BaseView as KegBaseView, route, rule
# Shared Flask blueprint that every routing test view below registers against.
blueprint = flask.Blueprint('routing', __name__)
# Common base: binds all subclasses' routes to the module-level blueprint.
class BaseView(KegBaseView):
    blueprint = blueprint
class VerbRouting(BaseView):
    """
    GET /verb-routing -> 'method get'
    POST /verb-routing -> 'method post'
    """
    def get(self):
        # Dispatched automatically for the GET verb.
        response = 'method get'
        return response

    def post(self):
        # Dispatched automatically for the POST verb.
        response = 'method post'
        return response
class VerbRoutingSub(VerbRouting):
    """
    Same as VerbRouting: inherits get/post handlers, but is registered under
    its own URL derived from the subclass name.
    """
class ExplicitRoute(BaseView):
    """
    /explicit-route -> 404
    /some-route/foo -> 'get some-route'
    """
    # absolute URL indicates the default class URL will not be generated
    rule('/some-route')

    def get(self):
        return 'get some-route'
class HelloWorld(BaseView):
    """
    /hello -> 'Hello World'
    /hello/foo -> 'Hello Foo'
    """
    # relative URL indicates this route should be appended to the default rule for the class
    rule('<name>')
    def get(self, name='World'):
        # The parameter default covers the bare /hello URL (no <name> segment).
        return 'Hello {}'.format(name)
class HWRuleDefault(BaseView):
    """
    GET /hwrd -> 'Hello RD'
    GET /hwrd/foo -> 'Hello Foo'
    POST /hwrd -> 'post Hello RD'
    POST /hwrd/foo -> 'post Hello Foo'
    """
    # Since the default rule needs to have defaults, you need to define it and not rely on the
    # auto-generated one.
    rule('/hwrd', post=True, defaults={'name': 'RD'})
    rule('<name>', post=True)
    def post(self, name):
        return 'post Hello {}'.format(name)
    def get(self, name):
        return 'Hello {}'.format(name)
class HelloReq(BaseView):
    """
    /hello-req -> 404
    /hello-req/foo -> 'Hello Foo'
    """
    # making the rule absolute disables the default rule that would have been created by the class.
    rule('/hello-req/<name>')
    def get(self, name):
        # <name> is required here: no default value, so the bare URL 404s.
        return 'Hello {}'.format(name)
class Cars(BaseView):
    """
    CRUD for a model/entity
    GET /cars/list - show list of cars that can be managed
    GET /cars/create - show empty car form
    POST /cars/create - create a new car
    GET /cars/edit/12 - show car form w/ data
    POST /cars/edit/12 - update car 12
    GET /cars/delete/12 - deletes car 12
    """
    @route()
    def list(self):
        # Plain GET endpoint derived from the method name.
        return 'list'
    @route()
    def create_get(self):
        # GET half of the create pair: renders the empty form.
        return 'create get'
    @route(post_only=True)
    def create_post(self):
        # POST half of the create pair: handles the submitted form.
        return 'create post'
    @route('<int:car_id>', post=True)
    def edit(self, car_id):
        # One endpoint serves both verbs; pick the response template by verb.
        template = 'form w/ data: {}' if request.method == 'GET' else 'update car: {}'
        return template.format(car_id)
    @route('<int:ident>')
    def delete(self, ident):
        return 'delete: {}'.format(ident)
class Tickets(BaseView):
    """
    REST API Example
    GET /tickets - Retrieves a list of tickets
    GET /tickets/12 - Retrieves a specific ticket
    POST /tickets - Creates a new ticket
    PUT /tickets/12 - Updates ticket #12
    PATCH /tickets/12 - Partially updates ticket #12
    DELETE /tickets/12 - Deletes ticket #12
    """
    # Collection rule (absolute) and member rule (relative, with an id).
    rule('/tickets', methods=['GET', 'POST'])
    rule('<int:ticket_id>', methods=['GET', 'PUT', 'PATCH', 'DELETE'])
    def get(self, ticket_id=None):
        # Member GET when an id was captured, collection GET otherwise.
        return 'single' if ticket_id else 'list'
    def post(self):
        return 'create'
    def put(self, ticket_id):
        return 'update'
    def patch(self, ticket_id):
        return 'partial update'
    def delete(self, ticket_id):
        return 'delete'
class Misc(BaseView):
    """
    Grab-bag of rule/route combinations:
    - two absolute class rules (/misc, /misc2) plus relative ones (foo, bar)
    - a method with an absolute @route URL
    - a method with two stacked @route decorators
    """
    rule('/misc')
    rule('foo')
    rule('/misc2')
    rule('bar', post_only=True)
    def get(self):
        return 'get'
    def post(self):
        return 'post'
    @route('/an-abs-url')
    def an_abs_url(self):
        return 'found me'
    @route('8')
    @route('9')
    def two_routes(self):
        # Both /misc.../8 and .../9 dispatch here.
        return '17'
class CrudBase(KegBaseView):
    """
    Testing a view that is intended to be used as an abstract class: it should be inherited
    but will never be instantiated itself.
    This is similar to BaseView, except that this class is intended to represent a class that
    isn't confined to a certain blueprint, but intended to give similar functionality in a way
    that can be used in/with different blueprints.
    Pain points that this identifies:
    - This class itself should be created without throwing an exception due to not having
    a blueprint.
    - The creation of this class should not cause any application routes to be defined.
    - The creation of a subclass of this class should result in routes for the subclass
    being created.
    """
    @route()
    def list(self):
        # Response includes the concrete subclass name so tests can tell routes apart.
        return 'listing {}'.format(self.__class__.__name__)
# Concrete subclasses of CrudBase: assigning a blueprint is what triggers
# route creation for the inherited `list` endpoint.
class Trucks(CrudBase):
    """
    CRUD for a model/entity
    GET /trucks/list
    """
    blueprint = blueprint
class Planes(CrudBase):
    """
    CRUD for a model/entity
    GET /planes/list
    """
    blueprint = blueprint
| |
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations.
There are "opportunistic" tests which allow testing against all 3 databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up db's named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands::
| sudo -u postgres psql
| postgres=# create user openstack_citest with createdb login password
| 'openstack_citest';
| postgres=# create database openstack_citest with owner openstack_citest;
"""
import glob
# NOTE(dhellmann): Use stdlib logging instead of oslo.log because we
# need to call methods on the logger that are not exposed through the
# adapter provided by oslo.log.
import logging
import os
from migrate import UniqueConstraint
from migrate.versioning import repository
import mock
from oslo_db.sqlalchemy import test_base
from oslo_db.sqlalchemy import test_migrations
from oslo_db.sqlalchemy import utils as oslodbutils
import sqlalchemy
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy.sql import null
from nova.db import migration
from nova.db.sqlalchemy import migrate_repo
from nova.db.sqlalchemy import migration as sa_migration
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova import test
from nova.tests import fixtures as nova_fixtures
LOG = logging.getLogger(__name__)
class NovaMigrationsCheckers(test_migrations.ModelsMigrationsSync,
                             test_migrations.WalkVersionsMixin):
    """Test sqlalchemy-migrate migrations."""
    # Extra headroom for the slow walk through every migration version.
    TIMEOUT_SCALING_FACTOR = 2
    @property
    def INIT_VERSION(self):
        # Version the repository is stamped with before any migration runs.
        return migration.db_initial_version()
    @property
    def REPOSITORY(self):
        # sqlalchemy-migrate repository holding nova's migration scripts.
        return repository.Repository(
            os.path.abspath(os.path.dirname(migrate_repo.__file__)))
    @property
    def migration_api(self):
        # Versioning API used by walk_versions to upgrade/version_control.
        return sa_migration.versioning_api
    @property
    def migrate_engine(self):
        # NOTE(review): `self.engine` is presumably supplied by the oslo.db
        # test-base mixin in the concrete subclasses — this mixin does not
        # define it.
        return self.engine
    def setUp(self):
        super(NovaMigrationsCheckers, self).setUp()
        # NOTE(viktors): We should reduce log output because it causes issues,
        # when we run tests with testr
        migrate_log = logging.getLogger('migrate')
        old_level = migrate_log.level
        migrate_log.setLevel(logging.WARN)
        self.addCleanup(migrate_log.setLevel, old_level)
        # NOTE(rpodolyaka): we need to repeat the functionality of the base
        # test case a bit here as this gets overridden by oslotest base test
        # case and nova base test case cleanup must be the last one (as it
        # deletes attributes of test case instances)
        self.useFixture(nova_fixtures.Timeout(
            os.environ.get('OS_TEST_TIMEOUT', 0),
            self.TIMEOUT_SCALING_FACTOR))
def assertColumnExists(self, engine, table_name, column):
self.assertTrue(oslodbutils.column_exists(engine, table_name, column),
'Column %s.%s does not exist' % (table_name, column))
def assertColumnNotExists(self, engine, table_name, column):
self.assertFalse(oslodbutils.column_exists(engine, table_name, column),
'Column %s.%s should not exist' % (table_name, column))
def assertTableNotExists(self, engine, table):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, table)
def assertIndexExists(self, engine, table_name, index):
self.assertTrue(oslodbutils.index_exists(engine, table_name, index),
'Index %s on table %s does not exist' %
(index, table_name))
def assertIndexNotExists(self, engine, table_name, index):
self.assertFalse(oslodbutils.index_exists(engine, table_name, index),
'Index %s on table %s should not exist' %
(index, table_name))
def assertIndexMembers(self, engine, table, index, members):
# NOTE(johannes): Order of columns can matter. Most SQL databases
# can use the leading columns for optimizing queries that don't
# include all of the covered columns.
self.assertIndexExists(engine, table, index)
t = oslodbutils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = [c.name for c in idx.columns]
break
self.assertEqual(members, index_columns)
    # Implementations for ModelsMigrationsSync
    def db_sync(self, engine):
        # Force sa_migration to operate on the test engine, not the
        # globally-configured one.
        with mock.patch.object(sa_migration, 'get_engine',
                               return_value=engine):
            sa_migration.db_sync()
    def get_engine(self):
        return self.migrate_engine
    def get_metadata(self):
        # Model metadata the migrated schema is compared against.
        return models.BASE.metadata
    def include_object(self, object_, name, type_, reflected, compare_to):
        # Filter which reflected/model objects take part in the comparison.
        if type_ == 'table':
            # migrate_version is a sqlalchemy-migrate control table and
            # isn't included in the model. shadow_* are generated from
            # the model and have their own tests to ensure they don't
            # drift.
            if name == 'migrate_version' or name.startswith('shadow_'):
                return False
        return True
def _skippable_migrations(self):
special = [
216, # Havana
272, # NOOP migration due to revert
]
havana_placeholders = range(217, 227)
icehouse_placeholders = range(235, 244)
juno_placeholders = range(255, 265)
kilo_placeholders = range(281, 291)
return (special +
havana_placeholders +
icehouse_placeholders +
juno_placeholders +
kilo_placeholders)
    def migrate_up(self, version, with_data=False):
        # Enforce that every non-placeholder migration ships with a
        # _check_NNN data test before delegating to the walker.
        if with_data:
            check = getattr(self, "_check_%03d" % version, None)
            if version not in self._skippable_migrations():
                self.assertIsNotNone(check,
                                     ('DB Migration %i does not have a '
                                      'test. Please add one!') % version)
        super(NovaMigrationsCheckers, self).migrate_up(version, with_data)
    def test_walk_versions(self):
        # Upgrades only: downgrades are unsupported (see ProjectTestCase).
        self.walk_versions(snake_walk=False, downgrade=False)
    # Migration 227: project_user_quotas.resource must be wide enough for
    # the longest resource name.
    def _check_227(self, engine, data):
        table = oslodbutils.get_table(engine, 'project_user_quotas')
        # Insert fake_quotas with the longest resource name.
        fake_quotas = {'id': 5,
                       'project_id': 'fake_project',
                       'user_id': 'fake_user',
                       'resource': 'injected_file_content_bytes',
                       'hard_limit': 10}
        table.insert().execute(fake_quotas)
        # Check we can get the longest resource name.
        quota = table.select(table.c.id == 5).execute().first()
        self.assertEqual(quota['resource'], 'injected_file_content_bytes')
    # Migration 228: compute_nodes.metrics Text column added.
    def _check_228(self, engine, data):
        self.assertColumnExists(engine, 'compute_nodes', 'metrics')
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        self.assertIsInstance(compute_nodes.c.metrics.type,
                              sqlalchemy.types.Text)
    # Migration 229: compute_nodes.extra_resources Text column added.
    def _check_229(self, engine, data):
        self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        self.assertIsInstance(compute_nodes.c.extra_resources.type,
                              sqlalchemy.types.Text)
    # Migration 230: host/details columns on instance_actions_events
    # (and its shadow table).
    def _check_230(self, engine, data):
        for table_name in ['instance_actions_events',
                           'shadow_instance_actions_events']:
            self.assertColumnExists(engine, table_name, 'host')
            self.assertColumnExists(engine, table_name, 'details')
        action_events = oslodbutils.get_table(engine,
                                              'instance_actions_events')
        self.assertIsInstance(action_events.c.host.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(action_events.c.details.type,
                              sqlalchemy.types.Text)
    # Migration 231: instances.ephemeral_key_uuid added and mirrored into
    # the shadow table.
    def _check_231(self, engine, data):
        self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
        instances = oslodbutils.get_table(engine, 'instances')
        self.assertIsInstance(instances.c.ephemeral_key_uuid.type,
                              sqlalchemy.types.String)
        self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
    # Migration 232: temporary dump_* tables removed.
    def _check_232(self, engine, data):
        table_names = ['compute_node_stats', 'compute_nodes',
                       'instance_actions', 'instance_actions_events',
                       'instance_faults', 'migrations']
        for table_name in table_names:
            self.assertTableNotExists(engine, 'dump_' + table_name)
    # Migration 233: stats folded into compute_nodes; compute_node_stats
    # table dropped.
    def _check_233(self, engine, data):
        self.assertColumnExists(engine, 'compute_nodes', 'stats')
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        self.assertIsInstance(compute_nodes.c.stats.type,
                              sqlalchemy.types.Text)
        self.assertRaises(sqlalchemy.exc.NoSuchTableError,
                          oslodbutils.get_table, engine, 'compute_node_stats')
    # Migration 234: composite index on reservations(deleted, expire).
    def _check_234(self, engine, data):
        self.assertIndexMembers(engine, 'reservations',
                                'reservations_deleted_expire_idx',
                                ['deleted', 'expire'])
    # Migration 244: volume_usage_cache.user_id widened to 64 chars.
    def _check_244(self, engine, data):
        volume_usage_cache = oslodbutils.get_table(
            engine, 'volume_usage_cache')
        self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
    def _pre_upgrade_245(self, engine):
        # create a fake network so _check_245 can inspect the defaults the
        # migration applies to pre-existing rows
        networks = oslodbutils.get_table(engine, 'networks')
        fake_network = {'id': 1}
        networks.insert().execute(fake_network)
    # Migration 245: new networks columns get sane defaults on old rows.
    def _check_245(self, engine, data):
        networks = oslodbutils.get_table(engine, 'networks')
        network = networks.select(networks.c.id == 1).execute().first()
        # mtu should default to None
        self.assertIsNone(network.mtu)
        # dhcp_server should default to None
        self.assertIsNone(network.dhcp_server)
        # enable dhcp should default to true
        self.assertTrue(network.enable_dhcp)
        # share address should default to false
        self.assertFalse(network.share_address)
    # Migration 246: exactly one FK from pci_devices.compute_node_id.
    def _check_246(self, engine, data):
        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
                                 if fk.parent.name == 'compute_node_id']))
    # Migration 247: nullability tightened on quota_usages/pci_devices.
    def _check_247(self, engine, data):
        quota_usages = oslodbutils.get_table(engine, 'quota_usages')
        self.assertFalse(quota_usages.c.resource.nullable)
        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        self.assertTrue(pci_devices.c.deleted.nullable)
        self.assertFalse(pci_devices.c.product_id.nullable)
        self.assertFalse(pci_devices.c.vendor_id.nullable)
        self.assertFalse(pci_devices.c.dev_type.nullable)
    # Migration 248: reservations index (re)created with the right columns.
    def _check_248(self, engine, data):
        self.assertIndexMembers(engine, 'reservations',
                                'reservations_deleted_expire_idx',
                                ['deleted', 'expire'])
    def _check_249(self, engine, data):
        # Assert that only one index exists that covers columns
        # instance_uuid and device_name
        bdm = oslodbutils.get_table(engine, 'block_device_mapping')
        self.assertEqual(1, len([i for i in bdm.indexes
                                 if [c.name for c in i.columns] ==
                                 ['instance_uuid', 'device_name']]))
    # Migration 250: instance_group_metadata (and shadow) dropped.
    def _check_250(self, engine, data):
        self.assertTableNotExists(engine, 'instance_group_metadata')
        self.assertTableNotExists(engine, 'shadow_instance_group_metadata')
    # Migration 251: numa_topology Text column on compute_nodes + shadow.
    def _check_251(self, engine, data):
        self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
        self.assertColumnExists(engine, 'shadow_compute_nodes',
                                'numa_topology')
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        shadow_compute_nodes = oslodbutils.get_table(engine,
                                                     'shadow_compute_nodes')
        self.assertIsInstance(compute_nodes.c.numa_topology.type,
                              sqlalchemy.types.Text)
        self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
                              sqlalchemy.types.Text)
    # Migration 252: instance_extra tables created with a uuid index.
    def _check_252(self, engine, data):
        oslodbutils.get_table(engine, 'instance_extra')
        oslodbutils.get_table(engine, 'shadow_instance_extra')
        self.assertIndexMembers(engine, 'instance_extra',
                                'instance_extra_idx',
                                ['instance_uuid'])
    # Migration 253: pci_requests Text column on instance_extra + shadow.
    def _check_253(self, engine, data):
        self.assertColumnExists(engine, 'instance_extra', 'pci_requests')
        self.assertColumnExists(
            engine, 'shadow_instance_extra', 'pci_requests')
        instance_extra = oslodbutils.get_table(engine, 'instance_extra')
        shadow_instance_extra = oslodbutils.get_table(engine,
                                                      'shadow_instance_extra')
        self.assertIsInstance(instance_extra.c.pci_requests.type,
                              sqlalchemy.types.Text)
        self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
                              sqlalchemy.types.Text)
    # Migration 254: request_id String column on pci_devices + shadow.
    def _check_254(self, engine, data):
        self.assertColumnExists(engine, 'pci_devices', 'request_id')
        self.assertColumnExists(
            engine, 'shadow_pci_devices', 'request_id')
        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        shadow_pci_devices = oslodbutils.get_table(
            engine, 'shadow_pci_devices')
        self.assertIsInstance(pci_devices.c.request_id.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(shadow_pci_devices.c.request_id.type,
                              sqlalchemy.types.String)
    def _check_265(self, engine, data):
        # Assert that only one index exists that covers columns
        # host and deleted
        instances = oslodbutils.get_table(engine, 'instances')
        self.assertEqual(1, len([i for i in instances.indexes
                                 if [c.name for c in i.columns][:2] ==
                                 ['host', 'deleted']]))
        # and only one index covers host column
        iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
        self.assertEqual(1, len([i for i in iscsi_targets.indexes
                                 if [c.name for c in i.columns][:1] ==
                                 ['host']]))
    # Migration 266: tags table created with resource_id/tag columns.
    def _check_266(self, engine, data):
        self.assertColumnExists(engine, 'tags', 'resource_id')
        self.assertColumnExists(engine, 'tags', 'tag')
        table = oslodbutils.get_table(engine, 'tags')
        self.assertIsInstance(table.c.resource_id.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(table.c.tag.type,
                              sqlalchemy.types.String)
    def _pre_upgrade_267(self, engine):
        # Create a fixed_ips row with a null instance_uuid (if not already
        # there) to make sure that's not deleted.
        fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
        fake_fixed_ip = {'id': 1}
        fixed_ips.insert().execute(fake_fixed_ip)
        # Create an instance record with a valid (non-null) UUID so we make
        # sure we don't do something stupid and delete valid records.
        instances = oslodbutils.get_table(engine, 'instances')
        fake_instance = {'id': 1, 'uuid': 'fake-non-null-uuid'}
        instances.insert().execute(fake_instance)
        # Add a null instance_uuid entry for the volumes table
        # since it doesn't have a foreign key back to the instances table.
        volumes = oslodbutils.get_table(engine, 'volumes')
        fake_volume = {'id': '9c3c317e-24db-4d57-9a6f-96e6d477c1da'}
        volumes.insert().execute(fake_volume)
def _check_267(self, engine, data):
# Make sure the column is non-nullable and the UC exists.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
self.assertTrue(fixed_ips.c.instance_uuid.nullable)
fixed_ip = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
self.assertIsNone(fixed_ip.instance_uuid)
instances = oslodbutils.get_table(engine, 'instances')
self.assertFalse(instances.c.uuid.nullable)
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('instances')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertIn('uniq_instances0uuid', constraint_names)
# Make sure the instances record with the valid uuid is still there.
instance = instances.select(instances.c.id == 1).execute().first()
self.assertIsNotNone(instance)
# Check that the null entry in the volumes table is still there since
# we skipped tables that don't have FK's back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
self.assertTrue(volumes.c.instance_uuid.nullable)
volume = fixed_ips.select(
volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
).execute().first()
self.assertIsNone(volume.instance_uuid)
    def test_migration_267(self):
        """Migration 267 must refuse to run while NULL instance_uuid rows exist.

        This is separate from test_walk_versions so we can test the case
        where there are non-null instance_uuid entries in the database which
        cause the 267 migration to fail.
        """
        engine = self.migrate_engine
        self.migration_api.version_control(
            engine, self.REPOSITORY, self.INIT_VERSION)
        self.migration_api.upgrade(engine, self.REPOSITORY, 266)
        # Create a consoles record with a null instance_uuid so
        # we can test that the upgrade fails if that entry is found.
        # NOTE(mriedem): We use the consoles table since that's the only table
        # created in the 216 migration with a ForeignKey created on the
        # instance_uuid table for sqlite.
        consoles = oslodbutils.get_table(engine, 'consoles')
        fake_console = {'id': 1}
        consoles.insert().execute(fake_console)
        # NOTE(mriedem): We handle the 267 migration where we expect to
        # hit a ValidationError on the consoles table to have
        # a null instance_uuid entry
        ex = self.assertRaises(exception.ValidationError,
                               self.migration_api.upgrade,
                               engine, self.REPOSITORY, 267)
        self.assertIn("There are 1 records in the "
                      "'consoles' table where the uuid or "
                      "instance_uuid column is NULL.",
                      ex.kwargs['detail'])
        # Remove the consoles entry with the null instance_uuid column.
        rows = consoles.delete().where(
            consoles.c['instance_uuid'] == null()).execute().rowcount
        self.assertEqual(1, rows)
        # Now run the 267 upgrade again.
        self.migration_api.upgrade(engine, self.REPOSITORY, 267)
        # Make sure the consoles entry with the null instance_uuid
        # was deleted.
        console = consoles.select(consoles.c.id == 1).execute().first()
        self.assertIsNone(console)
    def _check_268(self, engine, data):
        # We can only assert that the col exists, not the unique constraint
        # as the engine is running sqlite
        self.assertColumnExists(engine, 'compute_nodes', 'host')
        self.assertColumnExists(engine, 'shadow_compute_nodes', 'host')
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        shadow_compute_nodes = oslodbutils.get_table(
            engine, 'shadow_compute_nodes')
        self.assertIsInstance(compute_nodes.c.host.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(shadow_compute_nodes.c.host.type,
                              sqlalchemy.types.String)
    # Migration 269: nullable numa_node Integer on pci_devices + shadow.
    def _check_269(self, engine, data):
        self.assertColumnExists(engine, 'pci_devices', 'numa_node')
        self.assertColumnExists(engine, 'shadow_pci_devices', 'numa_node')
        pci_devices = oslodbutils.get_table(engine, 'pci_devices')
        shadow_pci_devices = oslodbutils.get_table(
            engine, 'shadow_pci_devices')
        self.assertIsInstance(pci_devices.c.numa_node.type,
                              sqlalchemy.types.Integer)
        self.assertTrue(pci_devices.c.numa_node.nullable)
        self.assertIsInstance(shadow_pci_devices.c.numa_node.type,
                              sqlalchemy.types.Integer)
        self.assertTrue(shadow_pci_devices.c.numa_node.nullable)
    # Migration 270: flavor Text column on instance_extra + shadow.
    def _check_270(self, engine, data):
        self.assertColumnExists(engine, 'instance_extra', 'flavor')
        self.assertColumnExists(engine, 'shadow_instance_extra', 'flavor')
        instance_extra = oslodbutils.get_table(engine, 'instance_extra')
        shadow_instance_extra = oslodbutils.get_table(
            engine, 'shadow_instance_extra')
        self.assertIsInstance(instance_extra.c.flavor.type,
                              sqlalchemy.types.Text)
        self.assertIsInstance(shadow_instance_extra.c.flavor.type,
                              sqlalchemy.types.Text)
    # Migration 271: a batch of missing indexes added across tables.
    def _check_271(self, engine, data):
        self.assertIndexMembers(engine, 'block_device_mapping',
                                'snapshot_id', ['snapshot_id'])
        self.assertIndexMembers(engine, 'block_device_mapping',
                                'volume_id', ['volume_id'])
        self.assertIndexMembers(engine, 'dns_domains',
                                'dns_domains_project_id_idx',
                                ['project_id'])
        self.assertIndexMembers(engine, 'fixed_ips',
                                'network_id', ['network_id'])
        self.assertIndexMembers(engine, 'fixed_ips',
                                'fixed_ips_instance_uuid_fkey',
                                ['instance_uuid'])
        self.assertIndexMembers(engine, 'fixed_ips',
                                'fixed_ips_virtual_interface_id_fkey',
                                ['virtual_interface_id'])
        self.assertIndexMembers(engine, 'floating_ips',
                                'fixed_ip_id', ['fixed_ip_id'])
        self.assertIndexMembers(engine, 'iscsi_targets',
                                'iscsi_targets_volume_id_fkey', ['volume_id'])
        self.assertIndexMembers(engine, 'virtual_interfaces',
                                'virtual_interfaces_network_id_idx',
                                ['network_id'])
        self.assertIndexMembers(engine, 'virtual_interfaces',
                                'virtual_interfaces_instance_uuid_fkey',
                                ['instance_uuid'])
        # Removed on MySQL, never existed on other databases
        self.assertIndexNotExists(engine, 'dns_domains', 'project_id')
        self.assertIndexNotExists(engine, 'virtual_interfaces', 'network_id')
    def _pre_upgrade_273(self, engine):
        # Only sqlite loses these constraints; other engines keep them.
        if engine.name != 'sqlite':
            return
        # Drop a variety of unique constraints to ensure that the script
        # properly readds them back
        for table_name, constraint_name in [
                ('compute_nodes', 'uniq_compute_nodes0'
                                  'host0hypervisor_hostname'),
                ('fixed_ips', 'uniq_fixed_ips0address0deleted'),
                ('instance_info_caches', 'uniq_instance_info_caches0'
                                         'instance_uuid'),
                ('instance_type_projects', 'uniq_instance_type_projects0'
                                           'instance_type_id0project_id0'
                                           'deleted'),
                ('pci_devices', 'uniq_pci_devices0compute_node_id0'
                                'address0deleted'),
                ('virtual_interfaces', 'uniq_virtual_interfaces0'
                                       'address0deleted')]:
            table = oslodbutils.get_table(engine, table_name)
            constraints = [c for c in table.constraints
                           if c.name == constraint_name]
            for cons in constraints:
                # Need to use sqlalchemy-migrate UniqueConstraint
                cons = UniqueConstraint(*[c.name for c in cons.columns],
                                        name=cons.name,
                                        table=table)
                cons.drop()
    # Migration 273: every listed (src_table.src_column) must carry a FK to
    # (dst_table.dst_column).
    def _check_273(self, engine, data):
        for src_table, src_column, dst_table, dst_column in [
                ('fixed_ips', 'instance_uuid', 'instances', 'uuid'),
                ('block_device_mapping', 'instance_uuid', 'instances', 'uuid'),
                ('instance_info_caches', 'instance_uuid', 'instances', 'uuid'),
                ('instance_metadata', 'instance_uuid', 'instances', 'uuid'),
                ('instance_system_metadata', 'instance_uuid',
                 'instances', 'uuid'),
                ('instance_type_projects', 'instance_type_id',
                 'instance_types', 'id'),
                ('iscsi_targets', 'volume_id', 'volumes', 'id'),
                ('reservations', 'usage_id', 'quota_usages', 'id'),
                ('security_group_instance_association', 'instance_uuid',
                 'instances', 'uuid'),
                ('security_group_instance_association', 'security_group_id',
                 'security_groups', 'id'),
                ('virtual_interfaces', 'instance_uuid', 'instances', 'uuid'),
                ('compute_nodes', 'service_id', 'services', 'id'),
                ('instance_actions', 'instance_uuid', 'instances', 'uuid'),
                ('instance_faults', 'instance_uuid', 'instances', 'uuid'),
                ('migrations', 'instance_uuid', 'instances', 'uuid')]:
            src_table = oslodbutils.get_table(engine, src_table)
            # Map each local FK column name to the referenced column.
            fkeys = {fk.parent.name: fk.column
                     for fk in src_table.foreign_keys}
            self.assertIn(src_column, fkeys)
            self.assertEqual(fkeys[src_column].table.name, dst_table)
            self.assertEqual(fkeys[src_column].name, dst_column)
    # Migration 274: (project_id, deleted) composite index replaces the old
    # project_id index on instances.
    def _check_274(self, engine, data):
        self.assertIndexMembers(engine, 'instances',
                                'instances_project_id_deleted_idx',
                                ['project_id', 'deleted'])
        self.assertIndexNotExists(engine, 'instances', 'project_id')
    def _pre_upgrade_275(self, engine):
        # Create a keypair record so we can test that the upgrade will set
        # 'ssh' as default value in the new column for the previous keypair
        # entries.
        key_pairs = oslodbutils.get_table(engine, 'key_pairs')
        fake_keypair = {'name': 'test-migr'}
        key_pairs.insert().execute(fake_keypair)
    # Migration 275: key_pairs.type added, existing rows defaulted to 'ssh'.
    def _check_275(self, engine, data):
        self.assertColumnExists(engine, 'key_pairs', 'type')
        self.assertColumnExists(engine, 'shadow_key_pairs', 'type')
        key_pairs = oslodbutils.get_table(engine, 'key_pairs')
        shadow_key_pairs = oslodbutils.get_table(engine, 'shadow_key_pairs')
        self.assertIsInstance(key_pairs.c.type.type,
                              sqlalchemy.types.String)
        self.assertIsInstance(shadow_key_pairs.c.type.type,
                              sqlalchemy.types.String)
        # Make sure the keypair entry will have the type 'ssh'
        key_pairs = oslodbutils.get_table(engine, 'key_pairs')
        keypair = key_pairs.select(
            key_pairs.c.name == 'test-migr').execute().first()
        self.assertEqual('ssh', keypair.type)
    # Migration 276: vcpu_model Text column on instance_extra + shadow.
    def _check_276(self, engine, data):
        self.assertColumnExists(engine, 'instance_extra', 'vcpu_model')
        self.assertColumnExists(engine, 'shadow_instance_extra', 'vcpu_model')
        instance_extra = oslodbutils.get_table(engine, 'instance_extra')
        shadow_instance_extra = oslodbutils.get_table(
            engine, 'shadow_instance_extra')
        self.assertIsInstance(instance_extra.c.vcpu_model.type,
                              sqlalchemy.types.Text)
        self.assertIsInstance(shadow_instance_extra.c.vcpu_model.type,
                              sqlalchemy.types.Text)
    # Migration 277: composite index on fixed_ips.
    def _check_277(self, engine, data):
        self.assertIndexMembers(engine, 'fixed_ips',
                                'fixed_ips_deleted_allocated_updated_at_idx',
                                ['deleted', 'allocated', 'updated_at'])
    # Migration 278: compute_nodes.service_id FK dropped, column nullable.
    def _check_278(self, engine, data):
        compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
        self.assertEqual(0, len([fk for fk in compute_nodes.foreign_keys
                                 if fk.parent.name == 'service_id']))
        self.assertTrue(compute_nodes.c.service_id.nullable)
    # Migration 279: unique constraint on compute_nodes now includes deleted.
    def _check_279(self, engine, data):
        inspector = reflection.Inspector.from_engine(engine)
        constraints = inspector.get_unique_constraints('compute_nodes')
        constraint_names = [constraint['name'] for constraint in constraints]
        self.assertNotIn('uniq_compute_nodes0host0hypervisor_hostname',
                         constraint_names)
        self.assertIn('uniq_compute_nodes0host0hypervisor_hostname0deleted',
                      constraint_names)
    # Migration 280: key_pairs.name made non-nullable.
    def _check_280(self, engine, data):
        key_pairs = oslodbutils.get_table(engine, 'key_pairs')
        self.assertFalse(key_pairs.c.name.nullable)
    def _check_291(self, engine, data):
        # NOTE(danms): This is a dummy migration that just does a consistency
        # check
        pass
    # Migration 292: volumes/iscsi_targets tables (and shadows) removed.
    def _check_292(self, engine, data):
        self.assertTableNotExists(engine, 'iscsi_targets')
        self.assertTableNotExists(engine, 'volumes')
        self.assertTableNotExists(engine, 'shadow_iscsi_targets')
        self.assertTableNotExists(engine, 'shadow_volumes')
    def _pre_upgrade_293(self, engine):
        # Seed one migrations row so _check_293 can inspect its defaults.
        migrations = oslodbutils.get_table(engine, 'migrations')
        fake_migration = {}
        migrations.insert().execute(fake_migration)
    # Migration 293: migration_type added (NULL on old rows), hidden False.
    def _check_293(self, engine, data):
        self.assertColumnExists(engine, 'migrations', 'migration_type')
        self.assertColumnExists(engine, 'shadow_migrations', 'migration_type')
        migrations = oslodbutils.get_table(engine, 'migrations')
        fake_migration = migrations.select().execute().first()
        self.assertIsNone(fake_migration.migration_type)
        self.assertFalse(fake_migration.hidden)
    # Migration 294: last_seen_up DateTime column on services + shadow.
    def _check_294(self, engine, data):
        self.assertColumnExists(engine, 'services', 'last_seen_up')
        self.assertColumnExists(engine, 'shadow_services', 'last_seen_up')
        services = oslodbutils.get_table(engine, 'services')
        shadow_services = oslodbutils.get_table(
            engine, 'shadow_services')
        self.assertIsInstance(services.c.last_seen_up.type,
                              sqlalchemy.types.DateTime)
        self.assertIsInstance(shadow_services.c.last_seen_up.type,
                              sqlalchemy.types.DateTime)
# Concrete runner: executes the shared checkers against in-memory sqlite.
class TestNovaMigrationsSQLite(NovaMigrationsCheckers,
                               test_base.DbTestCase,
                               test.NoDBTestCase):
    pass
# Concrete runner: executes the shared checkers opportunistically against a
# local MySQL 'openstack_citest' database, when available.
class TestNovaMigrationsMySQL(NovaMigrationsCheckers,
                              test_base.MySQLOpportunisticTestCase,
                              test.NoDBTestCase):
    def test_innodb_tables(self):
        # Sync the schema on the MySQL test engine, then verify every table
        # (except the sqlalchemy-migrate control table) uses InnoDB.
        with mock.patch.object(sa_migration, 'get_engine',
                               return_value=self.migrate_engine):
            sa_migration.db_sync()
        total = self.migrate_engine.execute(
            "SELECT count(*) "
            "FROM information_schema.TABLES "
            "WHERE TABLE_SCHEMA = '%(database)s'" %
            {'database': self.migrate_engine.url.database})
        self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
        noninnodb = self.migrate_engine.execute(
            "SELECT count(*) "
            "FROM information_schema.TABLES "
            "WHERE TABLE_SCHEMA='%(database)s' "
            "AND ENGINE != 'InnoDB' "
            "AND TABLE_NAME != 'migrate_version'" %
            {'database': self.migrate_engine.url.database})
        count = noninnodb.scalar()
        self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
# Concrete runner: executes the shared checkers opportunistically against a
# local PostgreSQL 'openstack_citest' database, when available.
class TestNovaMigrationsPostgreSQL(NovaMigrationsCheckers,
                                   test_base.PostgreSQLOpportunisticTestCase,
                                   test.NoDBTestCase):
    pass
class ProjectTestCase(test.NoDBTestCase):
    def test_no_migrations_have_downgrade(self):
        """No migration script may define both upgrade() and downgrade().

        Downgrades are unsupported; any versions file still carrying a
        downgrade() alongside its upgrade() is reported by file name.
        """
        topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
        py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy",
                               "migrate_repo", "versions", "*.py")
        includes_downgrade = []
        for path in glob.iglob(py_glob):
            with open(path, "r") as script:
                contents = script.readlines()
            defines_upgrade = any('def upgrade(' in ln for ln in contents)
            defines_downgrade = any('def downgrade(' in ln for ln in contents)
            if defines_upgrade and defines_downgrade:
                includes_downgrade.append(os.path.basename(path))
        helpful_msg = ("The following migrations have a downgrade "
                       "which is not supported:"
                       "\n\t%s" % '\n\t'.join(sorted(includes_downgrade)))
        self.assertFalse(includes_downgrade, helpful_msg)
| |
from .. import query_processor
from nose.tools import assert_true, assert_equal
from numpy import random
import ast
# Build a random fixture: 100 tracts, each crossing two random labels drawn
# from [0, 100). Re-draw until both label 0 and label 1 are populated so the
# tests below can rely on labels_tracts[0] existing.
another_set = True
while (another_set):
    tracts_labels = dict([(i, set(random.randint(100, size=2))) for i in xrange(100)])
    labels_tracts = query_processor.labels_for_tracts(tracts_labels)
    another_set = 0 not in labels_tracts.keys() or 1 not in labels_tracts.keys()
# Tracts crossing label 0, tracts crossing any other label, and the tracts
# that cross label 0 exclusively.
tracts_in_0 = set().union(*[labels_tracts[label] for label in labels_tracts if label == 0])
tracts_in_all_but_0 = set().union(*[labels_tracts[label] for label in labels_tracts if label != 0])
tract_in_label_0_uniquely = labels_tracts[0].difference(tracts_in_all_but_0)
class DummySpatialIndexing:
    """Minimal stand-in for the real spatial indexing object.

    Only carries the attributes the query evaluator reads; no behavior.
    """
    def __init__(
        self,
        crossing_tracts_labels, crossing_labels_tracts,
        ending_tracts_labels, ending_labels_tracts,
        label_bounding_boxes, tract_bounding_boxes
    ):
        self.crossing_tracts_labels = crossing_tracts_labels
        self.crossing_labels_tracts = crossing_labels_tracts
        self.ending_tracts_labels = ending_tracts_labels
        self.ending_labels_tracts = ending_labels_tracts
        self.label_bounding_boxes = label_bounding_boxes
        self.tract_bounding_boxes = tract_bounding_boxes
# One indexing backed by the random fixture above, one with no data at all.
dummy_spatial_indexing = DummySpatialIndexing(tracts_labels, labels_tracts, ({}, {}), ({}, {}), {}, {})
empty_spatial_indexing = DummySpatialIndexing({}, {}, ({}, {}), ({}, {}), {}, {})
def test_assign():
    """A plain assignment is saved and evaluates to label 0's tracts."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A=0"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == labels_tracts[0]
                and info['A'].labels == set([0]))
def test_assign_attr():
    """Assignment to a dotted name ('a.left') is saved under that full name."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("a.left=0"))
    info = evaluator.evaluated_queries_info
    assert_true('a.left' in evaluator.queries_to_save
                and info['a.left'].tracts == labels_tracts[0]
                and info['a.left'].labels == set([0]))
def test_assign_side():
    """'.side'/'.opposite' suffixes expand into the .left/.right queries."""
    query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
    # Expected labels per saved query name.
    queries_labels = {
        'a.left': set([3, 6]),
        'a.right': set([4, 5]),
        'b.left': set([3]),
        'b.right': set([4]),
        'c.left': set([5]),
        'c.right': set([6])
    }
    # With an empty spatial indexing every query resolves to no tracts.
    queries_tracts = {
        'a.left': set([]),
        'a.right': set([]),
        'b.left': set([]),
        'b.right': set([]),
        'c.left': set([]),
        'c.right': set([])
    }
    query = r"""
b.left=3 ;
b.right = 4;
c.left = 5;
c.right = 6;
a.side = b.side or c.opposite
"""
    query_evaluator.visit(ast.parse(query))
    # Use .items() instead of the Python-2-only .iteritems() so the module
    # also runs under Python 3; the resulting dicts are identical.
    assert_equal({k: v.labels for k, v in query_evaluator.evaluated_queries_info.items()}, queries_labels)
    assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_assign_str():
    """A string query ('*left') collects every saved name matching the glob."""
    query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
    queries_labels = {
        'b.left': set([3]),
        'b.right': set([4]),
        'c.left': set([5]),
        'c.right': set([6]),
        'h': set([3, 5])
    }
    # With an empty spatial indexing every query resolves to no tracts.
    queries_tracts = {
        'b.left': set([]),
        'b.right': set([]),
        'c.left': set([]),
        'c.right': set([]),
        'h': set([])
    }
    query = """
b.left=3
b.right = 4
c.left = 5
c.right = 6
h = '*left'
"""
    query_evaluator.visit(ast.parse(query))
    # .items() instead of the Python-2-only .iteritems(): equivalent here,
    # and keeps the module runnable under Python 3.
    assert_equal({k: v.labels for k, v in query_evaluator.evaluated_queries_info.items()}, queries_labels)
    assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_for_list():
    """A 'for' over a list of names applies the loop body to each of them."""
    query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
    # With an empty spatial indexing every query resolves to no tracts.
    queries_tracts = {
        'a.left': set([]),
        'a.right': set([]),
        'b.left': set([]),
        'b.right': set([]),
        'c.left': set([]),
        'c.right': set([]),
        'd.left': set([]),
        'd.right': set([]),
        'e.left': set([]),
        'e.right': set([])
    }
    query = """
a.left= 0
b.left= 1
c.left= 2
d.left= 3
e.left= 4
for i in [a,b,c,d,e]: i.right = i.left
"""
    query_evaluator.visit(ast.parse(query))
    # .items() instead of the Python-2-only .iteritems(): equivalent here,
    # and keeps the module runnable under Python 3.
    assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_for_str():
    """A 'for' over a glob string iterates the saved names matching it."""
    query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
    # With an empty spatial indexing every query resolves to no tracts.
    queries_tracts = {
        'a.left': set([]),
        'a.left.right': set([]),
        'b.left': set([]),
        'b.left.right': set([]),
        'c.left': set([]),
        'c.left.right': set([]),
        'd.left': set([]),
        'd.left.right': set([]),
        'e.left': set([]),
        'e.left.right': set([])
    }
    query = """
a.left= 0
b.left= 1
c.left= 2
d.left= 3
e.left= 4
for i in '*left': i.right = i
"""
    query_evaluator.visit(ast.parse(query))
    # .items() instead of the Python-2-only .iteritems(): equivalent here,
    # and keeps the module runnable under Python 3.
    assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_add():
    """'+' unions both the tracts and the labels of its operands."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A=0+1"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == labels_tracts[0].union(labels_tracts[1])
                and info['A'].labels == set([0, 1]))
def test_mult():
    """'*' intersects the tracts while unioning the labels."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A=0 * 1"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == labels_tracts[0].intersection(labels_tracts[1])
                and info['A'].labels == set([0, 1]))
def test_sub():
    """'-' removes the right operand's tracts and labels from the left."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A=(0 + 1) - 1"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == labels_tracts[0].difference(labels_tracts[1])
                and info['A'].labels == set([0]))
def test_or():
    """'or' behaves like '+': union of tracts and labels."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A=0 or 1"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == labels_tracts[0].union(labels_tracts[1])
                and info['A'].labels == set([0, 1]))
def test_and():
    """'and' behaves like '*': intersection of tracts, union of labels."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A=0 and 1"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == labels_tracts[0].intersection(labels_tracts[1])
                and info['A'].labels == set([0, 1]))
def test_not_in():
    """'not in' excludes the right operand's tracts from the result."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A=0 or 1 not in 1"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == labels_tracts[0].difference(labels_tracts[1])
                and info['A'].labels == set([0]))
def test_only_sign():
    """'~' keeps only tracts that cross label 0 and no other label."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A=~0"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == tract_in_label_0_uniquely
                and info['A'].labels == set([0]))
def test_only():
    """only(0) is the functional spelling of the '~' operator."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A=only(0)"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == tract_in_label_0_uniquely
                and info['A'].labels == set([0]))
def test_unsaved_query():
    """'|=' evaluates the query but keeps it out of queries_to_save."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A|=0"))
    info = evaluator.evaluated_queries_info
    assert_true('A' not in evaluator.queries_to_save
                and info['A'].tracts == labels_tracts[0]
                and info['A'].labels == set([0]))
def test_symbolic_assignment():
    """Assigning one saved name to another copies its evaluation."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A=0; B=A"))
    info = evaluator.evaluated_queries_info
    assert_true('B' in evaluator.queries_to_save
                and info['B'].tracts == labels_tracts[0]
                and info['B'].labels == set([0]))
def test_unarySub():
    """Unary '-' complements a query against all labels/tracts."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("B=0; A=-B"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == tracts_in_all_but_0
                and info['A'].labels == set(labels_tracts).difference([0]))
def test_not():
    """'not' is the keyword spelling of the unary '-' complement."""
    evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
    evaluator.visit(ast.parse("A= not 0"))
    info = evaluator.evaluated_queries_info
    assert_true('A' in evaluator.queries_to_save
                and info['A'].tracts == tracts_in_all_but_0
                and info['A'].labels == set(labels_tracts).difference([0]))
| |
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""
Integrates roslaunch remote process launching capabilities.
"""
import logging
import socket
import time
import rosgraph.network as network
import roslaunch.config
import roslaunch.remoteprocess
from roslaunch.remoteprocess import SSHChildROSLaunchProcess
import roslaunch.launch
import roslaunch.server #ROSLaunchParentNode hidden dep
from roslaunch.core import RLException, is_machine_local, printerrlog, printlog
_CHILD_REGISTER_TIMEOUT = 10.0 #seconds
class ROSRemoteRunner(roslaunch.launch.ROSRemoteRunnerIF):
    """
    Manages the running of remote roslaunch children
    """

    def __init__(self, run_id, rosconfig, pm, server):
        """
        :param run_id: roslaunch run_id of this runner, ``str``
        :param rosconfig: launch configuration, ``ROSConfig``
        :param pm: process monitor, ``ProcessMonitor``
        :param server: roslaunch parent server, ``ROSLaunchParentNode``
        """
        self.run_id = run_id
        self.rosconfig = rosconfig
        self.server = server
        self.pm = pm
        self.logger = logging.getLogger('roslaunch.remote')

        self.listeners = []
        # both populated by start_children()
        self.machine_list = []
        self.remote_processes = []

    def add_process_listener(self, l):
        """
        Listen to events about remote processes dying. Not
        threadsafe. Must be called before processes started.
        :param l: ProcessListener
        """
        self.listeners.append(l)

    def _start_child(self, server_node_uri, machine, counter):
        """
        Start a single remote roslaunch child over SSH and register it
        with the process monitor and the parent server.

        :returns: the started ``SSHChildROSLaunchProcess``
        :raises: :exc:`RLException` if the child fails to start
        """
        # generate a name for the machine. don't use config key as
        # it's too long to easily display
        name = "%s-%s"%(machine.address, counter)

        self.logger.info("remote[%s] starting roslaunch", name)
        printlog("remote[%s] starting roslaunch"%name)

        p = SSHChildROSLaunchProcess(self.run_id, name, server_node_uri, machine, self.rosconfig.master.uri)
        success = p.start()
        self.pm.register(p)
        if not success: #treat as fatal
            raise RLException("unable to start remote roslaunch child: %s"%name)
        self.server.add_child(name, p)
        return p

    def start_children(self):
        """
        Start the child roslaunch processes
        """
        server_node_uri = self.server.uri
        if not server_node_uri:
            raise RLException("server URI is not initialized")

        # TODOXXX: break out table building code into a separate
        # routine so we can unit test it _start_child() should not be
        # determining the process name

        # Build table of unique machines that we are going to launch on
        machines = {}
        for n in self.rosconfig.nodes:
            if not is_machine_local(n.machine):
                machines[n.machine.config_key()] = n.machine

        # Launch child roslaunch processes on remote machines
        counter = 0
        # - keep a list of procs so we can check for those that failed to launch
        procs = []
        for m in machines:
            p = self._start_child(server_node_uri, machines[m], counter)
            procs.append(p)
            counter += 1

        # Wait for all children to call register() callback. The machines can have
        # non-uniform registration timeouts. We consider the failure to occur once
        # one of the machines has failed to meet its timeout.
        start_t = time.time()
        while True:
            pending = []
            for p in procs:
                if not p.is_alive():
                    raise RLException("remote roslaunch failed to launch: %s"%p.machine.name)
                elif not p.uri:
                    pending.append(p.machine)
            if not pending:
                break
            # timeout is the minimum of the remaining timeouts of the machines
            timeout_t = start_t + min([m.timeout for m in pending])
            if time.time() > timeout_t:
                break
            time.sleep(0.1)
        if pending:
            raise RLException(
                """The following roslaunch remote processes failed to register:
%s
If this is a network latency issue, you may wish to consider setting
    <machine timeout="NUMBER OF SECONDS" ... />
in your launch"""%'\n'.join([" * %s (timeout %ss)"%(m.name, m.timeout) for m in pending]))

        # convert machine dictionary to a list; list() is required on
        # Python 3 where dict.values() is a view, not a list
        self.machine_list = list(machines.values())
        # save a list of the remote processes
        self.remote_processes = procs

    def _assume_failed(self, nodes, failed):
        """
        Utility routine for logging/recording nodes that failed
        :param nodes: list of nodes that are assumed to have failed, ``Node``
        :param failed: list of names of nodes that have failed to extend, ``[str]``
        """
        str_nodes = ["%s/%s"%(n.package, n.type) for n in nodes]
        failed.extend(str_nodes)
        printerrlog("Launch of the following nodes most likely failed: %s"%', '.join(str_nodes))

    def launch_remote_nodes(self):
        """
        Contact each child to launch remote nodes
        """
        succeeded = []
        failed = []

        # initialize remote_nodes. we use the machine config key as
        # the key for the dictionary so that we can bin the nodes.
        self.remote_nodes = {}
        for m in self.machine_list:
            self.remote_nodes[m.config_key()] = []

        # build list of nodes that will be launched by machine
        nodes = [x for x in self.rosconfig.nodes if not is_machine_local(x.machine)]
        for n in nodes:
            self.remote_nodes[n.machine.config_key()].append(n)

        for child in self.remote_processes:
            nodes = self.remote_nodes[child.machine.config_key()]
            body = '\n'.join([n.to_remote_xml() for n in nodes])
            # #3799: force utf-8 encoding
            xml = '<?xml version="1.0" encoding="utf-8"?>\n<launch>\n%s</launch>'%body
            api = child.getapi()
            # TODO: timeouts
            try:
                self.logger.debug("sending [%s] XML [\n%s\n]"%(child.uri, xml))
                code, msg, val = api.launch(xml)
                if code == 1:
                    c_succ, c_fail = val
                    succeeded.extend(c_succ)
                    failed.extend(c_fail)
                else:
                    printerrlog('error launching on [%s, uri %s]: %s'%(child.name, child.uri, msg))
                    self._assume_failed(nodes, failed)
            # BUGFIX: socket.gaierror is a subclass of socket.error, so it
            # must be caught *before* socket.error -- the previous ordering
            # made this branch unreachable. Also, ``errno, msg = e`` tuple
            # unpacking of exceptions is invalid on Python 3; use the
            # exception object directly.
            except socket.gaierror:
                # usually errno == -2. See #815.
                child_host, _ = network.parse_http_host_and_port(child.uri)
                printerrlog("Unable to contact remote roslaunch at [%s]. This is most likely due to a network misconfiguration with host lookups. Please make sure that you can contact '%s' from this machine"%(child.uri, child_host))
                self._assume_failed(nodes, failed)
            except socket.error as e:
                printerrlog('error launching on [%s, uri %s]: %s'%(child.name, child.uri, str(e)))
                self._assume_failed(nodes, failed)
            except Exception as e:
                printerrlog('error launching on [%s, uri %s]: %s'%(child.name, child.uri, str(e)))
                self._assume_failed(nodes, failed)

        return succeeded, failed
| |
# Copyright 2013 Mirantis Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
from unittest import mock
from neutronclient.neutron.v2_0 import agentscheduler
from neutronclient.neutron.v2_0 import network
from neutronclient.tests.unit import test_cli20
# Fixture identifiers reused as CLI positional arguments across the test cases.
AGENT_ID = 'agent_id1'
NETWORK_ID = 'net_id1'
ROUTER_ID = 'router_id1'
class CLITestV20AgentScheduler(test_cli20.CLITestV20Base):
    """Shared helpers for the agent-scheduler CLI tests.

    Subclasses exercise concrete add/remove/list commands; the helpers here
    run a command against a mocked HTTP client and verify the exact request
    that would have been issued.
    """

    def _test_add_to_agent(self, resource, cmd, cmd_args, destination,
                           body, result):
        """Run an 'add to agent' command and verify the POST it issues.

        resource: resource name used to build the parser name.
        cmd: command instance under test.
        cmd_args: CLI positional args; cmd_args[0] is the agent id.
        destination: path suffix appended to the agent path (e.g. DHCP_NETS).
        body: expected request body.
        result: payload the mocked server returns.
        """
        # assumes self.client.agent_path contains a single '%s' slot for
        # the agent id -- TODO confirm against test_cli20
        path = ((self.client.agent_path + destination) %
                cmd_args[0])
        result_str = self.client.serialize(result)
        return_tup = (test_cli20.MyResp(200), result_str)

        cmd_parser = cmd.get_parser('test_' + resource)
        parsed_args = cmd_parser.parse_args(cmd_args)

        # Patch both the client factory and the transport so the command
        # runs end-to-end without any network access.
        with mock.patch.object(cmd, "get_client",
                               return_value=self.client) as mock_get_client, \
                mock.patch.object(self.client.httpclient, "request",
                                  return_value=return_tup) as mock_request:
            cmd.run(parsed_args)

        mock_get_client.assert_called_once_with()
        mock_request.assert_called_once_with(
            test_cli20.end_url(path), 'POST',
            body=test_cli20.MyComparator(body, self.client),
            headers=test_cli20.ContainsKeyValue(
                {'X-Auth-Token': test_cli20.TOKEN}))

    def _test_remove_from_agent(self, resource, cmd, cmd_args, destination):
        """Run a 'remove from agent' command and verify the DELETE it issues.

        cmd_args is interpolated whole into the path: the agent path's '%s'
        plus the trailing '/%s' consume its two elements.
        """
        path = ((self.client.agent_path + destination + '/%s') %
                cmd_args)
        return_tup = (test_cli20.MyResp(204), None)

        cmd_parser = cmd.get_parser('test_' + resource)
        parsed_args = cmd_parser.parse_args(cmd_args)

        with mock.patch.object(cmd, "get_client",
                               return_value=self.client) as mock_get_client, \
                mock.patch.object(self.client.httpclient, "request",
                                  return_value=return_tup) as mock_request:
            cmd.run(parsed_args)

        mock_get_client.assert_called_once_with()
        mock_request.assert_called_once_with(
            test_cli20.end_url(path), 'DELETE',
            body=None,
            headers=test_cli20.ContainsKeyValue(
                {'X-Auth-Token': test_cli20.TOKEN}))
class CLITestV20DHCPAgentScheduler(CLITestV20AgentScheduler):
    """Scheduling of networks onto DHCP agents."""

    def test_add_network_to_agent(self):
        """Adding a network POSTs a network_id body to the agent's DHCP_NETS."""
        command = agentscheduler.AddNetworkToDhcpAgent(
            test_cli20.MyApp(sys.stdout), None)
        self._test_add_to_agent(
            'agent', command, (AGENT_ID, NETWORK_ID), self.client.DHCP_NETS,
            {'network_id': NETWORK_ID}, {'network_id': 'net_id', })

    def test_remove_network_from_agent(self):
        """Removing a network issues a DELETE under the agent's DHCP_NETS."""
        command = agentscheduler.RemoveNetworkFromDhcpAgent(
            test_cli20.MyApp(sys.stdout), None)
        self._test_remove_from_agent(
            'agent', command, (AGENT_ID, NETWORK_ID), self.client.DHCP_NETS)

    @mock.patch.object(network.ListNetwork, "extend_list")
    def test_list_networks_on_agent(self, mock_extend_list):
        """Listing an agent's networks GETs the agent's DHCP_NETS collection."""
        command = agentscheduler.ListNetworksOnDhcpAgent(
            test_cli20.MyApp(sys.stdout), None)
        url = (self.client.agent_path + self.client.DHCP_NETS) % 'agent_id1'
        self._test_list_resources('networks', command,
                                  base_args=['agent_id1'], path=url)
        mock_extend_list.assert_called_once_with(test_cli20.IsA(list),
                                                 mock.ANY)

    def test_list_agents_hosting_network(self):
        """Listing hosting agents GETs DHCP_AGENTS under the network path."""
        command = agentscheduler.ListDhcpAgentsHostingNetwork(
            test_cli20.MyApp(sys.stdout), None)
        # NOTE(review): the fixture reuses the 'agent_id1' string as the
        # network id here, mirroring the original test.
        url = (self.client.network_path + self.client.DHCP_AGENTS) % 'agent_id1'
        expected = {self.id_field: 'myid1', 'alive': True}
        self._test_list_resources('agent', command, base_args=['agent_id1'],
                                  path=url, response_contents=expected)
class CLITestV20L3AgentScheduler(CLITestV20AgentScheduler):
    """Scheduling of routers onto L3 agents."""

    def test_add_router_to_agent(self):
        """Adding a router POSTs a router_id body to the agent's L3_ROUTERS."""
        command = agentscheduler.AddRouterToL3Agent(
            test_cli20.MyApp(sys.stdout), None)
        # NOTE(review): the mocked response body intentionally matches the
        # original fixture ('network_id'), even for the router command.
        self._test_add_to_agent(
            'agent', command, (AGENT_ID, ROUTER_ID), self.client.L3_ROUTERS,
            {'router_id': ROUTER_ID}, {'network_id': 'net_id', })

    def test_remove_router_from_agent(self):
        """Removing a router issues a DELETE under the agent's L3_ROUTERS."""
        command = agentscheduler.RemoveRouterFromL3Agent(
            test_cli20.MyApp(sys.stdout), None)
        self._test_remove_from_agent(
            'agent', command, (AGENT_ID, ROUTER_ID), self.client.L3_ROUTERS)

    def test_list_routers_on_agent(self):
        """Listing an agent's routers GETs the agent's L3_ROUTERS collection."""
        command = agentscheduler.ListRoutersOnL3Agent(
            test_cli20.MyApp(sys.stdout), None)
        url = (self.client.agent_path + self.client.L3_ROUTERS) % 'agent_id1'
        expected = {self.id_field: 'myid1', 'name': 'my_name'}
        self._test_list_resources('router', command, base_args=['agent_id1'],
                                  path=url, response_contents=expected)

    def test_list_agents_hosting_router(self):
        """Listing hosting agents GETs L3_AGENTS under the router path."""
        command = agentscheduler.ListL3AgentsHostingRouter(
            test_cli20.MyApp(sys.stdout), None)
        # NOTE(review): the 'agent_id1' string is actually used as the
        # router id here, mirroring the original test.
        url = (self.client.router_path + self.client.L3_AGENTS) % 'agent_id1'
        expected = {self.id_field: 'myid1', 'alive': True}
        self._test_list_resources('agent', command, base_args=['agent_id1'],
                                  path=url, response_contents=expected)
class CLITestV20LBaaSAgentScheduler(test_cli20.CLITestV20Base):
    """LBaaS v1: pools hosted by loadbalancer agents."""

    def test_list_pools_on_agent(self):
        """Listing an agent's pools GETs LOADBALANCER_POOLS under its path."""
        command = agentscheduler.ListPoolsOnLbaasAgent(
            test_cli20.MyApp(sys.stdout), None)
        url = ((self.client.agent_path + self.client.LOADBALANCER_POOLS) %
               'agent_id1')
        self._test_list_resources('pools', command, base_args=['agent_id1'],
                                  path=url)

    def test_get_lbaas_agent_hosting_pool(self):
        """Fetching a pool's agent GETs LOADBALANCER_AGENT under the pool path."""
        command = agentscheduler.GetLbaasAgentHostingPool(
            test_cli20.MyApp(sys.stdout), None)
        url = ((self.client.pool_path + self.client.LOADBALANCER_AGENT) %
               'pool_id1')
        expected = {self.id_field: 'myid1', 'alive': True}
        self._test_list_resources('agent', command, base_args=['pool_id1'],
                                  path=url, response_contents=expected)
class CLITestV20LBaaSV2AgentScheduler(test_cli20.CLITestV20Base):
    """LBaaS v2: load balancers hosted by agents."""

    def test_list_loadbalancers_on_agent(self):
        """Listing an agent's LBs GETs AGENT_LOADBALANCERS under its path."""
        command = agentscheduler.ListLoadBalancersOnLbaasAgent(
            test_cli20.MyApp(sys.stdout), None)
        url = ((self.client.agent_path + self.client.AGENT_LOADBALANCERS) %
               'agent_id1')
        self._test_list_resources('loadbalancers', command,
                                  base_args=['agent_id1'], path=url)

    # NOTE(review): method name kept unchanged (test discovery relies on it),
    # but this case exercises the agent hosting a *load balancer*, not a pool.
    def test_get_lbaas_agent_hosting_pool(self):
        command = agentscheduler.GetLbaasAgentHostingLoadBalancer(
            test_cli20.MyApp(sys.stdout), None)
        url = ((self.client.lbaas_loadbalancer_path +
                self.client.LOADBALANCER_HOSTING_AGENT) % 'lb_id1')
        expected = {self.id_field: 'myid1', 'alive': True}
        self._test_list_resources('agent', command, base_args=['lb_id1'],
                                  path=url, response_contents=expected)
| |
"""
CmdSethandler
The Cmdsethandler tracks an object's 'Current CmdSet', which is the
current merged sum of all CmdSets added to it.
A CmdSet constitutes a set of commands. The CmdSet works as a special
intelligent container that, when added to other CmdSet make sure that
same-name commands are treated correctly (usually so there are no
doublets). This temporary but up-to-date merger of CmdSet is jointly
called the Current CmdSet. It is this Current CmdSet that the
commandhandler looks through whenever a player enters a command (it
also adds CmdSets from objects in the room in real-time). All player
objects have a 'default cmdset' containing all the normal in-game mud
commands (look etc).
So what is all this cmdset complexity good for?
In its simplest form, a CmdSet has no commands, only a key name. In
this case the cmdset's use is up to each individual game - it can be
used by an AI module for example (mobs in cmdset 'roam' move from room
to room, in cmdset 'attack' they enter combat with players).
Defining commands in cmdsets offer some further powerful game-design
consequences however. Here are some examples:
As mentioned above, all players always have at least the Default
CmdSet. This contains the set of all normal-use commands in-game,
stuff like look and @desc etc. Now assume our players end up in a dark
room. You don't want the player to be able to do much in that dark
room unless they light a candle. You could handle this by changing all
your normal commands to check if the player is in a dark room. This
rapidly becomes unwieldy and error-prone. Instead you just define a
cmdset with only those commands you want to be available in the 'dark'
cmdset - maybe a modified look command and a 'light candle' command -
and have this completely replace the default cmdset.
Another example: Say you want your players to be able to go
fishing. You could implement this as a 'fish' command that fails
whenever the player has no fishing rod. Easy enough. But what if you
want to make fishing more complex - maybe you want four-five different
commands for throwing your line, reeling in, etc? Most players won't
(we assume) have fishing gear, and having all those detailed commands
is cluttering up the command list. And what if you want to use the
'throw' command also for throwing rocks etc instead of 'using it up'
for a minor thing like fishing?
So instead you put all those detailed fishing commands into their own
CommandSet called 'Fishing'. Whenever the player gives the command
'fish' (presumably the code checks there is also water nearby), only
THEN this CommandSet is added to the Cmdhandler of the player. The
'throw' command (which normally throws rocks) is replaced by the
custom 'fishing variant' of throw. What has happened is that the
Fishing CommandSet was merged on top of the Default ones, and due to
how we defined it, its command overrules the default ones.
When we are tired of fishing, we give the 'go home' command (or
whatever) and the Cmdhandler simply removes the fishing CommandSet
so that we are back at defaults (and can throw rocks again).
Since any number of CommandSets can be piled on top of each other, you
can then implement separate sets for different situations. For
example, you can have a 'On a boat' set, onto which you then tack on
the 'Fishing' set. Fishing from a boat? No problem!
"""
from django.conf import settings
from src.utils import logger, utils
from src.commands.cmdset import CmdSet
from src.server.models import ServerConfig
from django.utils.translation import ugettext as _
__all__ = ("import_cmdset", "CmdSetHandler")
_CACHED_CMDSETS = {}
_CMDSET_PATHS = utils.make_iter(settings.CMDSET_PATHS)
class _ErrorCmdSet(CmdSet):
    """Special cmdset returned by import_cmdset to report loading errors."""
    key = "_CMDSET_ERROR"
    # Default message; import_cmdset overwrites this with the actual error.
    errmessage = "Error when loading cmdset."
class _EmptyCmdSet(CmdSet):
    """Placeholder representing an empty cmdset (used as the stack's base)."""
    key = "_EMPTY_CMDSET"
    # Very low priority so any real cmdset merged on top takes precedence.
    priority = -101
    mergetype = "Union"
def import_cmdset(path, cmdsetobj, emit_to_obj=None, no_logging=False):
    """
    This helper function is used by the cmdsethandler to load a cmdset
    instance from a python module, given a python_path. It's usually accessed
    through the cmdsethandler's add() and add_default() methods.

    path - the full path to the cmdset object on python dot-form
    cmdsetobj - the database object/typeclass on which this cmdset is to be
            assigned (this can be also channels and exits, as well as players
            but there will always be such an object)
    emit_to_obj - if given, error is emitted to this object (in addition
                  to logging)
    no_logging - don't log/send error messages. This can be useful
                 if import_cmdset is just used to check if this is a
                 valid python path or not.

    Returns the instantiated cmdset, or an _ErrorCmdSet (never None) if an
    error was encountered or the path was not found.
    """
    python_paths = [path] + ["%s.%s" % (prefix, path)
                             for prefix in _CMDSET_PATHS if not path.startswith(prefix)]
    errstring = ""
    for python_path in python_paths:
        try:
            wanted_cache_key = python_path
            # BUGFIX: split the path *before* consulting the cache, so
            # modulepath/classname are always defined in the error handlers
            # below (previously a cached class whose instantiation raised
            # would hit a NameError, or report a stale module name).
            modulepath, classname = python_path.rsplit('.', 1)
            cmdsetclass = _CACHED_CMDSETS.get(wanted_cache_key, None)
            errstring = ""
            if not cmdsetclass:
                # Not in cache. Reload from disk.
                module = __import__(modulepath, fromlist=[True])
                cmdsetclass = module.__dict__[classname]
                _CACHED_CMDSETS[wanted_cache_key] = cmdsetclass
            # instantiate the cmdset (and catch its errors)
            if callable(cmdsetclass):
                cmdsetclass = cmdsetclass(cmdsetobj)
            return cmdsetclass
        # ``except X as e`` is valid from Python 2.6 on and required by
        # Python 3 (the old ``except X, e`` form was removed).
        except ImportError as e:
            errstring += _("Error loading cmdset '%s': %s.")
            errstring = errstring % (modulepath, e)
        except KeyError:
            errstring += _("Error in loading cmdset: No cmdset class '%(classname)s' in %(modulepath)s.")
            errstring = errstring % {"classname": classname,
                                     "modulepath": modulepath}
        except SyntaxError as e:
            errstring += _("SyntaxError encountered when loading cmdset '%s': %s.")
            errstring = errstring % (modulepath, e)
        except Exception as e:
            errstring += _("Compile/Run error when loading cmdset '%s': %s.")
            errstring = errstring % (python_path, e)
    if errstring:
        # returning an empty error cmdset
        if not no_logging:
            logger.log_errmsg(errstring)
            if emit_to_obj and not ServerConfig.objects.conf("server_starting_mode"):
                emit_to_obj.msg(errstring)
        err_cmdset = _ErrorCmdSet()
        err_cmdset.errmessage = errstring + _("\n (See log for details.)")
        return err_cmdset
# classes
class CmdSetHandler(object):
"""
The CmdSetHandler is always stored on an object, this object is supplied
as an argument.
The 'current' cmdset is the merged set currently active for this object.
This is the set the game engine will retrieve when determining which
commands are available to the object. The cmdset_stack holds a history of
all CmdSets to allow the handler to remove/add cmdsets at will. Doing so
will re-calculate the 'current' cmdset.
"""
    def __init__(self, obj, init_true=True):
        """
        This method is called whenever an object is recreated.

        obj - this is a reference to the game object this handler
              belongs to.
        init_true - if set, immediately build the merged 'current'
              cmdset, importing permanent cmdsets from the database.
        """
        self.obj = obj

        # the id of the "merged" current cmdset for easy access.
        self.key = None
        # this holds the "merged" current command set
        self.current = None
        # this holds a history of CommandSets
        self.cmdset_stack = [_EmptyCmdSet(cmdsetobj=self.obj)]
        # this tracks which mergetypes are actually in play in the stack
        self.mergetype_stack = ["Union"]

        # the subset of the cmdset_paths that are to be stored in the database
        self.permanent_paths = [""]

        if init_true:
            self.update(init_mode=True) #is then called from the object __init__.
    def __str__(self):
        """Display current commands: the full stack (if more than one
        cmdset is merged) followed by the resulting 'current' cmdset."""
        string = ""
        mergelist = []
        if len(self.cmdset_stack) > 1:
            # We have more than one cmdset in stack; list them all
            #print self.cmdset_stack, self.mergetype_stack
            for snum, cmdset in enumerate(self.cmdset_stack):
                mergetype = self.mergetype_stack[snum]
                permstring = "non-perm"
                if cmdset.permanent:
                    permstring = "perm"
                # '^' marks a mergetype overruled during the actual merge
                if mergetype != cmdset.mergetype:
                    mergetype = "%s^" % (mergetype)
                string += "\n %i: <%s (%s, prio %i, %s)>: %s" % \
                    (snum, cmdset.key, mergetype,
                     cmdset.priority, permstring, cmdset)
                mergelist.append(str(snum))
            string += "\n"

        # Display the currently active cmdset, limited by self.obj's permissions
        mergetype = self.mergetype_stack[-1]
        if mergetype != self.current.mergetype:
            # the effective mergetype differs from the top set's own setting
            merged_on = self.cmdset_stack[-2].key
            mergetype = _("custom %(mergetype)s on cmdset '%(merged_on)s'") % \
                {"mergetype": mergetype, "merged_on":merged_on}
        if mergelist:
            string += _(" <Merged %(mergelist)s (%(mergetype)s, prio %(prio)i)>: %(current)s") % \
                {"mergelist": "+".join(mergelist),
                 "mergetype": mergetype, "prio": self.current.priority,
                 "current":self.current}
        else:
            permstring = "non-perm"
            if self.current.permanent:
                permstring = "perm"
            string += _(" <%(key)s (%(mergetype)s, prio %(prio)i, %(permstring)s)>: %(keylist)s") % \
                {"key": self.current.key, "mergetype": mergetype,
                 "prio": self.current.priority, "permstring": permstring,
                 "keylist": ", ".join(cmd.key for cmd in sorted(self.current, key=lambda o: o.key))}
        return string.strip()
def _import_cmdset(self, cmdset_path, emit_to_obj=None):
"""
Method wrapper for import_cmdset.
load a cmdset from a module.
cmdset_path - the python path to an cmdset object.
emit_to_obj - object to send error messages to
"""
if not emit_to_obj:
emit_to_obj = self.obj
return import_cmdset(cmdset_path, self.obj, emit_to_obj)
    def update(self, init_mode=False):
        """
        Re-adds all sets in the handler to have an updated
        current set.

        init_mode is used right after this handler was
        created; it imports all permanent cmdsets from db.
        """
        if init_mode:
            # reimport all permanent cmdsets from the object's db storage
            storage = self.obj.cmdset_storage
            #print "cmdset_storage:", self.obj.cmdset_storage
            if storage:
                self.cmdset_stack = []
                for pos, path in enumerate(storage):
                    if pos == 0 and not path:
                        # empty first slot = no stored default cmdset;
                        # fall back to the empty placeholder set
                        self.cmdset_stack = [_EmptyCmdSet(cmdsetobj=self.obj)]
                    elif path:
                        cmdset = self._import_cmdset(path)
                        if cmdset:
                            # failed imports (_CMDSET_ERROR) are not re-saved
                            cmdset.permanent = cmdset.key != '_CMDSET_ERROR'
                            self.cmdset_stack.append(cmdset)

        # merge the stack into a new merged cmdset
        new_current = None
        self.mergetype_stack = []
        for cmdset in self.cmdset_stack:
            try:
                # for cmdset's '+' operator, order matters.
                new_current = cmdset + new_current
            except TypeError:
                continue
            self.mergetype_stack.append(new_current.actual_mergetype)
        self.current = new_current
    def add(self, cmdset, emit_to_obj=None, permanent=False):
        """
        Add a cmdset to the handler, on top of the old ones.
        Default is to not make this permanent, i.e. the set
        will not survive a server reset.

        cmdset - can be a cmdset object or the python path to
                 such an object.
        emit_to_obj - an object to receive error messages.
        permanent - this cmdset will remain across a server reboot

        Note: An interesting feature of this method is if you were to
        send it an *already instantiated cmdset* (i.e. not a class),
        the current cmdsethandler's obj attribute will then *not* be
        transferred over to this already instantiated set (this is
        because it might be used elsewhere and can cause strange effects).
        This means you could in principle have the handler
        launch command sets tied to a *different* object than the
        handler. Not sure when this would be useful, but it's a 'quirk'
        that has to be documented.
        """
        # accept either a python-path string or a CmdSet class/instance
        if not (isinstance(cmdset, basestring) or utils.inherits_from(cmdset, CmdSet)):
            raise Exception(_("Only CmdSets can be added to the cmdsethandler!"))
        if callable(cmdset):
            # a CmdSet class: instantiate it, tied to our object
            cmdset = cmdset(self.obj)
        elif isinstance(cmdset, basestring):
            # this is (maybe) a python path. Try to import from cache.
            cmdset = self._import_cmdset(cmdset)
        if cmdset and cmdset.key != '_CMDSET_ERROR':
            if permanent and cmdset.key != '_CMDSET_ERROR':
                # store the path permanently
                cmdset.permanent = True
                storage = self.obj.cmdset_storage
                if not storage:
                    # slot 0 is reserved for the default cmdset path
                    storage = ["", cmdset.path]
                else:
                    storage.append(cmdset.path)
                self.obj.cmdset_storage = storage
            else:
                cmdset.permanent = False
            self.cmdset_stack.append(cmdset)
            # rebuild self.current from the new stack
            self.update()
    def add_default(self, cmdset, emit_to_obj=None, permanent=True):
        """
        Add a new default cmdset. If an old default existed,
        it is replaced. If permanent is set, the set will survive a reboot.

        cmdset - can be a cmdset object or the python path to
                 an instance of such an object.
        emit_to_obj - an object to receive error messages.
        permanent - save cmdset across reboots

        See also the notes for self.add(), which applies here too.
        """
        if callable(cmdset):
            # a CmdSet class: validate, then instantiate tied to our object
            if not utils.inherits_from(cmdset, CmdSet):
                raise Exception(_("Only CmdSets can be added to the cmdsethandler!"))
            cmdset = cmdset(self.obj)
        elif isinstance(cmdset, basestring):
            # this is (maybe) a python path. Try to import from cache.
            cmdset = self._import_cmdset(cmdset)
        if cmdset and cmdset.key != '_CMDSET_ERROR':
            # slot 0 of the stack always holds the default cmdset
            if self.cmdset_stack:
                self.cmdset_stack[0] = cmdset
                self.mergetype_stack[0] = cmdset.mergetype
            else:
                self.cmdset_stack = [cmdset]
                self.mergetype_stack = [cmdset.mergetype]
            if permanent and cmdset.key != '_CMDSET_ERROR':
                cmdset.permanent = True
                storage = self.obj.cmdset_storage
                if storage:
                    storage[0] = cmdset.path
                else:
                    storage = [cmdset.path]
                self.obj.cmdset_storage = storage
            else:
                cmdset.permanent = False
            # rebuild self.current from the new stack
            self.update()
def delete(self, cmdset=None):
"""
Remove a cmdset from the handler.
cmdset can be supplied either as a cmdset-key,
an instance of the CmdSet or a python path
to the cmdset. If no key is given,
the last cmdset in the stack is removed. Whenever
the cmdset_stack changes, the cmdset is updated.
The default cmdset (first entry in stack) is never
removed - remove it explicitly with delete_default.
"""
if len(self.cmdset_stack) < 2:
# don't allow deleting default cmdsets here.
return
if not cmdset:
# remove the last one in the stack
cmdset = self.cmdset_stack.pop()
if cmdset.permanent:
storage = self.obj.cmdset_storage
storage.pop()
self.obj.cmdset_storage = storage
else:
# try it as a callable
if callable(cmdset) and hasattr(cmdset, 'path'):
delcmdsets = [cset for cset in self.cmdset_stack[1:]
if cset.path == cmdset.path]
else:
# try it as a path or key
delcmdsets = [cset for cset in self.cmdset_stack[1:]
if cset.path == cmdset or cset.key == cmdset]
storage = []
if any(cset.permanent for cset in delcmdsets):
# only hit database if there's need to
storage = self.obj.cmdset_storage
for cset in delcmdsets:
if cset.permanent:
try:
storage.remove(cset.path)
except ValueError:
pass
for cset in delcmdsets:
# clean the in-memory stack
try:
self.cmdset_stack.remove(cset)
except ValueError:
pass
# re-sync the cmdsethandler.
self.update()
def delete_default(self):
"""
This explicitly deletes the default cmdset. It's the
only command that can.
"""
if self.cmdset_stack:
cmdset = self.cmdset_stack[0]
if cmdset.permanent:
storage = self.obj.cmdset_storage
if storage:
storage[0] = ""
else:
storage = [""]
self.cmdset_storage = storage
self.cmdset_stack[0] = _EmptyCmdSet(cmdsetobj=self.obj)
else:
self.cmdset_stack = [_EmptyCmdSet(cmdsetobj=self.obj)]
self.update()
    def all(self):
        """
        Returns the list of cmdsets. Mostly useful to check
        if stack is empty or not. Note: returns the live stack,
        not a copy.
        """
        return self.cmdset_stack
def clear(self):
"""
Removes all extra Command sets from the handler, leaving only the
default one.
"""
self.cmdset_stack = [self.cmdset_stack[0]]
self.mergetype_stack = [self.cmdset_stack[0].mergetype]
storage = self.obj.cmdset_storage
if storage:
storage = storage[0]
self.obj.cmdset_storage = storage
self.update()
def has_cmdset(self, cmdset_key, must_be_default=False):
"""
checks so the cmdsethandler contains a cmdset with the given key.
must_be_default - only match against the default cmdset.
"""
if must_be_default:
return self.cmdset_stack and self.cmdset_stack[0].key == cmdset_key
else:
return any([cmdset.key == cmdset_key for cmdset in self.cmdset_stack])
def reset(self):
"""
Force reload of all cmdsets in handler. This should be called
after _CACHED_CMDSETS have been cleared (normally by @reload).
"""
new_cmdset_stack = []
new_mergetype_stack = []
for cmdset in self.cmdset_stack:
if cmdset.key == "_EMPTY_CMDSET":
new_cmdset_stack.append(cmdset)
new_mergetype_stack.append("Union")
else:
new_cmdset_stack.append(self._import_cmdset(cmdset.path))
new_mergetype_stack.append(cmdset.mergetype)
self.cmdset_stack = new_cmdset_stack
self.mergetype_stack = new_mergetype_stack
self.update()
| |
# NOTE: the following are interactive usage examples for TreeDict
# (Python 2 syntax); each section is an independent snippet.
# For the makeReport example
from treedict import TreeDict
t = TreeDict("mytree")
t.x = 1
t.y = 2
t.a.z = [1,2,3]
t.a.y = {1 : 2}
t.b.x = "hello"
t.a.x = None
t.b.z = 2
print t.makeReport()
print t.a.makeReport()
print t.a.makeReport(add_path = True)
print t.a.makeReport(add_path = True, add_tree_name = False)
# For set()
from treedict import TreeDict
t = TreeDict()
t.set("x", 1)
t.set(z = 3)
t.set("ya", 2, "yb", 2, yc = 3)
t.set("a.b.c.v", 1)
print t.makeReport()
# for set
from treedict import TreeDict
t = TreeDict()
t.set("x", 1)
print t.makeReport()
# "1badvalue" is an invalid key name -- demonstrates set() error handling
t.set("a", 3, "b", 4, "1badvalue", 5)
print t.makeReport()
# For attach
from treedict import TreeDict
t = TreeDict('root')
t1 = TreeDict('t1')
t.attach(t1, copy = True)
t1.rootNode()
t.t1 is t1
t.t1.rootNode()
from treedict import TreeDict
t = TreeDict('root')
t1 = TreeDict('t1')
t.attach(t1, name = "new_t1", copy = False)
t1.rootNode()
t.new_t1 is t1
from treedict import TreeDict
t = TreeDict('root')
t1 = TreeDict('t1', x1 = 1, y1 = 2)
t2 = TreeDict('t2', x2 = 10, y2 = 20)
t.a = 1
t.t1 = t1
t.attach(t2)
print t.makeReport()
t.attach(recursive = True)
print t.makeReport()
#########################################
# For branch name
from treedict import TreeDict
t = TreeDict('root')
t.makeBranch("a.b.c")
t.a.b.c.branchName()
t.a.b.c.branchName(add_path = True)
t.a.b.c.branchName(add_path = True, add_tree_name = True)
########################################
# Branches
from treedict import TreeDict
t = TreeDict()
t.set('a.b', 1, 'b.c', 2, x = 1, y = 2)
print t.makeReport()
list(t.iterbranches())
from treedict import TreeDict
t = TreeDict()
t.set('a.b', 1, 'b.c', 2, x = 1, y = 2)
print t.makeReport()
t.branches()
########################################
# Clear
from treedict import TreeDict
t = TreeDict() ; t.set('a.b', 1, 'b.c', 2, x = 1, y = 2)
print t.makeReport()
t1 = t.copy() ; t2 = t.copy()
t.clear()
t.isEmpty()
t1.clear(branch_mode = 'none')
print t1.makeReport()
t2.clear(branch_mode = 'only')
print t2.makeReport()
########################################
# fromkeys
from treedict import TreeDict
t = TreeDict.fromkeys(['a', 'b', 'c'])
print t.makeReport()
from treedict import TreeDict
t = TreeDict.fromkeys('abc', 'abc')
print t.makeReport()
########################################
# setdefault
from treedict import TreeDict
t = TreeDict(x = 1)
print t.makeReport()
t.setdefault("x", 2)
t.setdefault("y", 2)
print t.makeReport()
########################################
# get
from treedict import TreeDict
t = TreeDict(x = 1)
t.get("x")
t.get("y")
t.get("y", [])
########################################
# getClosestKey
t = TreeDict()
t.alpha.x1 = 1
t.alpha.y1 = 1
t.alpha.zzz = 1
t.beta.x = 1
t.gamma.beta = 1
# "alpah" is misspelled on purpose: getClosestKey suggests near matches
"alpah.x" in t
t.getClosestKey("alpah.x")
t.getClosestKey("alpah.x", 1)
t.getClosestKey("alpah.x", 3)
########################################
# hash
from treedict import TreeDict
t = TreeDict()
t.set('br.x', 1, 'br.c.y', 2, x = 1, y = 2)
t.hash()
t.hash('br')
t.br.hash()
t.br.hash(add_name = True)
t.hash('x')
t.hash(keys = ['x', 'y'])
t.hash('nothere')
########################################
# treeName
from treedict import TreeDict
t = TreeDict('mytree')
t.treeName()
t.makeBranch('a.b.c')
t.a.b.treeName()
########################################
# iteritems
from treedict import TreeDict
t = TreeDict() ; t.set('b.x', 1, 'b.c.y', 2, x = 1)
print t.makeReport()
list(t.iteritems())
list(t.iteritems(recursive=False))
list(t.iteritems(recursive=False, branch_mode='none'))
list(t.iteritems(recursive=False, branch_mode='only'))
list(t.iteritems(recursive=False, branch_mode='all'))
list(t.iteritems(recursive=True, branch_mode='only'))
list(t.iteritems(recursive=True, branch_mode='all'))
########################################
# iterkeys
from treedict import TreeDict
t = TreeDict() ; t.set('b.x', 1, 'b.c.y', 2, x = 1)
print t.makeReport()
list(t.iterkeys())
list(t.iterkeys(recursive=False))
list(t.iterkeys(recursive=False, branch_mode='none'))
list(t.iterkeys(recursive=False, branch_mode='only'))
list(t.iterkeys(recursive=False, branch_mode='all'))
list(t.iterkeys(recursive=True, branch_mode='only'))
list(t.iterkeys(recursive=True, branch_mode='all'))
########################################
# itervalues
from treedict import TreeDict
t = TreeDict() ; t.set('b.x', 1, 'b.c.y', 2, x = 1)
print t.makeReport()
list(t.itervalues())
list(t.itervalues(recursive=False))
list(t.itervalues(recursive=False, branch_mode='none'))
list(t.itervalues(recursive=False, branch_mode='only'))
list(t.itervalues(recursive=False, branch_mode='all'))
list(t.itervalues(recursive=True, branch_mode='only'))
list(t.itervalues(recursive=True, branch_mode='all'))
########################################
# pop
from treedict import TreeDict
t = TreeDict()
t.b.c.d.e.y = 2
t.b.c.d.e.pop('y')
"b.c.d.e.y" in t
"b.c.d.e" in t
t.b.c.d.e.pop()
"b.c.d.e" in t
"b.c.d" in t
# prune_empty also drops now-empty intermediate branches
t.b.c.d.pop(prune_empty = True)
"b.c.d" in t
"b.c" in t
t.isEmpty()
t.pop('nothere')
t.pop('nothere', silent=True)
########################################
# size
from treedict import TreeDict
t = TreeDict() ; t.set('b.x', 1, 'b.c.y', 2, x = 1)
print t.makeReport()
t.size()
t.size(recursive=False)
t.size(recursive=False, branch_mode='none')
t.size(recursive=False, branch_mode='only')
t.size(recursive=False, branch_mode='all')
t.size(recursive=True, branch_mode='only')
t.size(recursive=True, branch_mode='all')
########################################
# setFromString
from treedict import TreeDict
t = TreeDict()
t.setFromString('x', '1')
t.setFromString('y', '(1,2,["abc",None])')
t.setFromString('z', '{"abc" : 1}')
print t.makeReport()
########################################
# __call__
from treedict import TreeDict
t = TreeDict()
t('a.b.x', 1, x = 2)
print t.makeReport()
t(y = 3)(z = 4)
print t.makeReport()
########################################
# The globals
t = getTree("default_parameters")
t.verbose = False
t.run_mode = t.chug
t.chug.action = "drink"
t.chug.quantity = "lots"
t.sip.action = "drink"
t.sip.quantity = "a little"
if True:
def run(run_parameters):
t = getTree("default_parameters")
t.update(parameters)
# The following will print "drink lots" unless overridden by run_parameters
print t.run_mode.action, t.run_mode.quantity
########################################
# Mixed attribute/dict access example
from treedict import TreeDict
t = TreeDict()
# Attribute-style and dict-style setting are interchangeable.
t["run.action"] = True
t.run.time_of_day = "Morning"
# Intermediate branches are implicitly created
t.programmer.habits.morning = ["drink coffee", "Read xkcd"]
# read_xkcd is implicitly created here, but isn't really part of the
# tree until later
t.action = t.read_xkcd
# This attaches the dangling branch read_xkcd above
t.read_xkcd.description = "Go to www.xkcd.com and read."
t.read_xkcd.expected_time = "5 minutes."
########################################
# Dict example
from treedict import TreeDict
d = {"x" : 1, "y" : 2, "a.b.x" : 3, "a.b.c.y" : 4}
t = TreeDict()
t.update(d)
print t.makeReport()
from treedict import TreeDict
t = TreeDict() ; t.set("x" , 1, "y" , 2, "a.b.x", 3, "a.b.c.y", 4)
dict(t.iteritems())
from treedict import TreeDict
t = TreeDict()
t.a.b.x = 1
t.a.c.x = 2
t.d.y = 3
t.items()
t.a.items()
t.a.b.items()
########################################
# Memoize decorator
from treedict import TreeDict
class memoized_with_treedict(object):
    """
    Based on 'memoized' python decorator from
    http://wiki.python.org/moin/PythonDecoratorLibrary.

    Decorator that caches a function's return value each time it is
    called. If called later with the same arguments, the cached value
    is returned, and not re-evaluated. In this case, TreeDicts are
    both allowed as arguments and used to allow mutable arguments as
    types.
    """

    def __init__(self, func):
        # the wrapped function
        self.func = func
        # in-memory cache mapping (kwargs-hash, args-hash) -> result;
        # replace "self.cache = {}" with something like:
        #
        #   self.cache = shelve.open(getTree("global_options").cache_file)
        #   atexit.register(lambda: self.cache.close())
        #
        # to get long-term persistence
        self.cache = {}

    def __call__(self, *args, **kwargs):
        # Use TreeDict to allow for mutable parameters / kwargs:
        # TreeDict.hash() gives a stable digest of the current contents
        kw_t = TreeDict(**kwargs)
        arg_t = TreeDict(args = args)
        cache_key = ( kw_t.hash(), arg_t.hash())
        try:
            return self.cache[cache_key]
        except KeyError:
            # first call with these arguments: compute and remember
            self.cache[cache_key] = value = self.func(*args, **kwargs)
            return value
@memoized_with_treedict
def weird_fibonacci(n, t):
    """
    Fibonacci numbers modified so all results are shifted by t.shift,
    and numbers less than t.start are returned as themselves plus the
    shift. Demonstrates the use of TreeDict to control options in a
    memoized function. t.start defaults to 1 and t.shift defaults to 0.
    """
    start = max(1, t.get("start", 1))
    shift = t.get("shift", 0)

    if n <= start:
        return n + shift
    else:
        # BUG FIX: recurse through the function's own (memoized) name;
        # the original called an undefined `fibonacci`.
        return weird_fibonacci(n-1, t) + weird_fibonacci(n-2, t) + shift
| |
"""Use pika with the Asyncio EventLoop"""
import asyncio
import logging
import sys
from pika.adapters import base_connection
from pika.adapters.utils import nbio_interface, io_services_utils
# Module-level logger for this adapter.
LOGGER = logging.getLogger(__name__)

# On Windows, force the selector-based event loop policy. NOTE(review):
# this adapter registers fd readers/writers (see set_reader/set_writer
# below), which the default proactor loop does not support -- confirm
# against the targeted Python versions.
if sys.platform == 'win32':
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
class AsyncioConnection(base_connection.BaseConnection):
    """A pika connection adapter that drives its I/O through the asyncio
    event loop.
    """

    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 custom_ioloop=None,
                 internal_connection_workflow=True):
        """Create a new AsyncioConnection, connecting to RabbitMQ
        automatically.

        :param pika.connection.Parameters parameters: connection parameters.
        :param callable on_open_callback: invoked once the connection is
            open.
        :param None | method on_open_error_callback: invoked if connection
            establishment fails or is interrupted by `Connection.close()`;
            called as on_open_error_callback(Connection, exception).
        :param None | method on_close_callback: invoked when a previously
            fully-open connection closes, as
            `on_close_callback(Connection, exception)`, where `exception`
            is `exceptions.ConnectionClosed` for a user- or
            broker-initiated close, or another exception type describing
            the failure cause.
        :param None | asyncio.AbstractEventLoop |
            nbio_interface.AbstractIOServices custom_ioloop:
            defaults to asyncio.get_event_loop().
        :param bool internal_connection_workflow: True (default) for
            autonomous connection establishment; False when the workflow
            is managed externally via the `create_connection()` factory.
        """
        # an AbstractIOServices instance is used as-is; anything else
        # (an asyncio loop or None) gets wrapped in the adapter
        if isinstance(custom_ioloop, nbio_interface.AbstractIOServices):
            io_services = custom_ioloop
        else:
            io_services = _AsyncioIOServicesAdapter(custom_ioloop)

        super().__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            io_services,
            internal_connection_workflow=internal_connection_workflow)

    @classmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Implement
        :py:classmethod:`pika.adapters.BaseConnection.create_connection()`.
        """
        io_services = _AsyncioIOServicesAdapter(custom_ioloop)

        def connection_factory(params):
            """Build one externally-managed connection for the workflow."""
            if params is None:
                raise ValueError('Expected pika.connection.Parameters '
                                 'instance, but got None in params arg.')
            return cls(
                parameters=params,
                custom_ioloop=io_services,
                internal_connection_workflow=False)

        return cls._start_connection_workflow(
            connection_configs=connection_configs,
            connection_factory=connection_factory,
            nbio=io_services,
            workflow=workflow,
            on_done=on_done)
class _AsyncioIOServicesAdapter(io_services_utils.SocketConnectionMixin,
                                io_services_utils.StreamingConnectionMixin,
                                nbio_interface.AbstractIOServices,
                                nbio_interface.AbstractFileDescriptorServices):
    """Implements
    :py:class:`.utils.nbio_interface.AbstractIOServices` interface
    on top of `asyncio`.

    NOTE:
        :py:class:`.utils.nbio_interface.AbstractFileDescriptorServices`
        interface is only required by the mixins.
    """

    def __init__(self, loop=None):
        """
        :param asyncio.AbstractEventLoop | None loop: If None, gets default
            event loop from asyncio.
        """
        self._loop = loop or asyncio.get_event_loop()

    def get_native_ioloop(self):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractIOServices.get_native_ioloop()`.
        """
        return self._loop

    def close(self):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractIOServices.close()`.
        """
        self._loop.close()

    def run(self):
        """Implement :py:meth:`.utils.nbio_interface.AbstractIOServices.run()`.
        """
        # blocks until stop() is called
        self._loop.run_forever()

    def stop(self):
        """Implement :py:meth:`.utils.nbio_interface.AbstractIOServices.stop()`.
        """
        self._loop.stop()

    def add_callback_threadsafe(self, callback):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractIOServices.add_callback_threadsafe()`.
        """
        # call_soon_threadsafe is asyncio's thread-safe scheduling entry
        self._loop.call_soon_threadsafe(callback)

    def call_later(self, delay, callback):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractIOServices.call_later()`.
        """
        # wrap asyncio's TimerHandle in pika's timer-reference adapter
        return _TimerHandle(self._loop.call_later(delay, callback))

    def getaddrinfo(self,
                    host,
                    port,
                    on_done,
                    family=0,
                    socktype=0,
                    proto=0,
                    flags=0):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractIOServices.getaddrinfo()`.
        """
        # schedule the loop's async resolver and adapt the future to an
        # AbstractIOReference for the caller
        return self._schedule_and_wrap_in_io_ref(
            self._loop.getaddrinfo(
                host,
                port,
                family=family,
                type=socktype,
                proto=proto,
                flags=flags), on_done)

    def set_reader(self, fd, on_readable):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.set_reader()`.
        """
        self._loop.add_reader(fd, on_readable)
        LOGGER.debug('set_reader(%s, _)', fd)

    def remove_reader(self, fd):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.remove_reader()`.
        """
        LOGGER.debug('remove_reader(%s)', fd)
        return self._loop.remove_reader(fd)

    def set_writer(self, fd, on_writable):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.set_writer()`.
        """
        self._loop.add_writer(fd, on_writable)
        LOGGER.debug('set_writer(%s, _)', fd)

    def remove_writer(self, fd):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.remove_writer()`.
        """
        LOGGER.debug('remove_writer(%s)', fd)
        return self._loop.remove_writer(fd)

    def _schedule_and_wrap_in_io_ref(self, coro, on_done):
        """Schedule the coroutine to run and return _AsyncioIOReference

        :param coroutine-obj coro:
        :param callable on_done: user callback that takes the completion result
            or exception as its only arg. It will not be called if the operation
            was cancelled.
        :rtype: _AsyncioIOReference which is derived from
            nbio_interface.AbstractIOReference
        """
        if not callable(on_done):
            raise TypeError(
                'on_done arg must be callable, but got {!r}'.format(on_done))

        return _AsyncioIOReference(
            asyncio.ensure_future(coro, loop=self._loop), on_done)
class _TimerHandle(nbio_interface.AbstractTimerReference):
    """Adapts an `asyncio.Handle` to pika's
    `nbio_interface.AbstractTimerReference` contract.
    """

    def __init__(self, handle):
        """
        :param asyncio.Handle handle: handle of the scheduled callback.
        """
        self._handle = handle

    def cancel(self):
        # cancel at most once; dropping the reference makes repeat
        # calls no-ops
        handle, self._handle = self._handle, None
        if handle is not None:
            handle.cancel()
class _AsyncioIOReference(nbio_interface.AbstractIOReference):
    """Adapts an `asyncio.Future` to pika's
    `nbio_interface.AbstractIOReference` contract.
    """

    def __init__(self, future, on_done):
        """
        :param asyncio.Future future: the in-flight operation.
        :param callable on_done: user callback that takes the completion
            result or exception as its only arg. It will not be called if
            the operation was cancelled.
        """
        if not callable(on_done):
            raise TypeError(
                'on_done arg must be callable, but got {!r}'.format(on_done))

        self._future = future

        def on_done_adapter(future):
            """Forward completion of the future to the user callback."""
            # Asyncio schedules callbacks even for cancelled futures, but
            # pika expects no callback in that case.
            if future.cancelled():
                return
            on_done(future.exception() or future.result())

        future.add_done_callback(on_done_adapter)

    def cancel(self):
        """Cancel pending operation

        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool
        """
        return self._future.cancel()
| |
""" A simple way of interacting to a ethereum node through JSON RPC commands. """
from __future__ import print_function
from builtins import map
from builtins import str
from builtins import object
import logging
import warnings
import json
import gevent
from ethereum.abi import ContractTranslator
from ethereum.tools.keys import privtoaddr
from ethereum.transactions import Transaction
from ethereum.utils import (
denoms,
int_to_big_endian,
big_endian_to_int,
normalize_address,
decode_hex,
encode_hex,
)
from ethereum.tools._solidity import solidity_unresolved_symbols, solidity_library_symbol, solidity_resolve_symbols
from tinyrpc.protocols.jsonrpc import JSONRPCErrorResponse, JSONRPCSuccessResponse
from tinyrpc.protocols.jsonrpc import JSONRPCProtocol
from tinyrpc.transports.http import HttpPostClientTransport
from pyethapp.jsonrpc import address_encoder as _address_encoder
from pyethapp.jsonrpc import (
data_encoder, data_decoder, address_decoder, default_gasprice,
default_startgas, quantity_encoder, quantity_decoder,
)
# pylint: disable=invalid-name,too-many-arguments,too-few-public-methods
# The number of arguments and their names are determined by the JSON-RPC spec

# 20 zero bytes: the "empty"/null Ethereum address
z_address = b'\x00' * 20

# module-level logger
log = logging.getLogger(__name__)
def address_encoder(address):
    """Normalize `address` (blank addresses allowed) and return it hex
    encoded with the '0x' prefix.
    """
    return _address_encoder(normalize_address(address, allow_blank=True))
def block_tag_encoder(val):
    """Encode a block number or block tag for the JSON-RPC API.

    :param val: a block number (int), one of the tags 'latest'/'pending'
        (str or bytes), or a falsy value meaning "no block specified".
    :returns: the encoded quantity/data, or None for falsy input.
    """
    if isinstance(val, int):
        return quantity_encoder(val)
    elif val and isinstance(val, (bytes, str)):
        # BUG FIX: under Python 3 a bytes tag could never match the
        # str-only tuple, so the assert always fired; accept both forms.
        assert val in (b'latest', b'pending', 'latest', 'pending')
        return data_encoder(val)
    else:
        assert not val
def topic_encoder(topic):
    """Hex-encode an integer log topic as 0x-prefixed big-endian data."""
    assert isinstance(topic, int)
    return data_encoder(int_to_big_endian(topic))


def topic_decoder(topic):
    """Decode a 0x-prefixed hex log topic back into an integer."""
    return big_endian_to_int(data_decoder(topic))
def deploy_dependencies_symbols(all_contract):
    """Map each contract name to the list of contract names its bytecode
    links against, derived from the unresolved library symbols in its
    hex bytecode.

    Raises ValueError if two contracts produce the same library symbol.
    """
    # library symbol -> owning contract name; symbols must be unambiguous
    symbols_to_contract = dict()
    for contract_name in all_contract:
        symbol = solidity_library_symbol(contract_name)

        if symbol in symbols_to_contract:
            raise ValueError('Conflicting library names.')

        symbols_to_contract[symbol] = contract_name

    return {
        contract_name: [
            symbols_to_contract[unresolved]
            for unresolved in solidity_unresolved_symbols(contract['bin_hex'])
        ]
        for contract_name, contract in list(all_contract.items())
    }
def dependencies_order_of_build(target_contract, dependencies_map):
    """Return an ordered list of contracts sufficient to successfully
    deploy `target_contract`.

    Dependencies appear before the contracts that use them, each contract
    appears exactly once, and `target_contract` is always last.

    Note:
        This function assumes that the `dependencies_map` is an acyclic graph.
    """
    if len(dependencies_map) == 0:
        return [target_contract]

    if target_contract not in dependencies_map:
        raise ValueError('no dependencies defined for {}'.format(target_contract))

    # Depth-first post-order traversal yields a topological order with
    # dependencies first.
    #
    # BUG FIX: the previous insertion-based approach could emit duplicate
    # entries and, when a contract had several already-placed
    # dependencies, could order it before one of them.
    order = []
    visited = set()

    def _visit(contract_name):
        # append `contract_name` after all of its dependencies
        if contract_name in visited:
            return
        visited.add(contract_name)
        for dependency in dependencies_map[contract_name]:
            _visit(dependency)
        order.append(contract_name)

    _visit(target_contract)
    return order
class JSONRPCClientReplyError(Exception):
    """Raised when the JSON-RPC endpoint returns an error reply, or a
    reply of unknown type."""
    pass
class JSONRPCClient(object):
protocol = JSONRPCProtocol()
def __init__(self, host='127.0.0.1', port=4000, print_communication=True,
privkey=None, sender=None, use_ssl=False, transport=None):
"""
Args:
host (str): host address to connect to.
port (int): port number to connect to.
print_communication (bool): True to print the rpc communication.
privkey: specify privkey for local signing
sender (address): the sender address, computed from privkey if provided.
use_ssl (bool): Use https instead of http.
transport: Tiny rpc transport instance.
"""
if transport is None:
self.transport = HttpPostClientTransport('{}://{}:{}'.format(
'https' if use_ssl else 'http', host, port), headers={'content-type': 'application/json'})
else:
self.transport = transport
self.print_communication = print_communication
self.privkey = privkey
self._sender = sender
self.port = port
    def __repr__(self):
        return '<JSONRPCClient @%d>' % self.port

    @property
    def sender(self):
        # with a privkey, the sender is always derived from it
        if self.privkey:
            return privtoaddr(self.privkey)

        # otherwise fall back to (and cache) the node's coinbase
        if self._sender is None:
            self._sender = self.coinbase

        return self._sender

    @property
    def coinbase(self):
        """ Return the client coinbase address. """
        return address_decoder(self.call('eth_coinbase'))

    def blocknumber(self):
        """ Return the most recent block. """
        return quantity_decoder(self.call('eth_blockNumber'))
def nonce(self, address):
if len(address) == 40:
address = decode_hex(address)
try:
res = self.call('eth_nonce', address_encoder(address), 'pending')
return quantity_decoder(res)
except JSONRPCClientReplyError as e:
if e.message == 'Method not found':
raise JSONRPCClientReplyError(
"'eth_nonce' is not supported by your endpoint (pyethapp only). "
"For transactions use server-side nonces: "
"('eth_sendTransaction' with 'nonce=None')")
raise e
    def balance(self, account):
        """ Return the balance of the account of given address. """
        # 'pending' includes not-yet-mined transactions
        res = self.call('eth_getBalance', address_encoder(account), 'pending')
        return quantity_decoder(res)

    def gaslimit(self):
        # NOTE(review): 'eth_gasLimit' is a non-standard RPC method --
        # verify the endpoint supports it (pyethapp extension)
        return quantity_decoder(self.call('eth_gasLimit'))

    def lastgasprice(self):
        # NOTE(review): 'eth_lastGasPrice' is a non-standard RPC method --
        # verify the endpoint supports it (pyethapp extension)
        return quantity_decoder(self.call('eth_lastGasPrice'))
    def new_abi_contract(self, contract_interface, address):
        # deprecated alias for new_contract_proxy
        warnings.warn('deprecated, use new_contract_proxy', DeprecationWarning)
        return self.new_contract_proxy(contract_interface, address)

    def new_contract_proxy(self, contract_interface, address):
        """ Return a proxy for interacting with a smart contract.

        Args:
            contract_interface: The contract interface as defined by the json.
            address: The contract's address.
        """
        # self.sender already falls back to privkey/coinbase; the extra
        # privtoaddr fallback only triggers when sender is falsy
        sender = self.sender or privtoaddr(self.privkey)
        return ContractProxy(
            sender,
            contract_interface,
            address,
            self.eth_call,
            self.send_transaction,
            self.eth_estimateGas,
        )
def deploy_solidity_contract(self, sender, contract_name, all_contracts, # pylint: disable=too-many-locals
libraries, constructor_parameters, timeout=None, gasprice=default_gasprice):
if contract_name not in all_contracts:
raise ValueError('Unkonwn contract {}'.format(contract_name))
libraries = dict(libraries)
contract = all_contracts[contract_name]
contract_interface = contract['abi']
symbols = solidity_unresolved_symbols(contract['bin_hex'])
if symbols:
available_symbols = list(map(solidity_library_symbol, list(all_contracts.keys()))) # pylint: disable=bad-builtin
unknown_symbols = set(symbols) - set(available_symbols)
if unknown_symbols:
msg = 'Cannot deploy contract, known symbols {}, unresolved symbols {}.'.format(
available_symbols,
unknown_symbols,
)
raise Exception(msg)
dependencies = deploy_dependencies_symbols(all_contracts)
deployment_order = dependencies_order_of_build(contract_name, dependencies)
deployment_order.pop() # remove `contract_name` from the list
log.debug('Deploing dependencies: {}'.format(str(deployment_order)))
for deploy_contract in deployment_order:
dependency_contract = all_contracts[deploy_contract]
hex_bytecode = solidity_resolve_symbols(dependency_contract['bin_hex'], libraries)
bytecode = decode_hex(hex_bytecode)
dependency_contract['bin_hex'] = hex_bytecode
dependency_contract['bin'] = bytecode
transaction_hash_hex = self.send_transaction(
sender,
to='',
data=bytecode,
gasprice=gasprice,
)
transaction_hash = decode_hex(transaction_hash_hex)
self.poll(transaction_hash, timeout=timeout)
receipt = self.eth_getTransactionReceipt(transaction_hash)
contract_address = receipt['contractAddress']
contract_address = contract_address[2:] # remove the hexadecimal prefix 0x from the address
libraries[deploy_contract] = contract_address
deployed_code = self.eth_getCode(decode_hex(contract_address))
if deployed_code == '0x':
raise RuntimeError("Contract address has no code, check gas usage.")
hex_bytecode = solidity_resolve_symbols(contract['bin_hex'], libraries)
bytecode = decode_hex(hex_bytecode)
contract['bin_hex'] = hex_bytecode
contract['bin'] = bytecode
if constructor_parameters:
translator = ContractTranslator(contract_interface)
parameters = translator.encode_constructor_arguments(constructor_parameters)
bytecode = contract['bin'] + parameters
else:
bytecode = contract['bin']
transaction_hash_hex = self.send_transaction(
sender,
to='',
data=bytecode,
gasprice=gasprice,
)
transaction_hash = decode_hex(transaction_hash_hex)
self.poll(transaction_hash, timeout=timeout)
receipt = self.eth_getTransactionReceipt(transaction_hash)
contract_address = receipt['contractAddress']
deployed_code = self.eth_getCode(decode_hex(contract_address[2:]))
if deployed_code == '0x':
raise RuntimeError("Deployment of {} failed. Contract address has no code, check gas usage.".format(
contract_name
))
return self.new_contract_proxy(
contract_interface,
contract_address,
)
def find_block(self, condition):
"""Query all blocks one by one and return the first one for which
`condition(block)` evaluates to `True`.
"""
i = 0
while True:
block = self.call('eth_getBlockByNumber', quantity_encoder(i), True)
if condition(block) or not block:
return block
i += 1
def new_filter(self, fromBlock=None, toBlock=None, address=None, topics=None):
""" Creates a filter object, based on filter options, to notify when
the state changes (logs). To check if the state has changed, call
eth_getFilterChanges.
"""
json_data = {
'fromBlock': block_tag_encoder(fromBlock or ''),
'toBlock': block_tag_encoder(toBlock or ''),
}
if address is not None:
json_data['address'] = address_encoder(address)
if topics is not None:
if not isinstance(topics, list):
raise ValueError('topics must be a list')
json_data['topics'] = [topic_encoder(topic) for topic in topics]
filter_id = self.call('eth_newFilter', json_data)
return quantity_decoder(filter_id)
    def filter_changes(self, fid):
        # poll a previously installed filter for new entries; returns
        # None when nothing happened since the last poll
        changes = self.call('eth_getFilterChanges', quantity_encoder(fid))
        if not changes:
            return None
        elif isinstance(changes, bytes):
            # a plain data blob (block/transaction hash filters)
            return data_decoder(changes)
        else:
            # a list of log dicts: decode each field with its own decoder,
            # skipping null values
            decoders = dict(blockHash=data_decoder,
                            transactionHash=data_decoder,
                            data=data_decoder,
                            address=address_decoder,
                            topics=lambda x: [topic_decoder(t) for t in x],
                            blockNumber=quantity_decoder,
                            logIndex=quantity_decoder,
                            transactionIndex=quantity_decoder)
            return [{k: decoders[k](v) for k, v in list(c.items()) if v is not None} for c in changes]
def call(self, method, *args):
    """ Perform the JSON-RPC request and return its result.

    Args:
        method (str): The RPC method.
        args: The encoded arguments expected by the method.
            - Object arguments must be supplied as an dictionary.
            - Quantity arguments must be hex encoded starting with '0x' and
              without left zeros.
            - Data arguments must be hex encoded starting with '0x'
    """
    request = self.protocol.create_request(method, args)
    reply = self.transport.send_message(request.serialize())

    if self.print_communication:
        # dump the outgoing request pretty-printed next to the raw reply
        print(json.dumps(json.loads(request.serialize()), indent=2))
        print(reply)

    response = self.protocol.parse_reply(reply)
    if isinstance(response, JSONRPCSuccessResponse):
        return response.result
    if isinstance(response, JSONRPCErrorResponse):
        raise JSONRPCClientReplyError(response.error)
    raise JSONRPCClientReplyError('Unknown type of JSONRPC reply')

__call__ = call
def send_transaction(self, sender, to, value=0, data='', startgas=0,
                     gasprice=10 * denoms.szabo, nonce=None):
    """ Helper to send signed messages.

    This method will use the `privkey` provided in the constructor to
    locally sign the transaction. This requires an extended server
    implementation that accepts the variables v, r, and s.

    Args:
        sender: Address the transaction is sent from; may be empty when
            `privkey` is set (it is then derived from the key).
        to: Destination address, passed through to `Transaction`.
        value (int): Amount transferred with the transaction.
        data: Transaction payload (contract bytecode or encoded call).
        startgas (int): Gas limit; when falsy, the current block gas limit
            minus one is used.
        gasprice (int): Price per unit of gas.
        nonce (int): Optional explicit nonce; when omitted it is queried
            from the server, or 0 without a privkey/sender to query for.

    Returns:
        The hex encoded result of eth_sendTransaction.
    """
    if not self.privkey and not sender:
        raise ValueError('Either privkey or sender needs to be supplied.')

    if self.privkey and not sender:
        # derive the sender from the local private key
        sender = privtoaddr(self.privkey)
        if nonce is None:
            nonce = self.nonce(sender)
    elif self.privkey:
        # both privkey and sender given: they must agree
        if sender != privtoaddr(self.privkey):
            raise ValueError('sender for a different privkey.')
        if nonce is None:
            nonce = self.nonce(sender)
    else:
        # no privkey: an unsigned transaction with a caller-chosen nonce
        if nonce is None:
            nonce = 0

    if not startgas:
        # leave one unit of headroom below the block gas limit
        startgas = self.gaslimit() - 1

    tx = Transaction(nonce, gasprice, startgas, to=to, value=value, data=data)

    if self.privkey:
        # add the fields v, r and s
        tx.sign(self.privkey)

    tx_dict = tx.to_dict()

    # Transaction.to_dict() encodes 'data', so we need to decode it here.
    tx_dict['data'] = data_decoder(tx_dict['data'])

    # rename the fields to match the eth_sendTransaction signature
    tx_dict.pop('hash')
    tx_dict['sender'] = sender
    tx_dict['gasPrice'] = tx_dict.pop('gasprice')
    tx_dict['gas'] = tx_dict.pop('startgas')

    res = self.eth_sendTransaction(**tx_dict)
    # NOTE(review): server replies with 32 bytes (tx hash); 20 presumably
    # corresponds to an address reply — confirm against the server used
    assert len(res) in (20, 32)
    return encode_hex(res)
def eth_sendTransaction(self, nonce=None, sender='', to='', value=0, data='',
                        gasPrice=default_gasprice, gas=default_startgas,
                        v=None, r=None, s=None):
    """ Creates new message call transaction or a contract creation, if the
    data field contains code.

    Note:
        The support for local signing through the variables v,r,s is not
        part of the standard spec, a extended server is required.

    Args:
        sender (address): The 20 bytes address the transaction is send from.
        to (address): DATA, 20 Bytes - (optional when creating new
            contract) The address the transaction is directed to.
        gas (int): Gas provided for the transaction execution. It will
            return unused gas.
        gasPrice (int): gasPrice used for each paid gas.
        value (int): Value send with this transaction.
        data (bin): The compiled code of a contract OR the hash of the
            invoked method signature and encoded parameters.
        nonce (int): This allows to overwrite your own pending transactions
            that use the same nonce.
        v, r, s: Optional pre-computed signature values (non-standard
            extension, see Note).

    Returns:
        The decoded server reply (transaction hash).
    """
    if to == '' and data.isalnum():
        warnings.warn(
            'Verify that the data parameter is _not_ hex encoded, if this is the case '
            'the data will be double encoded and result in unexpected '
            'behavior.'
        )

    if to == '0' * 40:
        warnings.warn('For contract creating the empty string must be used.')

    json_data = {
        'to': data_encoder(normalize_address(to, allow_blank=True)),
        'value': quantity_encoder(value),
        'gasPrice': quantity_encoder(gasPrice),
        'gas': quantity_encoder(gas),
        'data': data_encoder(data),
    }

    if not sender and not (v and r and s):
        raise ValueError('Either sender or v, r, s needs to be informed.')

    # BUGFIX: `sender` defaults to '' (not None), so the previous
    # `if sender is not None` test always encoded a 'from' field — even an
    # empty address on the v/r/s-only (locally signed) path. Only encode a
    # sender that was actually supplied.
    if sender:
        json_data['from'] = address_encoder(sender)

    if v and r and s:
        json_data['v'] = quantity_encoder(v)
        json_data['r'] = quantity_encoder(r)
        json_data['s'] = quantity_encoder(s)

    if nonce is not None:
        json_data['nonce'] = quantity_encoder(nonce)

    res = self.call('eth_sendTransaction', json_data)
    return data_decoder(res)
def _format_call(self, sender='', to='', value=0, data='',
                 startgas=default_startgas, gasprice=default_gasprice):
    """ Helper to format the transaction data.

    Builds the JSON call object shared by eth_call and eth_estimateGas,
    skipping any field whose argument is None.

    NOTE(review): 'to' is hex encoded with data_encoder rather than
    address_encoder — callers appear to pass raw addresses; confirm.
    """
    field_specs = [
        ('from', sender, address_encoder),
        ('to', to, data_encoder),
        ('value', value, quantity_encoder),
        ('gasPrice', gasprice, quantity_encoder),
        ('gas', startgas, quantity_encoder),
        ('data', data, data_encoder),
    ]

    json_data = {}
    for field, argument, encode in field_specs:
        if argument is not None:
            json_data[field] = encode(argument)
    return json_data
def eth_call(self, sender='', to='', value=0, data='',
             startgas=default_startgas, gasprice=default_gasprice,
             block_number='latest'):
    """ Executes a new message call immediately without creating a
    transaction on the block chain.

    Args:
        sender: The address the transaction is send from.
        to: The address the transaction is directed to.
        startgas (int): Gas provided for the transaction execution. eth_call
            consumes zero gas, but this parameter may be needed by some
            executions.
        gasprice (int): gasPrice used for each paid gas.
        value (int): Integer of the value send with this transaction.
        data (bin): Hash of the method signature and encoded parameters.
            For details see Ethereum Contract ABI.
        block_number: Determines the state of ethereum used in the call.
    """
    call_object = self._format_call(
        sender,
        to,
        value,
        data,
        startgas,
        gasprice,
    )
    return data_decoder(self.call('eth_call', call_object, block_number))
def eth_estimateGas(self, sender='', to='', value=0, data='',
                    startgas=default_startgas, gasprice=default_gasprice):
    """ Makes a call or transaction, which won't be added to the blockchain
    and returns the used gas, which can be used for estimating the used
    gas.

    Args:
        sender: The address the transaction is send from.
        to: The address the transaction is directed to.
        startgas (int): Gas provided for the transaction execution. eth_call
            consumes zero gas, but this parameter may be needed by some
            executions.
        gasprice (int): gasPrice used for each paid gas.
        value (int): Integer of the value send with this transaction.
        data (bin): Hash of the method signature and encoded parameters.
            For details see Ethereum Contract ABI.
    """
    call_object = self._format_call(
        sender,
        to,
        value,
        data,
        startgas,
        gasprice,
    )
    return quantity_decoder(self.call('eth_estimateGas', call_object))
def eth_getTransactionReceipt(self, transaction_hash):
    """ Returns the receipt of a transaction by transaction hash.

    Args:
        transaction_hash: Hash of a transaction (raw 32 bytes, not hex
            encoded).

    Returns:
        A dict representing the transaction receipt object, or null when no
        receipt was found.
    """
    # the argument must be the raw hash; reject obviously hex-encoded input
    if transaction_hash.startswith('0x'):
        warnings.warn(
            'transaction_hash seems to be already encoded, this will'
            ' result in unexpected behavior'
        )
    if len(transaction_hash) != 32:
        raise ValueError(
            'transaction_hash length must be 32 (it might be hex encode)'
        )

    return self.call('eth_getTransactionReceipt', data_encoder(transaction_hash))
def eth_getCode(self, address, block='latest'):
    """ Returns code at a given address.

    Args:
        address: An address (raw 20 bytes, not hex encoded).
        block: Integer block number, or the string "latest",
            "earliest" or "pending".
    """
    # the argument must be the raw address; reject obviously hex input
    if address.startswith('0x'):
        warnings.warn(
            'address seems to be already encoded, this will result '
            'in unexpected behavior'
        )
    if len(address) != 20:
        raise ValueError(
            'address length must be 20 (it might be hex encode)'
        )

    return self.call('eth_getCode', address_encoder(address), block)
def eth_getTransactionByHash(self, transaction_hash):
    """ Returns the information about a transaction requested by
    transaction hash.

    Args:
        transaction_hash: Hash of a transaction (raw 32 bytes, not hex
            encoded).
    """
    # the argument must be the raw hash; reject obviously hex-encoded input
    if transaction_hash.startswith('0x'):
        warnings.warn(
            'transaction_hash seems to be already encoded, this will'
            ' result in unexpected behavior'
        )
    if len(transaction_hash) != 32:
        raise ValueError(
            'transaction_hash length must be 32 (it might be hex encode)'
        )

    return self.call('eth_getTransactionByHash', data_encoder(transaction_hash))
def poll(self, transaction_hash, confirmations=None, timeout=None):
    """ Wait until the `transaction_hash` is applied or rejected.
    If timeout is None, this could wait indefinitely!

    Args:
        transaction_hash (hash): Transaction hash that we are waiting for.
        confirmations (int): Number of block confirmations that we will
            wait for.
        timeout (float): Timeout in seconds, raise an Exception on
            timeout.
    """
    # the argument must be the raw hash; reject obviously hex-encoded input
    if transaction_hash.startswith('0x'):
        warnings.warn(
            'transaction_hash seems to be already encoded, this will'
            ' result in unexpected behavior'
        )

    if len(transaction_hash) != 32:
        raise ValueError(
            'transaction_hash length must be 32 (it might be hex encode)'
        )

    transaction_hash = data_encoder(transaction_hash)

    # arm a gevent timeout only when requested; it is cancelled in the
    # finally block so it cannot fire after this method returns
    deadline = None
    if timeout:
        deadline = gevent.Timeout(timeout)
        deadline.start()

    try:
        # used to check if the transaction was removed, this could happen
        # if gas price is to low:
        #
        # > Transaction (acbca3d6) below gas price (tx=1 Wei ask=18
        # > Shannon). All sequential txs from this address(7d0eae79)
        # > will be ignored
        #
        last_result = None

        while True:
            # Could return None for a short period of time, until the
            # transaction is added to the pool
            transaction = self.call('eth_getTransactionByHash', transaction_hash)

            # if the transaction was added to the pool and then removed
            if transaction is None and last_result is not None:
                raise Exception('invalid transaction, check gas price')

            # the transaction was added to the pool and mined
            if transaction and transaction['blockNumber'] is not None:
                break

            last_result = transaction

            gevent.sleep(.5)

        if confirmations:
            # this will wait for both APPLIED and REVERTED transactions
            transaction_block = quantity_decoder(transaction['blockNumber'])
            confirmation_block = transaction_block + confirmations

            block_number = self.blocknumber()

            while block_number < confirmation_block:
                gevent.sleep(.5)
                block_number = self.blocknumber()

    except gevent.Timeout:
        raise Exception('timeout when polling for transaction')

    finally:
        if deadline:
            deadline.cancel()
class MethodProxy(object):
    """ A callable interface that exposes a contract function.

    Calling the proxy dispatches to `call` for constant functions and to
    `transact` for state-changing ones.
    """

    # keyword arguments accepted by transact/call/estimate_gas
    valid_kargs = set(('gasprice', 'startgas', 'value'))

    def __init__(self, sender, contract_address, function_name, translator,
                 call_function, transaction_function, estimate_function=None):
        self.sender = sender
        self.contract_address = contract_address
        self.function_name = function_name
        self.translator = translator
        self.call_function = call_function
        self.transaction_function = transaction_function
        self.estimate_function = estimate_function

    def _encode_arguments(self, args):
        """ ABI-encode the positional arguments for this function. """
        return self.translator.encode(self.function_name, args)

    def transact(self, *args, **kargs):
        """ Send a state-changing transaction invoking this function. """
        assert set(kargs.keys()).issubset(self.valid_kargs)
        txhash = self.transaction_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=self._encode_arguments(args),
            **kargs
        )
        return txhash

    def call(self, *args, **kargs):
        """ Execute this function via a read-only call and decode the
        result; single-element results are unwrapped.
        """
        assert set(kargs.keys()).issubset(self.valid_kargs)
        outcome = self.call_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=self._encode_arguments(args),
            **kargs
        )
        if outcome:
            decoded = self.translator.decode(self.function_name, outcome)
            outcome = decoded[0] if len(decoded) == 1 else decoded
        return outcome

    def estimate_gas(self, *args, **kargs):
        """ Estimate the gas needed to invoke this function. """
        if not self.estimate_function:
            raise RuntimeError('estimate_function wasnt supplied.')
        assert set(kargs.keys()).issubset(self.valid_kargs)
        return self.estimate_function(
            sender=self.sender,
            to=self.contract_address,
            value=kargs.pop('value', 0),
            data=self._encode_arguments(args),
            **kargs
        )

    def __call__(self, *args, **kargs):
        if self.translator.function_data[self.function_name]['is_constant']:
            return self.call(*args, **kargs)
        return self.transact(*args, **kargs)
class ContractProxy(object):
    """ Exposes a smart contract as a python object.

    Contract calls can be made directly in this object, all the functions will
    be exposed with the equivalent api and will perform the argument
    translation.
    """

    def __init__(self, sender, abi, address, call_func, transact_func, estimate_function=None):
        sender = normalize_address(sender)
        self.abi = abi
        self.address = address = normalize_address(address)
        self.translator = ContractTranslator(abi)

        # attach one MethodProxy per ABI function, documented with the
        # function's signature
        for function_name in self.translator.function_data:
            proxy = MethodProxy(
                sender,
                address,
                function_name,
                self.translator,
                call_func,
                transact_func,
                estimate_function,
            )

            signature = self.translator.function_data[function_name]['signature']
            formatted_arguments = ', '.join(
                '{type} {argument}'.format(type=type_, argument=argument)
                for type_, argument in signature
            )
            proxy.__doc__ = '{function_name}({function_signature})'.format(
                function_name=function_name,
                function_signature=formatted_arguments,
            )

            setattr(self, function_name, proxy)

# backwards compatibility
ABIContract = ContractProxy
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the `gig_registry` app.

    Extends the ``Performance`` model with ``order``, ``time`` and ``stage``
    columns and a ``genre`` many-to-many join table.
    """

    def forwards(self, orm):
        """Apply the migration: add the new columns and the genre M2M table."""
        # Adding field 'Performance.order'
        db.add_column('gig_registry_performance', 'order',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)

        # Adding field 'Performance.time'
        db.add_column('gig_registry_performance', 'time',
                      self.gf('django.db.models.fields.TimeField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'Performance.stage'
        db.add_column('gig_registry_performance', 'stage',
                      self.gf('django.db.models.fields.related.ForeignKey')(to=orm['gig_registry.Stage'], null=True, blank=True),
                      keep_default=False)

        # Adding M2M table for field genre on 'Performance'
        m2m_table_name = db.shorten_name('gig_registry_performance_genre')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('performance', models.ForeignKey(orm['gig_registry.performance'], null=False)),
            ('genre', models.ForeignKey(orm['gig_registry.genre'], null=False))
        ))
        db.create_unique(m2m_table_name, ['performance_id', 'genre_id'])

    def backwards(self, orm):
        """Revert the migration: drop the added columns and the M2M table."""
        # Deleting field 'Performance.order'
        db.delete_column('gig_registry_performance', 'order')

        # Deleting field 'Performance.time'
        db.delete_column('gig_registry_performance', 'time')

        # Deleting field 'Performance.stage'
        # (the FK is stored in the 'stage_id' database column)
        db.delete_column('gig_registry_performance', 'stage_id')

        # Removing M2M table for field genre on 'Performance'
        db.delete_table(db.shorten_name('gig_registry_performance_genre'))

    # Frozen ORM snapshot used by South to build `orm` for this migration;
    # auto-generated — do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'gig_registry.band': {
            'Meta': {'object_name': 'Band'},
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
            'founded': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'genre': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['gig_registry.Genre']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['gig_registry.Musician']", 'null': 'True', 'through': "orm['gig_registry.BandMembership']", 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'updated_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'})
        },
        'gig_registry.bandmembership': {
            'Meta': {'object_name': 'BandMembership'},
            'band': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Band']"}),
            'finished': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'musician': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Musician']"}),
            'started': ('django.db.models.fields.DateField', [], {})
        },
        'gig_registry.genre': {
            'Meta': {'object_name': 'Genre'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
        },
        'gig_registry.gig': {
            'Meta': {'object_name': 'Gig'},
            'bands': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['gig_registry.Band']", 'through': "orm['gig_registry.Performance']", 'symmetrical': 'False'}),
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
            'cost': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
            'finish': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'gig_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.GigType']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Source']", 'null': 'True', 'blank': 'True'}),
            'start': ('django.db.models.fields.DateField', [], {}),
            'updated_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Venue']"})
        },
        'gig_registry.gigtype': {
            'Meta': {'object_name': 'GigType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
        },
        'gig_registry.location': {
            'Meta': {'object_name': 'Location'},
            'building_name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'created_at': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lat': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '6', 'blank': 'True'}),
            'lon': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '6', 'blank': 'True'}),
            'post_code': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'street_address': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'suburb': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'updated_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
        },
        'gig_registry.manager': {
            'Meta': {'object_name': 'Manager', '_ormbases': ['gig_registry.Person']},
            'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gig_registry.Person']", 'unique': 'True', 'primary_key': 'True'})
        },
        'gig_registry.musician': {
            'Meta': {'object_name': 'Musician', '_ormbases': ['gig_registry.Person']},
            'instrument': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gig_registry.Person']", 'unique': 'True', 'primary_key': 'True'})
        },
        'gig_registry.owner': {
            'Meta': {'object_name': 'Owner', '_ormbases': ['gig_registry.Person']},
            'person_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['gig_registry.Person']", 'unique': 'True', 'primary_key': 'True'})
        },
        'gig_registry.performance': {
            'Meta': {'object_name': 'Performance'},
            'band': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Band']"}),
            'genre': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['gig_registry.Genre']", 'null': 'True', 'blank': 'True'}),
            'gig': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Gig']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'stage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Stage']", 'null': 'True', 'blank': 'True'}),
            'time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'gig_registry.person': {
            'Meta': {'object_name': 'Person'},
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today'}),
            'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'nick_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
        },
        'gig_registry.source': {
            'Meta': {'unique_together': "(('name', 'published'),)", 'object_name': 'Source'},
            'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'author': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'published': ('django.db.models.fields.DateField', [], {}),
            'source_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.SourceType']", 'null': 'True', 'blank': 'True'})
        },
        'gig_registry.sourcetype': {
            'Meta': {'object_name': 'SourceType'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
        },
        'gig_registry.stage': {
            'Meta': {'object_name': 'Stage'},
            'capacity': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'gig_registry.venue': {
            'Meta': {'object_name': 'Venue'},
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
            'established': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gig_registry.Location']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'stages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['gig_registry.Stage']", 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'O'", 'max_length': '1'}),
            'status_notes': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'venue_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
        }
    }

    # only this app's models are frozen/migrated here
    complete_apps = ['gig_registry']
| |
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
import cPickle as pickle
from openmdao.api import Problem
from wakeexchange.OptimizationGroups import OptAEP
from wakeexchange.gauss import gauss_wrapper, add_gauss_params_IndepVarComps
from wakeexchange.floris import floris_wrapper, add_floris_params_IndepVarComps
from wakeexchange.utilities import sunflower_points
def plot_data_vs_model(ax=None, datax=np.zeros(0), datay=np.zeros(0), modelx=np.zeros(0),
                       modely=np.zeros(0), title='', xlabel='', ylabel='', datalabel='',
                       modellabel='', modelcolor='r', modelline='--', xscalar=1./126.4, yscalar=1E-3,
                       sum=True, front=True, second=True, legend=True):
    """Plot measured data against model predictions on a single axes.

    Data series use open markers (circle = column 0, triangle = column 1,
    square = their sum); model series use `modelline` + `modelcolor` (sum
    in black). Axis labels and title are always applied; a legend is added
    when `legend` is True. Returns the axes used, creating a new figure
    when `ax` is None.

    NOTE(review): the `sum` parameter shadows the builtin, but renaming it
    would break callers that pass it by keyword.
    """
    if ax is None:
        ax = plt.figure().gca()

    # measured data, if supplied
    if datax.size > 0:
        scaled_x = datax * xscalar
        if front:
            ax.plot(scaled_x, datay[:, 0] * yscalar, 'o', mec='k', mfc='none', label=datalabel)
        if second:
            ax.plot(scaled_x, datay[:, 1] * yscalar, '^', mec='k', mfc='none')
        if sum:
            ax.plot(scaled_x, datay[:, 0] * yscalar + datay[:, 1] * yscalar, 'ks', mec='k', mfc='none')

    # model predictions, if supplied
    if modelx.size > 0:
        scaled_x = modelx * xscalar
        if front:
            ax.plot(scaled_x, modely[:, 0] * yscalar, modelline + modelcolor, label=modellabel)
        if second:
            ax.plot(scaled_x, modely[:, 1] * yscalar, modelline + modelcolor)
        if sum:
            ax.plot(scaled_x, modely[:, 0] * yscalar + modely[:, 1] * yscalar, modelline + 'k')

    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_title(title)

    if legend:
        ax.legend()

    return ax
def setup_probs():
    """Build and initialize three two-turbine OpenMDAO AEP problems.

    Creates one problem driven by the Gaussian wake model and two driven by
    the FLORIS wake model (an original and a to-be-retuned copy), calls
    ``setup()`` on each, and loads a shared set of turbine and site
    parameters.  The Gaussian problem additionally receives its
    Bastankhah/Porte-Agel style model parameters (ky, kz, I, shear_exp, ...).

    Returns a list ``[gauss_prob, floris_prob_orig, floris_prob_tuned]``.
    """
    nTurbines = 2
    nDirections = 1
    rotorDiameter = 126.4
    # NOTE(review): rotorArea, yaw, and k_calc (below) are computed but never
    # used inside this function.
    rotorArea = np.pi*rotorDiameter*rotorDiameter/4.0
    axialInduction = 1.0/3.0
    # Actuator-disc Cp = 4a(1-a)^2 with a = 1/3; the 0.7737/0.944 prefactor
    # presumably removes generator efficiency from Cp — confirm against model.
    CP = 0.7737/0.944 * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2)
    # CP =0.768 * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2)
    # Actuator-disc thrust coefficient Ct = 4a(1-a)
    CT = 4.0*axialInduction*(1.0-axialInduction)
    generator_efficiency = 0.944
    # Define turbine characteristics (scalars expanded to per-turbine arrays)
    axialInduction = np.array([axialInduction, axialInduction])
    rotorDiameter = np.array([rotorDiameter, rotorDiameter])
    generatorEfficiency = np.array([generator_efficiency, generator_efficiency])
    yaw = np.array([0., 0.])
    hubHeight = np.array([90.0, 90.0])
    # Define site measurements
    # 0.523599 rad = 30 deg, so this is 240 deg (30 deg off due-west inflow)
    wind_direction = 270.-0.523599*180./np.pi
    wind_speed = 8. # m/s
    air_density = 1.1716
    Ct = np.array([CT, CT])
    Cp = np.array([CP, CP])
    # NOTE(review): the kwarg spelling 'params_IdepVar_func' (missing 'n') is
    # used consistently for all three problems — presumably matches the
    # OptAEP signature; verify against wakeexchange.OptimizationGroups.
    gauss_prob = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
                                     wake_model=gauss_wrapper, datasize=0, minSpacing=2.0,
                                     params_IdepVar_func=add_gauss_params_IndepVarComps,
                                     params_IndepVar_args={}))
    floris_options = {'differentiable': True, 'nSamples': 0, 'use_rotor_components': False}
    floris_prob_orig = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
                                           wake_model=floris_wrapper, wake_model_options=floris_options, datasize=0,
                                           params_IdepVar_func=add_floris_params_IndepVarComps,
                                           params_IndepVar_args={}))
    floris_prob_tuned = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
                                            wake_model=floris_wrapper, wake_model_options=floris_options, datasize=0,
                                            params_IdepVar_func=add_floris_params_IndepVarComps,
                                            params_IndepVar_args={}))
    probs = [gauss_prob, floris_prob_orig, floris_prob_tuned]
    # setup() must run before any prob[...] assignment below
    for prob in probs:
        prob.setup()
        if prob is floris_prob_orig or prob is floris_prob_tuned:
            prob['model_params:useWakeAngle'] = True
        # 7D spacing along the 30-deg-rotated inflow direction
        turbineX = np.array([1118.1, 1881.9])
        turbineY = np.array([1279.5, 1720.5])
        # prob['gen_params:CTcorrected'] = False
        # prob['gen_params:CPcorrected'] = False
        prob['turbineX'] = turbineX
        prob['turbineY'] = turbineY
        prob['rotorDiameter'] = rotorDiameter
        prob['axialInduction'] = axialInduction
        prob['generatorEfficiency'] = generatorEfficiency
        prob['air_density'] = air_density
        prob['Cp_in'] = Cp
        prob['Ct_in'] = Ct
        prob['windSpeeds'] = np.array([wind_speed])
        prob['windDirections'] = np.array([wind_direction])
        prob['hubHeight'] = hubHeight
        # Gaussian-model-specific parameters
        if prob is gauss_prob:
            sort_turbs = True
            wake_combination_method = 1 # can be [0:Linear freestream superposition,
                                        # 1:Linear upstream velocity superposition,
                                        # 2:Sum of squares freestream superposition,
                                        # 3:Sum of squares upstream velocity superposition]
            ti_calculation_method = 0 # can be [0:No added TI calculations,
                                        # 1:TI by Niayifar and Porte Agel altered by Annoni and Thomas,
                                        # 2:TI by Niayifar and Porte Agel 2016,
                                        # 3:no yet implemented]
            calc_k_star = True
            z_ref = 90.0
            z_0 = 0.001
            k_calc = 0.065
            # Tuning history (kept for reference):
            # tuned with 1 rotor point: error_turbine2: 380593.475508 ky: 0.0147484983033 kz: 0.0365360001244 I: 1.0 shear_exp: 0.0804912726779
            # tuned with 500 rotor points: error_turbine2: 505958.824163 ky: 0.010239469297 kz: 0.0187826477801 I: 0.5 shear_exp: 0.115698347406
            # tuned with 1000 rotor points: error_turbine2: 440240.45048 ky: 0.0132947699754 kz: 0.0267832386866 I: 0.149427342515 shear_exp: 0.107996557048
            # tuned with k_star and 1000 rotor points: error_turbine2: 759565.303289 ky: 0.065 kz: 0.065 I: 0.0765060707278 shear_exp: 0.104381464423
            # using NPA to calculate initial spreading, but then letting BPA adjust it with TI after that. 1000 rotor points
            # error_turbine2: 759565.279351 ky: 0.0330333796913 kz: 0.0330333796913 I: 0.0765060716478 shear_exp: 0.104381467026
            # using NPA to calculate initial spreading, but then letting BPA adjust it with TI after that. 16 rotor points
            # error_turbine2: 642639.730582 ky: 0.0307280539404 kz: 0.0307280539404 I: 0.0704979253074 shear_exp: 0.108435318499
            # tuning only shear_exp with 16 rotor points: error_turbine2: 779216.077341 ky: 0.0267 kz: 0.0267 I: 0.06 shear_exp: 0.161084449732
            I = .063 # + 0.04
            # I = .06
            # ky/kz from turbulence intensity — presumably the Niayifar &
            # Porte-Agel k* correlation (k = 0.3837*TI + 0.003678); confirm.
            ky = 0.3837*I + 0.003678
            # ky = 0.022
            kz = 0.3837*I + 0.003678
            # kz = 0.022
            # shear_exp = 0.161084449732
            shear_exp = 0.11
            nRotorPoints = 1
            prob['model_params:wake_combination_method'] = wake_combination_method
            prob['model_params:ti_calculation_method'] = ti_calculation_method
            prob['model_params:calc_k_star'] = calc_k_star
            prob['model_params:sort'] = sort_turbs
            prob['model_params:z_ref'] = z_ref
            prob['model_params:z_0'] = z_0
            prob['model_params:ky'] = ky
            prob['model_params:kz'] = kz
            prob['model_params:I'] = I
            prob['model_params:shear_exp'] = shear_exp
            print "in gauss setup"
            if nRotorPoints > 1:
                prob['model_params:RotorPointsY'], prob['model_params:RotorPointsZ'] = sunflower_points(nRotorPoints)
                print "setting rotor points"
    return probs
# def set_params(probs):
# floris params
# probs[2]['model_params:kd'] = 0.224109
# probs[2]['model_params:initialWakeAngle'] = 3.384485
# probs[2]['model_params:initialWakeDisplacement'] = 8.407578
# probs[2]['model_params:bd'] = -0.010000
# probs[2]['model_params:ke'] = 0.055072
# probs[2]['model_params:me'] = np.array([-0.000001, 0.181752, 1.0])
# probs[2]['model_params:MU'] = np.array([0.933389, 1.0, 17.558286])
# probs[2]['model_params:aU'] = 6.751072
# probs[2]['model_params:bU'] = 1.681766
# probs[2]['model_params:cos_spread'] = 9.989090
# gauss params
# probs[0]['model_params:ke'] = 0.052
# probs[0]['model_params:spread_angle'] = 6.
# probs[0]['model_params:rotation_offset_angle'] = 2.0
# for axialInd calc only
# probs[0]['model_params:ke'] = 0.050688
# probs[0]['model_params:spread_angle'] = 7.562716
# probs[0]['model_params:rotation_offset_angle'] = 3.336568
# for axialInd and inflow adjust
# probs[0]['model_params:ke'] = 0.052333
# probs[0]['model_params:spread_angle'] = 8.111330
# probs[0]['model_params:rotation_offset_angle'] = 2.770265
# for inflow adjust only
# probs[0]['model_params:ke'] = 0.052230
# probs[0]['model_params:spread_angle'] = 6.368191
# probs[0]['model_params:rotation_offset_angle'] = 1.855112
# for added n_st_dev param #1
# probs[0]['model_params:ke'] = 0.050755
# probs[0]['model_params:spread_angle'] = 11.205766#*0.97
# probs[0]['model_params:rotation_offset_angle'] = 3.651790
# probs[0]['model_params:n_std_dev'] = 9.304371
# for added n_st_dev param #2
# probs[0]['model_params:ke'] = 0.051010
# probs[0]['model_params:spread_angle'] = 11.779591
# probs[0]['model_params:rotation_offset_angle'] = 3.564547
# probs[0]['model_params:n_std_dev'] = 9.575505
# for decoupled ky with n_std_dev = 4
# probs[0]['model_params:ke'] = 0.051145
# probs[0]['model_params:spread_angle'] = 2.617982
# probs[0]['model_params:rotation_offset_angle'] = 3.616082
# probs[0]['model_params:ky'] = 0.211496
# for integrating for decoupled ky with n_std_dev = 4, linear, integrating
# probs[0]['model_params:ke'] = 0.016969
# probs[0]['model_params:spread_angle'] = 0.655430
# probs[0]['model_params:rotation_offset_angle'] = 3.615754
# probs[0]['model_params:ky'] = 0.195392
# for integrating for decoupled ky with n_std_dev = 4, linear, integrating
# probs[0]['model_params:ke'] = 0.008858
# probs[0]['model_params:spread_angle'] = 0.000000
# probs[0]['model_params:rotation_offset_angle'] = 4.035276
# probs[0]['model_params:ky'] = 0.199385
# for decoupled ke with n_std_dev=4, linear, not integrating
# probs[0]['model_params:ke'] = 0.051190
# probs[0]['model_params:spread_angle'] = 2.619202
# probs[0]['model_params:rotation_offset_angle'] = 3.629337
# probs[0]['model_params:ky'] = 0.211567
# for n_std_dev = 4, error = 1332.49, not integrating, power law
# probs[0]['model_params:ke'] = 0.051360
# probs[0]['model_params:rotation_offset_angle'] = 3.197348
# probs[0]['model_params:Dw0'] = 1.804024
# probs[0]['model_params:Dw0'] = 1.63
# probs[0]['model_params:m'] = 0.00
# for n_std_dev = 5.4, error = 1136.21, not integrating, power law
# probs[0]['model_params:ke'] = 0.051147
# probs[0]['model_params:rotation_offset_angle'] = 3.616963
# probs[0]['model_params:Dw0'] = 1.834599
# probs[0]['model_params:m'] = 0.096035
# for decoupled ky with n_std_dev = 4, error = 1630.8, with integrating, power law
# probs[0]['model_params:ke'] = 0.033165
# probs[0]['model_params:rotation_offset_angle'] = 3.328051
# probs[0]['model_params:Dw0'] = 1.708328
# probs[0]['model_params:m'] = 0.0
# for decoupled ky with n_std_dev = 4, error = 1140.59, not integrating, power law for expansion,
# linear for yaw
# probs[0]['model_params:ke'] = 0.050741
# probs[0]['model_params:rotation_offset_angle'] = 3.628737
# probs[0]['model_params:Dw0'] = 0.846582
# probs[0]['model_params:ky'] = 0.207734
# for decoupled ky with n_std_dev = 4, error = 1058.73, integrating, power law for expansion,
# linear for yaw
# probs[0]['model_params:ke'] = 0.016129
# probs[0]['model_params:rotation_offset_angle'] = 3.644356
# probs[0]['model_params:Dw0'] = 0.602132
# probs[0]['model_params:ky'] = 0.191178
# for power law yaw, deficit, and expansion, error = 1759.5
# probs[0]['model_params:rotation_offset_angle'] = 1.393646
# probs[0]['model_params:Dw0'] = 1.254036
# probs[0]['model_params:m'] = 0.166732
    # for power law yaw, deficit, and expansion (recommended values)
# probs[0]['model_params:rotation_offset_angle'] = 1.393646
# probs[0]['model_params:Dw0'] = 1.33
# probs[0]['model_params:m'] = 0.33
# for power law all, Dw0 separate, tuned m
# probs[0]['model_params:rotation_offset_angle'] = 1.454099
# probs[0]['model_params:Dw0'] = np.array([1.305050, 1.401824, 1.420907])
# probs[0]['model_params:m'] = 0.101128
# for power law all, Dw0 separate, constant m
# probs[0]['model_params:rotation_offset_angle'] = 1.454099
# probs[0]['model_params:rotation_offset_angle'] = 1.096865
# probs[0]['model_params:Dw0'] = np.array([1.281240, 0.897360, 0.911161])
# probs[0]['model_params:Dw0'] = np.array([1.3, 1.00005, 1.])
# probs[0]['model_params:m'] = 0.
# for power all but deficit with constant m
# probs[0]['model_params:ke'] = 0.051126
# probs[0]['model_params:rotation_offset_angle'] = 3.603684
# probs[0]['model_params:Dw0'] = np.array([1.794989, 0.863206, 1.])
# probs[0]['model_params:m'] = 0.33
# for power law all with constant m
# probs[0]['model_params:rotation_offset_angle'] = 0.620239
# probs[0]['model_params:Dw0'] = np.array([1.265505, 0.958504, 0.896609])
# probs[0]['model_params:Dw0'] = np.array([1.3, 0.958504, 0.896609])
# probs[0]['model_params:m'] = 0.33
# for power law all with tuned m
# probs[0]['model_params:rotation_offset_angle'] = 0.727846
# probs[0]['model_params:Dw0'] = np.array([1.185009, 1.140757, 1.058244])
# probs[0]['model_params:m'] = 0.230722
# for power law all with tuned m and double weight yaw error
# probs[0]['model_params:rotation_offset_angle'] = 0.802148541875
# probs[0]['model_params:Dw0'] = np.array([1.18307813, 1.16833547, 1.08521648])
# probs[0]['model_params:m'] = 0.210864251457
# for power law all with tuned m and 20x weight yaw error
# probs[0]['model_params:rotation_offset_angle'] = 0.871926
# probs[0]['model_params:Dw0'] = np.array([1.190799, 1.223558, 1.142646])
# probs[0]['model_params:m'] = 0.167548
# for power law all with individually tuned m and Dw0
# probs[0]['model_params:rotation_offset_angle'] = 0.811689835284
# probs[0]['model_params:Dw0'] = np.array([1.22226021, 1.39849858, 0.97207545])
# probs[0]['model_params:m'] = np.array([0.15566507, 0.1, 0.28422703])
# for power law all with individually tuned m and Dw0, yaw weighted by 3
# probs[0]['model_params:rotation_offset_angle'] = 0.884526810188
# probs[0]['model_params:Dw0'] = np.array([1.21546909, 1.37702043, 0.95703538])
# probs[0]['model_params:m'] = np.array([0.17499415, 0.1, 0.28738021])
# for power law all with individually tuned m and Dw0, yaw weighted by 3
# probs[0]['model_params:rotation_offset_angle'] = 0.726281139043
# probs[0]['model_params:Dw0'] = np.array([10.80879724, 1.25208657, 0.62180341])
# probs[0]['model_params:m'] = np.array([0.5014354, 0.1, 0.53332655])
# for individual power law for diam and deficit. Yaw with linear model
# probs[0]['model_params:rotation_offset_angle'] = 0.810644329131
# probs[0]['model_params:Dw0'] = np.array([1.3, 1.64288886, 0.9818137])
# probs[0]['model_params:m'] = np.array([0.33, 0., 0.27860778])
# probs[0]['model_params:ky'] = 0.0679899837662
# for power law all with individually tuned m and Dw0, using 2*a instead of a-1
# probs[0]['model_params:rotation_offset_angle'] = 2.11916457882
# probs[0]['model_params:Dw0'] = np.array([1.86868658, 1.6258426, 0.94648549])
# probs[0]['model_params:m'] = np.array([0., 0., 0.29782246])
# # for power law with individually tuned m and Dw0, linear yaw, including rotor offset, using 2*a instead of a-1
# probs[0]['model_params:rotation_offset_angle'] = 1.482520
# probs[0]['model_params:ky'] = 0.204487
# probs[0]['model_params:Dw0'] = np.array([1.3, 0.607414, 0.107801])
# probs[0]['model_params:m'] = np.array([0.33, 0., 0.964934])
# for power law with individually tuned m and Dw0 including rotor offset for diam and deficit, using 2*a instead of a-1
# probs[0]['model_params:rotation_offset_angle'] = 2.054952
# probs[0]['model_params:Dw0'] = np.array([1.869272, 0.612485, 0.123260])
# probs[0]['model_params:m'] = np.array([0., 0., 0.885561])
# for power law with individually tuned m and Dw0 using Aitken power law for deficit, linear offset
# probs[0]['model_params:rotation_offset_angle'] = 0.921858
# probs[0]['model_params:ky'] = 0.085021
# probs[0]['model_params:Dw0'] = np.array([1.342291, 1.641186, 0.728072])
# probs[0]['model_params:m'] = np.array([0.100775, 0., -0.585698])
# for power law with individually tuned m and Dw0 using Aitken power law for deficit, inflow for Fleming data at 8.3....
# probs[0]['model_params:rotation_offset_angle'] = 1.062842
# probs[0]['model_params:rotation_offset_angle'] = 2.062842
# probs[0]['model_params:Dw0'] = np.array([1.333577, 1.621352, 0.639195])
# probs[0]['model_params:m'] = np.array([0.130396, 0., -0.522295])
# for power law with individually tuned m and Dw0 using Aitken power law for deficit, inflow for Fleming data at 8.3....
# probs[0]['model_params:rotation_offset_angle'] = 0.946076
# probs[0]['model_params:Dw0'] = np.array([1.353735, 1.623139, 0.656002])
# probs[0]['model_params:m'] = np.array([0.236072, 0., -0.541287])
# for power law with suggested m and Dw0 using Aitken power law for deficit, inflow for Fleming data at 8.3....
# probs[0]['model_params:rotation_offset_angle'] = 1.5
# probs[0]['model_params:Dw0'] = np.array([1.3, 1.3, 0.56])
# probs[0]['model_params:m'] = np.array([0.33, 0.33, -0.57])
# linear everything - coupled - tuned to all data - inflow for Fleming data at 8.3....
# probs[0]['model_params:ke'] = 0.052166
# probs[0]['model_params:spread_angle'] = 3.156446
# probs[0]['model_params:rotation_offset_angle'] = 1.124459
# probs[0]['model_params:ky'] = 0.247883
# for n_std_dev = 4, error = 1332.49, not integrating, power law
# probs[0]['model_params:ke'] = 0.051360
# probs[0]['model_params:rotation_offset_angle'] = 3.197348
# probs[0]['model_params:Dw0'] = np.array([1.804024, 1.804024, 1.804024])
# probs[0]['model_params:m'] = np.array([0.0, 0.0, 0.0])
# for n_std_dev = 4, linear all, 2*D
# probs[0]['model_params:ke'] = 0.112334
# probs[0]['model_params:ky'] = 0.468530
# probs[0]['model_params:spread_angle'] = 0.0
# probs[0]['model_params:rotation_offset_angle'] = 1.915430
# rederived yaw with power. Power law all
# probs[0]['model_params:rotation_offset_angle'] = 1.5*0.946076
# probs[0]['model_params:Dw0'] = np.array([1.353735, 1.623139, 0.656002])
# probs[0]['model_params:m'] = np.array([0.236072, 0.0, -0.541287])
# rederived yaw with power. Power law all. Dw0[0]=Dw0[1], m[0]=m[1]
# probs[0]['model_params:rotation_offset_angle'] = 1.02985
# probs[0]['model_params:Dw0'] = np.array([1.388779, 1.388779, 0.642637])
# probs[0]['model_params:m'] = np.array([0.100669, 0.100669, -0.530337])
# rederived yaw with power. Power law all. Dw0[0]=Dw0[1], m[0]=m[1], tuned to all data
# probs[0]['model_params:rotation_offset_angle'] = 1.052238
# probs[0]['model_params:Dw0'] = np.array([1.364724, 1.364724, 0.663934])
# probs[0]['model_params:m'] = np.array([0.092746, 0.092746, -0.542009])
# rederived yaw with power. Power law all. Dw0[0]=Dw0[1], m[0]=m[1], tuned to all data
# rederived deficit using actuator disc and momentum balance
# probs[0]['model_params:rotation_offset_angle'] = 2.089085
# probs[0]['model_params:Dw0'] = np.array([1.488695, 1.488695, 0.560000])
# probs[0]['model_params:m'] = np.array([0.000000, 0.000000, -0.542009])
# probs[0]['model_params:rotation_offset_angle'] = 1.749621
# probs[0]['model_params:Dw0'] = np.array([1.267740, 1.267740, 0.560000])
# probs[0]['model_params:m'] = np.array([0.000000, 0.000000, -0.542009])
# power law as per Aitken et all plus axial induction*2
# this is a pretty reasonable fit, but defines no expansion in the wake
# probs[0]['model_params:rotation_offset_angle'] = 2.229160
# probs[0]['model_params:Dw0'] = np.array([1.889748, 1.603116, 1.037203])
# probs[0]['model_params:m'] = np.array([0.000000, 0.000000, -0.563005])
# power law as per Aitken et all plus axial induction*2, added x shift by 1D
# probs[0]['model_params:rotation_offset_angle'] = 2.078138 + 1.5
# probs[0]['model_params:Dw0'] = np.array([2.040208, 1.596522, 1.474140])
# probs[0]['model_params:m'] = np.array([0.00000, 0.000000, -0.698327])
# power law as per Aitken et all plus axial induction*2, added x shift by 1D except for deficit
# also a reasonable fit, but no wake expansion
# probs[0]['model_params:rotation_offset_angle'] = 2.038664
# probs[0]['model_params:Dw0'] = np.array([2.038664, 1.601559, 1.055975])
# probs[0]['model_params:m'] = np.array([0.00000, 0.000000, -0.574079])
# power law as per Aitken et all plus axial induction*2, added y shift tunable
# excellent fit, but no wake expansion and uses linear yaw offset
# probs[0]['model_params:rotation_offset_angle'] = 8.466369
# probs[0]['model_params:Dw0'] = np.array([1.893739, 1.586107, 0.987548])
# probs[0]['model_params:m'] = np.array([0.00000, 0.000000, -0.524822])
# probs[0]['model_params:yshift'] = -21.775754
# probs[0]['model_params:rotation_offset_angle'] = 10.762858
# probs[0]['model_params:Dw0'] = np.array([1.748372, 1.345945, 1.045982])
# probs[0]['model_params:m'] = np.array([0.100000, 0.100000, -0.556969])
# probs[0]['model_params:yshift'] = -30.551647
# using Bastankhah with linear yaw
# probs[0]['model_params:ke'] = 0.077491
# probs[0]['model_params:ky'] = 0.159944
# probs[0]['model_params:yshift'] = -4.614311
# Bastankhah with Bastankhah yaw
# probs[0]['model_params:ke'] = 0.07747
# probs[0]['model_params:ky'] = 0.159944
# probs[0]['model_params:yshift'] = -4.614311
# probs[0]['model_params:ke'] = 0.078413
# probs[0]['model_params:ky'] = 0.641951
# probs[0]['model_params:yshift'] = -3.870224
# probs[0]['model_params:ke'] = 0.038558
# probs[0]['model_params:ky'] = 0.078129
# probs[0]['model_params:yshift'] = -19.948941
# probs[0]['model_params:rotation_offset_angle'] = -4.0
# probs[0]['model_params:ke'] = 0.038993
# probs[0]['model_params:ky'] = 0.087260
# probs[0]['model_params:yshift'] = -4.614311
# probs[0]['model_params:ke'] = 0.0390487790134
# probs[0]['model_params:ky'] = 0.039
# probs[0]['model_params:rotation_offset_angle'] = 0.72681975016
# ke = ky tuned to all
# probs[0]['model_params:ke'] = 0.039166
# probs[0]['model_params:ky'] = 0.039166
# probs[0]['model_params:rotation_offset_angle'] = 1.044754
# ke != ky tuned to all
# probs[0]['model_params:ke'] = 0.039200
# probs[0]['model_params:ky'] = 0.048369
# probs[0]['model_params:rotation_offset_angle'] = 1.175184
# ke != ky tuned to 7D
# probs[0]['model_params:ke'] = 0.035706
# probs[0]['model_params:ky'] = 0.046970
# probs[0]['model_params:rotation_offset_angle'] = 2.342700
# ke = ky tuned to 7D
# probs[0]['model_params:ke'] = 0.036002
# probs[0]['model_params:ky'] = 0.036002
# probs[0]['model_params:rotation_offset_angle'] = 1.5
# Bastankhah with power yaw
# probs[0]['model_params:ke'] = 0.07747
# probs[0]['model_params:Dw0'] = np.array([1.49752, 1.3, 1.3])
# probs[0]['model_params:m'] = np.array([0.23975, 0.33, 0.33])
# probs[0]['model_params:yshift'] = -4.63626
# linear everything - coupled - tuned to all data - inflow for Fleming data at 8.3....
# probs[0]['model_params:ke'] = 0.051690
# probs[0]['model_params:spread_angle'] = 3.115443
# probs[0]['model_params:rotation_offset_angle'] = 1.235173
# probs[0]['model_params:ky'] = 0.205729
# probs[0]['model_params:integrate'] = False
# probs[0]['model_params:spread_mode'] = 'power'
# probs[0]['model_params:yaw_mode'] = 'power'
# probs[0]['model_params:n_std_dev'] = 4.
if __name__ == "__main__":
probs = setup_probs()
# set_params(probs)
# time the models
import time
t1 = time.time()
for i in range(0, 100):
probs[0].run()
t2 = time.time()
for i in range(0, 100):
probs[1].run()
t3 = time.time()
# gauss time: 0.0580031871796
# floris time: 0.10697388649
print 'gauss time: ', t2-t1
print 'floris time: ', t3-t2
print probs[1]['wtVelocity0']
print probs[1]['wtPower0']
print probs[1]['AEP']
# load data
ICOWESdata = loadmat('../data/YawPosResults.mat')
with open('../data/yawPower.p', 'rb') as handle:
yawrange_4D, SOWFApower_yaw_4D, _, _ = pickle.load(handle)
with open('../data/offset4DPower.p', 'rb') as handle:
posrange_cs_4D, SOWFApower_cs_4D = pickle.load(handle)
with open('../data/offset6DPower.p', 'rb') as handle:
posrange_cs_6D, SOWFApower_cs_6D = pickle.load(handle)
with open('../data/spacePower.p', 'rb') as handle:
posrange_ds, SOWFApower_ds = pickle.load(handle)
# set plot params
rotor_diameter = probs[0]['rotorDiameter'][0]
ICOWESvelocity = 8.0
PFvelocity = 8.48673684
PFvelocity = 8.38673684
power_scalar = 1E-3
distance_scalar = 1./rotor_diameter
velocity_scalar = 1.
angle_scalar = 1.
floris_color = 'b'
gauss_color = 'r'
floris_tuned_color = 'g'
floris_line = '-'
floris_tuned_line = '-.'
gauss_line = '--'
FlorisError = 0.0
GaussError = 0.0
FlorisTunedError = 0.0
# ################## compare yaw ######################
YawPowFig, YawPowAx = plt.subplots(ncols=2, nrows=1, sharey=False)
plt.hold(True)
# 4D yaw
yawrange = np.array(list(yawrange_4D))
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
# set to 4D positions and inflow velocity
for prob in probs:
prob['turbineX'] = np.array([1118.1, 1556.0])
prob['turbineY'] = np.array([1279.5, 1532.3])
prob['windSpeeds'] = np.array([PFvelocity])
for yaw1 in yawrange:
for prob in probs:
prob['yaw0'] = np.array([yaw1, 0.0])
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
# print FlorisPower
print FlorisPower
print GaussianPower
SOWFApower = SOWFApower_yaw_4D*1E-3
plot_data_vs_model(ax=YawPowAx[0], modelx=yawrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=angle_scalar, yscalar=power_scalar, legend=True)
plot_data_vs_model(ax=YawPowAx[0], datax=yawrange, datay=SOWFApower, modelx=yawrange,
modely=GaussianPower, title='4D', xlabel='yaw angle (deg.)', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=angle_scalar, yscalar=power_scalar, legend=True)
# plot_data_vs_model(ax=YawPowAx[0], datax=yawrange, datay=SOWFApower, modelx=yawrange,
# modely=FlorisPowerTuned, title='4D', xlabel='yaw angle (deg.)', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=angle_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# 7D yaw
yawrange = ICOWESdata['yaw'][0]
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
# set to 7D positions
for prob in probs:
prob['turbineX'] = np.array([1118.1, 1881.9])
prob['turbineY'] = np.array([1279.5, 1720.5])
prob['windSpeeds'] = np.array([ICOWESvelocity])
# run analysis
for yaw1 in yawrange:
for prob in probs:
prob['yaw0'] = np.array([yaw1, 0.0])
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
# plot
SOWFApower = np.array([ICOWESdata['yawPowerT1'][0], ICOWESdata['yawPowerT2'][0]]).transpose()/1000.
plot_data_vs_model(ax=YawPowAx[1], modelx=yawrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=angle_scalar, yscalar=power_scalar, legend=True)
plot_data_vs_model(ax=YawPowAx[1], datax=yawrange, datay=SOWFApower, modelx=yawrange,
modely=GaussianPower, title='7D', xlabel='yaw angle (deg.)', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=angle_scalar, yscalar=power_scalar, legend=True)
# plot_data_vs_model(ax=YawPowAx[1], datax=yawrange, datay=SOWFApower, modelx=yawrange,
# modely=FlorisPowerTuned, title='7D', xlabel='yaw angle (deg.)', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=angle_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# ################## compare position ######################
PosPowFig, PosPowAx = plt.subplots(ncols=2, nrows=2, sharey=False)
for prob in probs:
prob['yaw0'] = np.array([0.0, 0.0])
prob['windSpeeds'] = np.array([PFvelocity])
# position crosswind 4D
posrange = np.array(list(posrange_cs_4D))
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
for pos2 in posrange:
# Define turbine locations and orientation (4D)
effUdXY = 0.523599
Xinit = np.array([1118.1, 1556.0])
Yinit = np.array([1279.5, 1532.3])
XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
[np.sin(effUdXY), np.cos(effUdXY)]]),
np.array([[0., 0], [0, pos2]]))
for prob in probs:
prob['turbineX'] = XY[0, :]
prob['turbineY'] = XY[1, :]
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
SOWFApower = SOWFApower_cs_4D*1E-3
# print error_turbine2
plot_data_vs_model(ax=PosPowAx[0, 0], modelx=posrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=power_scalar)
plot_data_vs_model(ax=PosPowAx[0, 0], datax=posrange, datay=SOWFApower, modelx=posrange,
modely=GaussianPower, title='4D', xlabel='y/D', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=power_scalar)
# plot_data_vs_model(ax=PosPowAx[0, 0], datax=posrange, datay=SOWFApower, modelx=posrange,
# modely=FlorisPowerTuned, title='4D', xlabel='y/D', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# position crosswind 6D
posrange = np.array(list(posrange_cs_6D))
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
for prob in probs:
prob['windSpeeds'] = np.array([PFvelocity])
for pos2 in posrange:
# Define turbine locations and orientation (4D)
effUdXY = 0.523599
Xinit = np.array([1118.1, 1556.0])
Yinit = np.array([1279.5, 1532.3])
XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
[np.sin(effUdXY), np.cos(effUdXY)]]),
np.array([[0., 0], [0, pos2]]))
for prob in probs:
prob['turbineX'] = XY[0, :]
prob['turbineY'] = XY[1, :]
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
SOWFApower = SOWFApower_cs_6D*1E-3
# print error_turbine2
plot_data_vs_model(ax=PosPowAx[0, 1], modelx=posrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=power_scalar)
plot_data_vs_model(ax=PosPowAx[0, 1], datax=posrange, datay=SOWFApower, modelx=posrange,
modely=GaussianPower, title='6D', xlabel='y/D', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=power_scalar)
# plot_data_vs_model(ax=PosPowAx[0, 1], datax=posrange, datay=SOWFApower, modelx=posrange,
# modely=FlorisPowerTuned, title='6D', xlabel='y/D', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# position crosswind 7D
posrange = ICOWESdata['pos'][0]
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
for prob in probs:
prob['windSpeeds'] = np.array([ICOWESvelocity])
for pos2 in posrange:
# Define turbine locations and orientation
effUdXY = 0.523599
Xinit = np.array([1118.1, 1881.9])
Yinit = np.array([1279.5, 1720.5])
XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
[np.sin(effUdXY), np.cos(effUdXY)]]),
np.array([[0., 0], [0, pos2]]))
for prob in probs:
prob['turbineX'] = XY[0, :]
prob['turbineY'] = XY[1, :]
prob.run()
GaussianPower.append(list(probs[0]['wtPower0']))
FlorisPower.append(list(probs[1]['wtPower0']))
FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
SOWFApower = np.array([ICOWESdata['posPowerT1'][0], ICOWESdata['posPowerT2'][0]]).transpose()/1000.
# print error_turbine2
plot_data_vs_model(ax=PosPowAx[1, 0], modelx=posrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=power_scalar)
plot_data_vs_model(ax=PosPowAx[1, 0], datax=posrange, datay=SOWFApower, modelx=posrange,
modely=GaussianPower, title='7D', xlabel='y/D', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=power_scalar)
# plot_data_vs_model(ax=PosPowAx[1, 0], datax=posrange, datay=SOWFApower, modelx=posrange,
# modely=FlorisPowerTuned, title='7D', xlabel='y/D', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=power_scalar)
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# ------------- power vs downstream (inline) turbine separation -------------
# Sweep the second turbine directly downstream of the first and record the
# per-turbine power predicted by each model.
# NOTE(review): the loop indentation below was reconstructed -- the extracted
# source had lost all leading whitespace; verify against the original file.
posrange = np.array(list(posrange_ds))*rotor_diameter
GaussianPower = list()
FlorisPower = list()
FlorisPowerTuned = list()
# Common setup for all three model problems: single wind speed/direction,
# both turbines on the y = 0 line, wind from 270 deg (blowing along +x).
for prob in probs:
    prob['windSpeeds'] = np.array([PFvelocity])
    prob['turbineY'] = np.array([0.0, 0.0])
    prob['windDirections'] = np.array([270.0])
for pos2 in posrange:
    # Place turbine 2 a distance pos2 downstream and evaluate every model.
    for prob in probs:
        prob['turbineX'] = np.array([0.0, pos2])
        prob.run()
    # probs[0]=Gauss, probs[1]=FLORIS, probs[2]=re-tuned FLORIS.
    GaussianPower.append(list(probs[0]['wtPower0']))
    FlorisPower.append(list(probs[1]['wtPower0']))
    FlorisPowerTuned.append(list(probs[2]['wtPower0']))
GaussianPower = np.array(GaussianPower)
FlorisPower = np.array(FlorisPower)
FlorisPowerTuned = np.array(FlorisPowerTuned)
# SOWFA reference power, scaled by 1e-3 -- presumably kW -> MW; confirm units.
SOWFApower = SOWFApower_ds*1E-3
# print error_turbine2
# Plot FLORIS power vs downstream separation on the second power axis.
plot_data_vs_model(ax=PosPowAx[1, 1], modelx=posrange,
modely=FlorisPower,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=power_scalar)
# Overlay the SOWFA reference data and the Gauss model prediction.
plot_data_vs_model(ax=PosPowAx[1, 1], datax=posrange, datay=SOWFApower, modelx=posrange,
modely=GaussianPower, title='Downstream', xlabel='x/D', ylabel='Power (MW)',
datalabel='SOWFA', modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=power_scalar)
# plot_data_vs_model(ax=PosPowAx[1, 1], datax=posrange, datay=SOWFApower, modelx=posrange,
# modely=FlorisPowerTuned, title='Downstream', xlabel='x/D', ylabel='Power (MW)',
# datalabel='SOWFA', modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=power_scalar)
# Add the downstream-sweep sum-of-squares error of the waked turbine
# (column 1) to each model's running error total.
FlorisError += np.sum((SOWFApower[:, 1]-FlorisPower[:, 1])**2)
GaussError += np.sum((SOWFApower[:, 1]-GaussianPower[:, 1])**2)
FlorisTunedError += np.sum((SOWFApower[:, 1]-FlorisPowerTuned[:, 1])**2)
# NOTE(review): Python 2 print statement -- this script targets Python 2 and
# will not parse under Python 3 without porting.
print 'Floris error: ', FlorisError, ' Gauss error: ', GaussError, 'Floris Re-Tuned Error: ', FlorisTunedError
# ################## compare velocity ######################
# 2x2 figure for the hub velocity comparisons.
PosVelFig, PosVelAx = plt.subplots(ncols=2, nrows=2, sharey=False)
# ------------- velocity vs crosswind offset at 7D downstream ---------------
# NOTE(review): the loop indentation below was reconstructed -- the extracted
# source had lost all leading whitespace; verify against the original file.
posrange = np.linspace(-3.*rotor_diameter, 3.*rotor_diameter, num=1000)
# Common setup: no yaw, wind from 270 deg, turbine 2 fixed 7D downstream.
for prob in probs:
    prob['yaw0'] = np.array([0.0, 0.0])
    prob['windDirections'] = np.array([270.])
    prob['turbineX'] = np.array([0, 7.*rotor_diameter])
GaussianVelocity = list()
FlorisVelocity = list()
FlorisVelocityTuned = list()
for pos2 in posrange:
    # Offset turbine 2 crosswind by pos2 and evaluate every model.
    for prob in probs:
        prob['turbineY'] = np.array([0, pos2])
        prob.run()
    # probs[0]=Gauss, probs[1]=FLORIS, probs[2]=re-tuned FLORIS.
    GaussianVelocity.append(list(probs[0]['wtVelocity0']))
    FlorisVelocity.append(list(probs[1]['wtVelocity0']))
    FlorisVelocityTuned.append(list(probs[2]['wtVelocity0']))
FlorisVelocity = np.array(FlorisVelocity)
GaussianVelocity = np.array(GaussianVelocity)
FlorisVelocityTuned = np.array(FlorisVelocityTuned)
# Plot model hub velocities normalized by PFvelocity (presumably the
# freestream/precursor-field speed -- confirm) vs crosswind offset at 7D.
plot_data_vs_model(ax=PosVelAx[1, 0], modelx=posrange,
modely=FlorisVelocity/PFvelocity,
modellabel='FLORIS', modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=velocity_scalar, sum=False)
plot_data_vs_model(ax=PosVelAx[1, 0], modelx=posrange, modely=GaussianVelocity/PFvelocity, title='7D',
xlabel='y/D', ylabel='Velocity (m/s)',
modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=velocity_scalar, sum=False)
# plot_data_vs_model(ax=PosVelAx[1, 0], modelx=posrange, modely=FlorisVelocityTuned/PFvelocity, title='7D',
# xlabel='y/D', ylabel='Velocity (m/s)',
# modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=velocity_scalar, sum=False)
# plt.legend()
# plt.show()
# ------------- velocity vs downstream (inline) separation ------------------
# Sweep turbine 2 downstream of turbine 1 (both on y = 0) and record hub
# velocities from each model.
# NOTE(review): the loop indentation below was reconstructed -- the extracted
# source had lost all leading whitespace; verify against the original file.
posrange = np.linspace(-1.*rotor_diameter, 30.*rotor_diameter, num=1000)
for prob in probs:
    prob['turbineY'] = np.array([0, 0])
GaussianVelocity = list()
FlorisVelocity = list()
FlorisVelocityTuned = list()
for pos2 in posrange:
    # Place turbine 2 a distance pos2 downstream and evaluate every model.
    for prob in probs:
        prob['turbineX'] = np.array([0, pos2])
        prob.run()
    # probs[0]=Gauss, probs[1]=FLORIS, probs[2]=re-tuned FLORIS.
    GaussianVelocity.append(list(probs[0]['wtVelocity0']))
    FlorisVelocity.append(list(probs[1]['wtVelocity0']))
    FlorisVelocityTuned.append(list(probs[2]['wtVelocity0']))
FlorisVelocity = np.array(FlorisVelocity)
GaussianVelocity = np.array(GaussianVelocity)
FlorisVelocityTuned = np.array(FlorisVelocityTuned)
# Plot normalized hub velocities vs downstream separation (inline case).
plot_data_vs_model(ax=PosVelAx[1, 1], modelx=posrange,
modely=FlorisVelocity/PFvelocity, modellabel='FLORIS',
modelcolor=floris_color, modelline=floris_line,
xscalar=distance_scalar, yscalar=velocity_scalar, sum=False, front=False, legend=True)
plot_data_vs_model(ax=PosVelAx[1, 1], modelx=posrange, modely=GaussianVelocity/PFvelocity, title='Downstream (inline)',
xlabel='y/D', ylabel='Velocity (m/s)',
modellabel='Gauss', modelcolor=gauss_color, modelline=gauss_line,
xscalar=distance_scalar, yscalar=velocity_scalar, sum=False, front=False, legend=True)
# plot_data_vs_model(ax=PosVelAx[1, 1], modelx=posrange, modely=FlorisVelocityTuned/PFvelocity, title='Downstream (inline)',
# xlabel='y/D', ylabel='Velocity (m/s)',
# modellabel='Floris Re-Tuned', modelcolor=floris_tuned_color,
# modelline=floris_tuned_line, xscalar=distance_scalar, yscalar=velocity_scalar,
# sum=False, front=False, legend=True)
# Vertical dotted marker at x/D = 7, the separation used as the tuning point.
PosVelAx[1, 1].plot(np.array([7.0, 7.0]), np.array([0.0, 1.2]), ':k', label='Tuning Point')
plt.xlabel('x/D')
plt.ylabel('Velocity (m/s)')
# plt.legend(loc=4,labels=['FLORIS, SOWFA, BPA'])
plt.show()
# NOTE(review): removed trailing non-code residue ("Subsets and Splits" /
# "No community queries yet" dataset-viewer footer text) that was captured
# along with this source and would be a Python syntax error.