code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteSegFault(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Verify that lldb-server/debugserver report the SIGSEGV raised by a crashing inferior."""

    mydir = TestBase.compute_mydir(__file__)
    GDB_REMOTE_STOP_CODE_BAD_ACCESS = 0x91

    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def inferior_seg_fault_received(self, expected_signo):
        """Launch the segfaulting inferior, continue it, and check the stop signal."""
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["segfault"])
        self.assertIsNotNone(procs)
        # Continue, then capture the signal number from the $T stop-reply packet.
        stop_reply = {
            "direction": "send",
            "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",
            "capture": {1: "hex_exit_code"},
        }
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8", stop_reply], True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        hex_exit_code = context.get("hex_exit_code")
        self.assertIsNotNone(hex_exit_code)
        self.assertEqual(int(hex_exit_code, 16), expected_signo)

    @debugserver_test
    def test_inferior_seg_fault_received_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.inferior_seg_fault_received(self.GDB_REMOTE_STOP_CODE_BAD_ACCESS)

    @skipIfWindows  # No signal is sent on Windows.
    @llgs_test
    def test_inferior_seg_fault_received_llgs(self):
        self.init_llgs_test()
        self.build()
        self.inferior_seg_fault_received(lldbutil.get_signal_number('SIGSEGV'))
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/tools/lldb-server/inferior-crash/TestGdbRemoteSegFault.py | Python | bsd-3-clause | 1,724 |
class DuplicateEntryError(Exception):
    """Raised when an operation would create an entry that already exists."""
    def __init__(self, value):
        self.parameter = value
    def __str__(self):
        return '%r' % (self.parameter,)
class ResourceError(Exception):
    """Raised when a requested resource cannot be obtained or accessed."""
    def __init__(self, value):
        self.parameter = value
    def __str__(self):
        return '%r' % (self.parameter,)
class DataFormatError(Exception):
    """Raised when supplied data does not match the expected format."""
    def __init__(self, value):
        self.parameter = value
    def __str__(self):
        return '%r' % (self.parameter,)
# Neural network layers (or function nodes)
# Includes an in-memory data layer, label layer, convolution, pooling, dropout, and other popular operations. Also includes cross-entropy, softmax, hinge, and squared loss layers.
# Author: Sameh Khamis (sameh@umiacs.umd.edu)
# License: GPLv2 for non-commercial research purposes only
import numpy as np
import numeric
from ..image import im2col, col2im, transform, invtransform
DTYPE = np.float32
class Node:
    """Base class for every vertex in the computation graph.

    A node holds a list of input nodes (`_input`), a forward value
    (`_value`) and a gradient accumulator of the same shape (`_gradient`).
    Operator overloads (*, +, **, unary -, .T) build new graph nodes
    instead of computing immediately.
    """
    def __init__(self):
        # don't save the outputs too! the circular refs make python's gc fail big
        self._input = []
        self._value = np.array([])
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        # Default forward only resets the gradient accumulator; subclasses
        # overwrite _value and then call this reset pattern themselves.
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # No-op by default; subclasses accumulate into their inputs' _gradient.
        pass
    @property
    def input(self):
        # List of upstream nodes feeding this one.
        return self._input
    @property
    def value(self):
        # Result of the most recent forward pass.
        return self._value
    @property
    def gradient(self):
        # Gradient accumulated by the most recent backward pass.
        return self._gradient
    def __mul__(self, other):
        # node * scalar -> ScalarMul; node * node -> elementwise Mul
        return ScalarMul(self, other) if np.isscalar(other) else Mul(self, other)
    def __rmul__(self, other):
        return ScalarMul(self, other) if np.isscalar(other) else Mul(other, self)
    def __add__(self, other):
        return Add(self, other)
    def __radd__(self, other):
        return Add(other, self)
    def __pow__(self, other):
        return ScalarPow(self, other)
    def __neg__(self):
        return Neg(self)
    @property
    def T(self):
        # Lazy transpose node (see Trans).
        return Trans(self)
    @property
    def shape(self):
        return self._value.shape
    @property
    def size(self):
        return self._value.size
    @property
    def dtype(self):
        return self._value.dtype
    @property
    def ndim(self):
        return self._value.ndim
    def __str__(self):
        # e.g. "Conv [32x28x28x16]"
        return '%s [%s]' % (self.__class__.__name__, 'x'.join([str(s) for s in self.shape]))
    def __setstate__(self, dict):
        # Unpickling: non-Param nodes were saved with only their value shape
        # (see __getstate__); reallocate an empty value buffer of that shape.
        self.__dict__ = dict
        if not isinstance(self, Param):
            self.__dict__['_value'] = np.empty(self.__dict__['_value_shape'], dtype=DTYPE)
            del self.__dict__['_value_shape']
    def __getstate__(self):
        # Pickling: drop transient buffers (gradients, pooling masks, im2col
        # caches, label results) and, for everything except learned Params,
        # replace the value array by just its shape to keep snapshots small.
        dict = self.__dict__.copy()
        del dict['_gradient']
        if '_mask' in dict: del dict['_mask']
        if '_col' in dict: del dict['_col']
        if '_temp' in dict: del dict['_temp']
        if isinstance(self, Label):
            del dict['_result']
        if not isinstance(self, Param):
            dict['_value_shape'] = self.__dict__['_value'].shape
            del dict['_value']
        return dict
class Op(Node):
    """Marker base class for computational layers (non-data, non-param, non-label)."""
class Data(Node):
    """In-memory input layer that mean-centers the current batch."""
    def __init__(self, data_mean_or_shape):
        # Accepts either a precomputed mean array or just a shape tuple
        # (in which case a zero mean is used, i.e. no effective centering).
        if type(data_mean_or_shape) is tuple:
            data_mean = np.zeros(data_mean_or_shape, dtype=DTYPE)
        else:
            data_mean = data_mean_or_shape
        self._input = []
        self._mean = data_mean
        # Prepend a batch axis of size 1; callers assign the real batch to _value.
        self._value = np.zeros(data_mean.shape, dtype=DTYPE)[np.newaxis]
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        # NOTE(review): subtracts the mean from _value on every call — this
        # assumes _value is freshly assigned before each forward pass; calling
        # forward() twice without reloading data would subtract the mean twice.
        self._value = self._value - self._mean
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
class Preprocess(Op):
    """Marker base class for data-augmentation layers (crop, mirror, rotate, shear, ...)."""
class Crop(Preprocess):
    """Randomly crops a fixed-size window out of each forward pass's batch."""
    def __init__(self, input, cropsize):
        self._input = [input]
        self._cropsize = cropsize
        self._value = np.empty((input.shape[0], cropsize[0], cropsize[1], input.shape[3]), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self, disabled=False):
        # Valid top-left offsets range from 0 to (input - crop) inclusive.
        high = np.array(self._input[0].shape[1:3]) - self._cropsize
        # BUGFIX: use randint(h + 1) — randint(h) excluded the last valid
        # offset and raised ValueError when the crop size equals the input
        # size (h == 0, since randint requires high > 0).
        self._pos = np.array([np.random.randint(h + 1) for h in high])
        self._value = self._input[0]._value[:, self._pos[0]:self._pos[0] + self._cropsize[0], self._pos[1]:self._pos[1] + self._cropsize[1], :]
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self, disabled=False):
        # Route the gradient back into the cropped region only.
        self._input[0]._gradient[:, self._pos[0]:self._pos[0] + self._cropsize[0], self._pos[1]:self._pos[1] + self._cropsize[1], :] += self._gradient
class Mirror(Preprocess):
    """Randomly flips the batch along the width axis (horizontal mirroring)."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self, disabled=False):
        # Flip with probability 1/2 during training, never when disabled.
        self._flip = (not disabled) and np.random.rand() > 0.5
        src = self._input[0]._value
        self._value = src[:, :, ::-1, :] if self._flip else src
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self, disabled=False):
        # Un-flip the gradient so it lines up with the original orientation.
        grad = self._gradient[:, :, ::-1, :] if self._flip else self._gradient
        self._input[0]._gradient += grad
class Rotate(Preprocess):
    """Applies a random small rotation to the batch (augmentation)."""
    def __init__(self, input, minangle=-15, maxangle=15):
        self._input = [input]
        self._angles = (minangle, maxangle)   # degrees
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self, disabled=False):
        # Gaussian angle scaled so the configured range spans six standard
        # deviations; identity (theta = 0) when augmentation is disabled.
        theta = np.random.randn() * ((self._angles[1] - self._angles[0]) / 6.0) if not disabled else 0 # 6 sigma
        sin_theta, cos_theta = np.sin(theta * np.pi / 180), np.cos(theta * np.pi / 180)
        # 2x2 rotation matrix applied by the external transform() helper.
        self._A = np.array([cos_theta, sin_theta, -sin_theta, cos_theta], dtype=np.float32).reshape(2, 2)
        self._value = transform(self._input[0]._value, self._A)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self, disabled=False):
        # Map the gradient back through the inverse transform.
        self._input[0]._gradient += invtransform(self._gradient, self._A)
class Shear(Preprocess):
    """Applies a random shear transform to the batch (augmentation)."""
    def __init__(self, input, minshearx=-0.75, maxshearx=0.75, minsheary=-0.75, maxsheary=0.75):
        self._input = [input]
        self._shearx = (minshearx, maxshearx)
        self._sheary = (minsheary, maxsheary)
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self, disabled=False):
        if disabled:
            # Identity transform when augmentation is turned off.
            mx = my = 0
        else:
            # Gaussian shears scaled so each range spans six standard deviations.
            mx = np.random.randn() * ((self._shearx[1] - self._shearx[0]) / 6.0)
            my = np.random.randn() * ((self._sheary[1] - self._sheary[0]) / 6.0)
        self._A = np.array([1, mx, my, 1], dtype=np.float32).reshape(2, 2)
        self._value = transform(self._input[0]._value, self._A)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self, disabled=False):
        # Map the gradient back through the inverse transform.
        self._input[0]._gradient += invtransform(self._gradient, self._A)
class Param(Node):
    """Trainable parameter tensor (weights or biases)."""
    def __init__(self, val):
        self._input = []
        self._value = val.astype(DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
        self._fixed = False  # when True, the optimizer should skip updates
    @staticmethod
    def zeros(shape):
        """Zero-initialized parameter of the given shape."""
        return Param(np.zeros(shape, dtype=DTYPE))
    @staticmethod
    def randn(shape, var=-1):
        """Gaussian-initialized parameter; a negative var selects sqrt(2/fan) scaling."""
        if var < 0:
            var = np.sqrt(2.0 / np.prod(shape))
        return Param(var * np.random.randn(*shape).astype(DTYPE))
class FC(Op):
    """Fully-connected layer: flattens the input and computes x.dot(w) + b."""
    def __init__(self, input, ndim):
        shp = input.shape
        # Weights map the flattened input features to ndim outputs.
        w = Param.randn((np.prod(shp[1:]), ndim))
        b = Param.zeros((ndim,))
        self._input = [input, w, b]
        self._value = np.empty((shp[0], ndim), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        # FC = w.dot(x) + b
        data = self._input[0]._value.reshape(self._input[0].shape[0], -1)
        self._value = np.dot(data, self._input[1]._value) + self._input[2]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # dL/dx = dL/dy . w^T, reshaped back to the input's original shape
        self._input[0]._gradient += self._gradient.dot(self._input[1]._value.T).reshape(self._input[0]._gradient.shape)
        data = self._input[0]._value.reshape(self._input[0].shape[0], -1)
        # dL/dw = x^T . dL/dy ; dL/db sums the gradient over the batch
        self._input[1]._gradient += data.T.dot(self._gradient)
        self._input[2]._gradient += self._gradient.sum(axis=0)
class Affine(FC):
    """Fully-connected layer reusing externally supplied weight/bias Params
    (forward/backward inherited from FC)."""
    def __init__(self, input, w, b):
        self._input = [input, w, b]
        self._value = np.empty((input.shape[0], b.size), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
class Conv(Op):
    """2D convolution layer implemented via im2col.

    Weights: (nfilters, window, window, channels); input/output layout is
    (count, height, width, channels).  NOTE(review): the output buffer keeps
    the input's spatial size, which matches 'same' padding with stride 1 —
    confirm before using stride != 1.
    """
    def __init__(self, input, nfilters, window=5, stride=1):
        shp = input.shape
        w = Param.randn((nfilters, window, window, shp[3]))
        b = Param.zeros((nfilters,))
        self._input = [input, w, b]
        self._window = window
        self._nfilters = nfilters
        self._stride = stride
        self._value = np.empty((shp[0], shp[1], shp[2], nfilters), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        n = self._input[0].shape[0]
        shp = self.shape
        # Reshape images to (count, channels, height, width), then apply im2col
        im = self._input[0]._value.transpose((0, 3, 1, 2))
        # BUGFIX: floor-divide the padding — plain '/' yields a float under
        # Python 3 (it was integer division when this file was written).
        self._col = im2col(im, self._window, self._window, (self._window - 1) // 2, self._stride)
        # Now that all the windows are in matrix form, calculate w.dot(col) + b
        w = self._input[1]._value.reshape(self._nfilters, -1)
        b = self._input[2]._value.reshape(-1, 1)
        self._value = np.dot(w, self._col) + b
        # Reshape result from (nfilters, -1) to (count, height, width, nfilters)
        self._value = self._value.reshape(shp[3], n, shp[1], shp[2]).transpose((1, 2, 3, 0))
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # Reshape gradient to (nfilters, -1) and back-propagate through the dot product
        gradient = self._gradient.transpose((3, 0, 1, 2)).reshape(self._nfilters, -1)
        self._input[1]._gradient += gradient.dot(self._col.T).reshape(self._input[1]._gradient.shape)
        self._input[2]._gradient += gradient.sum(axis=1)
        # The gradient w.r.t the images is similar, but we need to aggregate the results over the windows
        w = self._input[1]._value.reshape(self._nfilters, -1)
        shp = self._input[0].shape
        # Same integer padding fix as in forward().
        imgradient = col2im(w.T.dot(gradient), shp[0], shp[3], shp[1], shp[2], self._window, self._window, (self._window - 1) // 2, self._stride)
        # Reshape the result back to (count, height, width, channels)
        self._input[0]._gradient += imgradient.transpose((0, 2, 3, 1))
class Pool(Op):
    """Max-pooling layer implemented via im2col, remembering argmax positions."""
    def __init__(self, input, window=2, stride=2):
        shp = input.shape
        self._input = [input]
        self._window = window
        self._stride = stride
        # BUGFIX: floor-divide the output dimensions — plain '/' yields floats
        # under Python 3 (it was integer division when this file was written).
        self._value = np.empty((shp[0], (shp[1] - window) // stride + 1, (shp[2] - window) // stride + 1, shp[3]), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        n = self._input[0].shape[0]
        shp = self.shape
        # Reshape images to (count, channels, height, width), then apply im2col
        im = self._input[0]._value.transpose((0, 3, 1, 2))
        col = im2col(im, self._window, self._window, 0, self._stride)
        col = col.reshape(self._window * self._window, im.shape[1], -1).transpose((1, 2, 0))
        # Find the maximum in every window and store its index using a mask
        self._mask = col.argmax(axis=2)
        self._mask = (self._mask[:, :, np.newaxis] == np.arange(self._window * self._window))
        self._value = col[self._mask].reshape(shp[3], n, shp[1], shp[2]).transpose((1, 2, 3, 0))
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        shp = self._input[0].shape
        # The gradient is calculated using the mask indices then aggregated over the windows
        gradient = self._gradient.transpose((3, 0, 1, 2)).reshape(1, shp[3], -1)
        col = (self._mask.transpose((2, 0, 1)) * gradient).reshape(-1, gradient.shape[2])
        imgradient = col2im(col, shp[0], shp[3], shp[1], shp[2], self._window, self._window, 0, self._stride)
        # Reshape the result back to (count, height, width, channels)
        self._input[0]._gradient += imgradient.transpose((0, 2, 3, 1))
class ScalarMul(Op):
    """Multiplies its input elementwise by a fixed scalar constant."""
    def __init__(self, input, scalar=1):
        self._input = [input]
        self._scalar = scalar
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        self._value = self._scalar * self._input[0]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # d(c*x)/dx = c
        self._input[0]._gradient += self._scalar * self._gradient
class ScalarPow(Op):
    """Raises its input elementwise to a fixed scalar power."""
    def __init__(self, input, scalar=1):
        self._input = [input]
        self._scalar = scalar
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        base = self._input[0]._value
        # Cache x^(p-1) so backward can reuse it for the derivative p*x^(p-1).
        self._temp = base**(self._scalar - 1)
        self._value = self._temp * base
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        self._input[0]._gradient += self._gradient * self._temp * self._scalar
class Max(Op):
    """Elementwise maximum of two equally-shaped inputs."""
    def __init__(self, input1, input2):
        # Both inputs must have identical shapes.
        assert input1.ndim == input2.ndim
        assert tuple(input1.shape) == tuple(input2.shape)
        self._input = [input1, input2]
        self._value = np.empty(input1.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        a, b = self._input[0]._value, self._input[1]._value
        # Remember which side won so backward routes the gradient there.
        self._mask = a > b
        self._value = np.where(self._mask, a, b)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        self._input[0]._gradient += np.where(self._mask, self._gradient, 0)
        self._input[1]._gradient += np.where(self._mask, 0, self._gradient)
class Relu(Op):
    """Leaky rectified linear unit: x if x > 0 else leak * x."""
    def __init__(self, input, leak=0.01):
        self._input = [input]
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._leak = leak
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        x = self._input[0]._value
        self._mask = x > 0
        self._value = np.where(self._mask, x, self._leak * x)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        g = self._gradient
        # Slope is 1 on the positive side, leak on the negative side.
        self._input[0]._gradient += np.where(self._mask, g, self._leak * g)
class Dropout(Op):
    """Inverted dropout: keep units with probability prob and rescale by 1/prob,
    so no rescaling is needed at test time (forward with disabled=True)."""
    def __init__(self, input, prob=0.5):
        self._input = [input]
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._prob = prob
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self, disabled=False):
        src = self._input[0]
        if disabled:
            # Test time: pass activations through untouched.
            self._value = src._value
        else:
            self._mask = np.random.rand(*src.shape).astype(DTYPE) < self._prob
            self._value = np.where(self._mask, src._value / self._prob, 0)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self, disabled=False):
        if disabled:
            self._input[0]._gradient += self._gradient
        else:
            # Gradient flows only through kept units, with the same rescale.
            self._input[0]._gradient += np.where(self._mask, self._gradient / self._prob, 0)
class Dot(Op):
    """Matrix product of two 2D inputs: value = input1 . input2."""
    def __init__(self, input1, input2):
        assert input1.ndim == 2 and input2.ndim == 2
        assert input1.shape[1] == input2.shape[0]
        self._input = [input1, input2]
        # BUGFIX: the original read `input[2].shape[1]`, subscripting the
        # builtin `input` (a TypeError at construction); the output's column
        # count comes from the second operand.
        self._value = np.empty((input1.shape[0], input2.shape[1]), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        self._value = np.dot(self._input[0]._value, self._input[1]._value)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # dL/dA = dL/dY . B^T ; dL/dB = A^T . dL/dY
        self._input[0]._gradient += self._gradient.dot(self._input[1]._value.T)
        self._input[1]._gradient += self._input[0]._value.T.dot(self._gradient)
class Mul(Op):
    """Elementwise (Hadamard) product of two nodes."""
    def __init__(self, input1, input2):
        self._input = [input1, input2]
        self._value = np.empty(input1.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        self._value = self._input[0]._value * self._input[1]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # Product rule: each input receives the gradient times the other input.
        a, b = self._input
        a._gradient += b._value * self._gradient
        b._gradient += a._value * self._gradient
class Add(Op):
    """Elementwise sum of two nodes."""
    def __init__(self, input1, input2):
        self._input = [input1, input2]
        self._value = np.empty(input1.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        self._value = self._input[0]._value + self._input[1]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # Addition passes the gradient through unchanged to both inputs.
        for inp in self._input:
            inp._gradient += self._gradient
class Concat(Op):
    """Flattens two inputs per example and concatenates them along features."""
    def __init__(self, input1, input2):
        self._input = [input1, input2]
        self._value = np.empty((input1.shape[0], input1.shape[1] + input2.shape[1]), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        left = self._input[0]._value.reshape(self._input[0].shape[0], -1)
        right = self._input[1]._value.reshape(self._input[1].shape[0], -1)
        self._value = np.c_[left, right]
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # Split the gradient at the first input's (flattened) feature count.
        k = np.prod(self._input[0].shape[1:])
        self._input[0]._gradient += self._gradient[:, :k].reshape(self._input[0]._gradient.shape)
        self._input[1]._gradient += self._gradient[:, k:].reshape(self._input[1]._gradient.shape)
class Neg(Op):
    """Elementwise negation."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        self._value = -self._input[0]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # d(-x)/dx = -1
        self._input[0]._gradient -= self._gradient
class Abs(Op):
    """Elementwise absolute value."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        self._value = np.abs(self._input[0]._value)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # d|x|/dx = sign(x) (0 at x == 0, matching np.sign)
        self._input[0]._gradient += np.sign(self._input[0]._value) * self._gradient
class Trans(Op):
    """Transpose of the input (reverses all axes, as ndarray.T does)."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        self._value = self._input[0]._value.T
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # Transposing the gradient maps it back to the input's layout.
        self._input[0]._gradient += self._gradient.T
class Sigmoid(Op):
    """Elementwise logistic sigmoid."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        self._value = numeric.sigmoid(self._input[0]._value)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # sigmoid'(x) = s * (1 - s), reusing the cached forward value
        s = self._value
        self._input[0]._gradient += s * (1 - s) * self._gradient
class Tanh(Op):
    """Elementwise hyperbolic tangent, built from the sigmoid helper."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        # tanh(x) = 2 * sigmoid(2x) - 1
        self._value = 2 * numeric.sigmoid(2 * self._input[0]._value) - 1
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # tanh'(x) = 1 - tanh(x)^2, reusing the cached forward value
        t = self._value
        self._input[0]._gradient += (1 - t * t) * self._gradient
class Sum(Op):
    """Reduces the input to a single-element array holding its total."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty((1,), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def forward(self):
        self._value = np.sum(self._input[0]._value).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # A sum broadcasts its (scalar) gradient to every input element.
        self._input[0]._gradient += self._gradient
class Loss(Op):
    """Base class for loss layers: pairs a prediction node with a Label node
    and produces a single-element loss value."""
    def __init__(self, input, label):
        self._input = [input, label]
        self._value = np.empty((1,), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    @property
    def labels(self):
        """Ground-truth values held by the label node."""
        return self._input[1]._value
    @property
    def result(self):
        """Prediction stashed on the label node during forward()."""
        return self._input[1]._result
class Label(Node):
    """Holds ground-truth labels; loss layers also stash predictions here."""
    def __init__(self):
        self._input = []
        self._value = np.zeros(1, dtype=np.int32)   # ground truth
        self._result = np.zeros(1, dtype=np.int32)  # prediction, set by the loss
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    @property
    def result(self):
        return self._result
class Xent(Loss):
    """Mean binary cross-entropy on logits (sigmoid applied internally)."""
    def forward(self):
        # A stable calculation: xent(sigmoid(x)) = (1 - t) * x + log(1 + exp(-x))
        self._input[1]._result = numeric.sigmoid(self._input[0]._value)
        labels = self._input[1]._value
        xent = (1 - labels.astype(DTYPE)) * self._input[0]._value + numeric.log1pexp(-self._input[0]._value)
        self._value = (np.sum(xent) / DTYPE(xent.size)).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # d/dx mean(xent) = (sigmoid(x) - t) / n
        diff = self._input[1]._result - self._input[1]._value
        self._input[0]._gradient += self._gradient * diff / DTYPE(diff.size)
class Softmax(Loss):
    """Mean softmax cross-entropy on logits with integer class labels."""
    def forward(self):
        self._input[1]._result = numeric.softmax(self._input[0]._value)
        labels = self._input[1]._value
        # Stable negative log-likelihood: logsumexp(x) - x[true class]
        logz = numeric.logsumexp(self._input[0]._value)
        value = logz - self._input[0]._value[np.arange(labels.shape[0]), labels.reshape(-1)]
        self._value = (np.sum(value) / DTYPE(value.size)).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # d/dx = (softmax(x) - onehot(t)) / n
        diff = self._input[1]._result - numeric.onehot(self._input[1]._value, self._input[0].shape[1])
        self._input[0]._gradient += self._gradient * diff / DTYPE(diff.size)
class Hinge(Loss):
    """Mean binary hinge loss; {0, 1} labels are mapped to {-1, +1} targets."""
    def forward(self):
        self._input[1]._result = self._input[0]._value
        labels = self._input[1]._value
        # Map labels {0, 1} -> targets {-1, +1}
        self._target = (2 * labels - 1)
        value = 1 - self._target * self._input[0]._value
        # Only margins that are violated (value > 0) contribute to the loss.
        self._mask = value > 0
        self._value = (np.where(self._mask, value, 0).sum() / DTYPE(value.size)).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # Subgradient: -target where the margin is violated, 0 elsewhere.
        self._input[0]._gradient += self._gradient * np.where(self._mask, -self._target, 0) / DTYPE(self._target.size)
class MultiHinge(Loss):
    """Multiclass hinge loss using the single most-violating wrong class per example."""
    def forward(self):
        self._input[1]._result = self._input[0]._value
        all = np.arange(self._input[1]._value.shape[0])
        labels = self._input[1]._value
        # Margin of every class against the correct class's score.
        correct = self._input[0]._value[all, labels.reshape(-1)]
        value = self._input[0]._value - correct.reshape(-1, 1) + 1 # y_delta = 1
        value[all, labels.reshape(-1)] = 0 # t_delta = 0
        # Keep only the most-violating class per example.
        self._argmax = np.argmax(value, axis=1)
        value = value[all, self._argmax]
        self._mask = value > 0
        self._value = (np.where(self._mask, value, 0).sum() / DTYPE(value.size)).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        all = np.arange(self._input[1]._value.shape[0])
        labels = self._input[1]._value
        # Subgradient: +1 on the violating class, -1 on the true class,
        # only for examples whose margin was violated.
        mask = np.zeros(self._input[0].shape, dtype=DTYPE)
        mask[all[self._mask], self._argmax[self._mask]] = 1
        mask[all[self._mask], labels.reshape(-1)[self._mask]] = -1
        self._input[0]._gradient += self._gradient * mask / DTYPE(mask.size)
class Squared(Loss):
    """Mean halved squared error: 0.5 * mean((t - x)^2)."""
    def forward(self):
        prediction = self._input[0]._value
        self._input[1]._result = prediction
        self._diff = self._input[1]._value - prediction
        self._value = (0.5 * np.sum(self._diff**2) / DTYPE(self._diff.size)).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
    def backward(self):
        # d/dx 0.5*(t - x)^2 = -(t - x)
        self._input[0]._gradient += -self._diff * self._gradient / DTYPE(self._diff.size)
| samehkhamis/pydeeplearn | pydeeplearn/core/layers.py | Python | gpl-2.0 | 25,826 |
#
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010 by Joel Andersson, Moritz Diehl, K.U.Leuven. All rights reserved.
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# -*- coding: utf-8 -*-
from casadi import *
from numpy import *
import matplotlib.pyplot as plt
# NOTE(review): this script targets an old, pre-3.0 CasADi API (SXFunction,
# CVodesIntegrator, string-keyed setInput/output); verify against the CasADi
# version in use before modifying.
# Single-shooting rocket OCP: minimize fuel (integral of u^2) subject to
# reaching distance 10 with zero final velocity.
# Time
t = SX("t")
# Differential states
s = SX("s"); v = SX("v"); m = SX("m")
y = [s,v,m]
# Control
u = SX("u")
alpha = 0.05 # friction
beta = 0.1 # fuel consumption rate
# Differential equation
sdot = v
vdot = (u-alpha*v*v)/m
mdot = -beta*u*u
rhs = [sdot,vdot,mdot]
# ODE right hand side
ffcn = SXFunction([[t],y,[u]],[rhs])
ffcn.setOption("name","ODE right hand side")
# Explicit integrator (CVODES)
integrator = CVodesIntegrator(ffcn)
# Set options
integrator.setOption("fsens_err_con",True)
integrator.setOption("quad_err_con",True)
integrator.setOption("abstol",1e-6)
integrator.setOption("reltol",1e-6)
# Initialize the integrator
integrator.init()
# Time horizon
T = 10.0
# Shooting length
nu = 100 # Number of control segments
DT = T/nu
# Initial position, speed and mass
s0 = 0 # initial position
v0 = 0 # initial speed
m0 = 1 # initial mass
# control for all segments
U = MX("U",nu)
# Dummy input corresponding to the state derivative
xdot = MX([0,0,0])
# Integrate over all intervals, chaining integrator calls symbolically
X=MX([s0,v0,m0])
T0 = MX(0) # Beginning of time interval (changed from k*DT due to probable Sundials bug)
TF = MX(DT) # End of time interval (changed from (k+1)*DT due to probable Sundials bug)
for k in range(nu):
    # build up a graph with function calls
    X = integrator([T0,TF,X,U[k],xdot])
# Objective function: total control effort u'u
F = inner_prod(U,U)
# Terminal constraints: final position and velocity
G = vertcat((X[0],X[1]))
# Create the NLP
ffcn = MXFunction([U],[F]) # objective function
gfcn = MXFunction([U],[G]) # constraint function
# Allocate an NLP solver
solver = IpoptSolver(ffcn,gfcn)
# Set options
solver.setOption("tol",1e-10)
solver.setOption("hessian_approximation","limited-memory");
# initialize the solver
solver.init()
# Bounds on u and initial condition
Umin = nu * [-10] # lower bound
solver.setInput(Umin,"lbx")
Umax = nu * [10] # upper bound
solver.setInput(Umax,"ubx")
Usol = nu * [0.4] # initial guess
solver.setInput(Usol,"x0")
# Bounds on g: equality constraints s(T) = 10, v(T) = 0
Gmin = Gmax = [10, 0]
solver.setInput(Gmin,"lbg")
solver.setInput(Gmax,"ubg")
# Solve the problem
solver.solve()
# Get the solution
uopt = solver.output("x")
# Plot the optimal trajectory
tgrid = linspace(0,T,nu+1)
tgrid_u = linspace(0,T,nu)
plt.figure(1)
plt.clf()
plt.ylabel('Optimal control')
plt.xlabel('time')
plt.plot(tgrid_u,uopt)
# Re-simulate numerically with the optimal controls to recover the states
x = [0, 0, 1]
sopt = [x[0]]
vopt = [x[1]]
mopt = [x[2]]
for k in range(nu):
    integrator.setInput(k*DT,"t0")
    integrator.setInput((k+1)*DT,"tf")
    integrator.setInput(uopt[k],"p")
    integrator.setInput(x,"x0")
    integrator.evaluate()
    x = integrator.output()
    sopt.append(x[0])
    vopt.append(x[1])
    mopt.append(x[2])
plt.figure(2)
plt.clf()
plt.subplot(3,1,1)
plt.ylabel('Distance')
plt.xlabel('time')
plt.plot(tgrid,sopt)
plt.subplot(3,1,2)
plt.ylabel('Velocity')
plt.xlabel('time')
plt.plot(tgrid,vopt)
plt.subplot(3,1,3)
plt.ylabel('Mass')
plt.xlabel('time')
plt.plot(tgrid,mopt)
plt.show()
| jgillis/casadi | experimental/joel/rocket_example.py | Python | lgpl-3.0 | 3,956 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import itertools
import logging
from functools import partial
from itertools import repeat
from lxml import etree
from lxml.builder import E
import openerp
from openerp import api
from openerp import SUPERUSER_ID, models
from openerp import tools
import openerp.exceptions
from openerp.osv import fields, osv, expression
from openerp.service.db import check_super
from openerp.tools.translate import _
from openerp.http import request
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
# Only users who can modify the user (incl. the user herself) see the real contents of these fields
USER_PRIVATE_FIELDS = ['password']
#----------------------------------------------------------
# Basic res.groups and res.users
#----------------------------------------------------------
class res_groups(osv.osv):
_name = "res.groups"
_description = "Access Groups"
_rec_name = 'full_name'
_order = 'name'
def _get_full_name(self, cr, uid, ids, field, arg, context=None):
res = {}
for g in self.browse(cr, SUPERUSER_ID, ids, context=context):
if g.category_id:
res[g.id] = '%s / %s' % (g.category_id.name, g.name)
else:
res[g.id] = g.name
return res
def _search_group(self, cr, uid, obj, name, args, context=None):
operand = args[0][2]
operator = args[0][1]
lst = True
if isinstance(operand, bool):
domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]]
if operator in expression.NEGATIVE_TERM_OPERATORS == (not operand):
return expression.AND(domains)
else:
return expression.OR(domains)
if isinstance(operand, basestring):
lst = False
operand = [operand]
where = []
for group in operand:
values = filter(bool, group.split('/'))
group_name = values.pop().strip()
category_name = values and '/'.join(values).strip() or group_name
group_domain = [('name', operator, lst and [group_name] or group_name)]
category_domain = [('category_id.name', operator, lst and [category_name] or category_name)]
if operator in expression.NEGATIVE_TERM_OPERATORS and not values:
category_domain = expression.OR([category_domain, [('category_id', '=', False)]])
if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values):
sub_where = expression.AND([group_domain, category_domain])
else:
sub_where = expression.OR([group_domain, category_domain])
if operator in expression.NEGATIVE_TERM_OPERATORS:
where = expression.AND([where, sub_where])
else:
where = expression.OR([where, sub_where])
return where
_columns = {
'name': fields.char('Name', required=True, translate=True),
'users': fields.many2many('res.users', 'res_groups_users_rel', 'gid', 'uid', 'Users'),
'model_access': fields.one2many('ir.model.access', 'group_id', 'Access Controls', copy=True),
'rule_groups': fields.many2many('ir.rule', 'rule_group_rel',
'group_id', 'rule_group_id', 'Rules', domain=[('global', '=', False)]),
'menu_access': fields.many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', 'Access Menu'),
'view_access': fields.many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', 'Views'),
'comment' : fields.text('Comment', size=250, translate=True),
'category_id': fields.many2one('ir.module.category', 'Application', select=True),
'color': fields.integer('Color Index'),
'full_name': fields.function(_get_full_name, type='char', string='Group Name', fnct_search=_search_group),
'share': fields.boolean('Share Group',
help="Group created to set access rights for sharing data with some users.")
}
_sql_constraints = [
('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!')
]
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
# add explicit ordering if search is sorted on full_name
if order and order.startswith('full_name'):
ids = super(res_groups, self).search(cr, uid, args, context=context)
gs = self.browse(cr, uid, ids, context)
gs.sort(key=lambda g: g.full_name, reverse=order.endswith('DESC'))
gs = gs[offset:offset+limit] if limit else gs[offset:]
return map(int, gs)
return super(res_groups, self).search(cr, uid, args, offset, limit, order, context, count)
def copy(self, cr, uid, id, default=None, context=None):
group_name = self.read(cr, uid, [id], ['name'])[0]['name']
default.update({'name': _('%s (copy)')%group_name})
return super(res_groups, self).copy(cr, uid, id, default, context)
    def write(self, cr, uid, ids, vals, context=None):
        """Override write to reject group names starting with '-' and to
        invalidate the access-control and group-membership caches afterwards.
        """
        if 'name' in vals:
            if vals['name'].startswith('-'):
                raise UserError(_('The name of the group can not start with "-"'))
        res = super(res_groups, self).write(cr, uid, ids, vals, context=context)
        # group changes may alter model access rules and users' group membership
        self.pool['ir.model.access'].call_cache_clearing_methods(cr)
        self.pool['res.users'].has_group.clear_cache(self.pool['res.users'])
        return res
class ResUsersLog(osv.Model):
    """Append-only record of user logins (see res_users._update_last_login)."""
    _name = 'res.users.log'
    _order = 'id desc'
    # Currently only uses the magical fields: create_uid, create_date,
    # for recording logins. To be extended for other uses (chat presence, etc.)
class res_users(osv.osv):
    """ User class. A res.users record models an OpenERP user and is different
        from an employee.

        res.users class now inherits from res.partner. The partner model is
        used to store the data related to the partner: lang, name, address,
        avatar, ... The user model is now dedicated to technical data.
    """
    # per-database cache of successfully checked {uid: password} pairs,
    # maintained by check() and invalidated by write()/unlink()
    __uid_cache = {}
    _inherits = {
        'res.partner': 'partner_id',
    }
    _name = "res.users"
    _description = 'Users'
    _order = 'name, login'
    def _set_new_password(self, cr, uid, id, name, value, args, context=None):
        """Inverse of the 'new_password' function field: store the given
        password, except when empty (silently ignored) or when users try to
        change their own password this way (wizard required)."""
        if value is False:
            # Do not update the password if no value is provided, ignore silently.
            # For example web client submits False values for all empty fields.
            return
        if uid == id:
            # To change their own password users must use the client-specific change password wizard,
            # so that the new password is immediately used for further RPC requests, otherwise the user
            # will face unexpected 'Access Denied' exceptions.
            raise UserError(_('Please use the change password wizard (in User Preferences or User menu) to change your own password.'))
        self.write(cr, uid, id, {'password': value})
def _get_password(self, cr, uid, ids, arg, karg, context=None):
return dict.fromkeys(ids, '')
def _is_share(self, cr, uid, ids, name, args, context=None):
res = {}
for user in self.browse(cr, uid, ids, context=context):
res[user.id] = not self.has_group(cr, user.id, 'base.group_user')
return res
    def _store_trigger_share_res_groups(self, cr, uid, ids, context=None):
        # store trigger for the 'share' function field: when the membership of
        # base.group_user changes, recompute 'share' for all of its users
        group_user = self.pool['ir.model.data'].xmlid_to_object(cr, SUPERUSER_ID, 'base.group_user', context=context)
        if group_user and group_user.id in ids:
            return group_user.users.ids
        return []
def _get_users_from_group(self, cr, uid, ids, context=None):
result = set()
groups = self.pool['res.groups'].browse(cr, uid, ids, context=context)
# Clear cache to avoid perf degradation on databases with thousands of users
groups.invalidate_cache()
for group in groups:
result.update(user.id for user in group.users)
return list(result)
_columns = {
'id': fields.integer('ID'),
'partner_id': fields.many2one('res.partner', required=True,
string='Related Partner', ondelete='restrict',
help='Partner-related data of the user', auto_join=True),
'login': fields.char('Login', size=64, required=True,
help="Used to log into the system"),
'password': fields.char('Password', size=64, invisible=True, copy=False,
help="Keep empty if you don't want the user to be able to connect on the system."),
'new_password': fields.function(_get_password, type='char', size=64,
fnct_inv=_set_new_password, string='Set Password',
help="Specify a value only when creating a user or if you're "\
"changing the user's password, otherwise leave empty. After "\
"a change of password, the user has to login again."),
'signature': fields.html('Signature'),
'active': fields.boolean('Active'),
'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at log on for this user, in addition to the standard menu."),
'groups_id': fields.many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', 'Groups'),
# Special behavior for this field: res.company.search() will only return the companies
# available to the current user (should be the user's companies?), when the user_preference
# context is set.
'company_id': fields.many2one('res.company', 'Company', required=True,
help='The company this user is currently working for.', context={'user_preference': True}),
'company_ids':fields.many2many('res.company','res_company_users_rel','user_id','cid','Companies'),
'share': fields.function(_is_share, string='Share User', type='boolean',
store={
'res.users': (lambda self, cr, uid, ids, c={}: ids, ['groups_id'], 50),
'res.groups': (_store_trigger_share_res_groups, ['users'], 50),
}, help="External user with limited access, created only for the purpose of sharing data."),
}
# overridden inherited fields to bypass access rights, in case you have
# access to the user but not its corresponding partner
name = openerp.fields.Char(related='partner_id.name', inherited=True)
email = openerp.fields.Char(related='partner_id.email', inherited=True)
log_ids = openerp.fields.One2many('res.users.log', 'create_uid', string='User log entries')
login_date = openerp.fields.Datetime(related='log_ids.create_date', string='Latest connection')
def on_change_login(self, cr, uid, ids, login, context=None):
if login and tools.single_email_re.match(login):
return {'value': {'email': login}}
return {}
def onchange_state(self, cr, uid, ids, state_id, context=None):
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool.get('res.partner').onchange_state(cr, uid, partner_ids, state_id, context=context)
def onchange_parent_id(self, cr, uid, ids, parent_id, context=None):
""" Wrapper on the user.partner onchange_address, because some calls to the
partner form view applied to the user may trigger the
partner.onchange_type method, but applied to the user object.
"""
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool['res.partner'].onchange_address(cr, uid, partner_ids, parent_id, context=context)
def _check_company(self, cr, uid, ids, context=None):
return all(((this.company_id in this.company_ids) or not this.company_ids) for this in self.browse(cr, uid, ids, context))
_constraints = [
(_check_company, 'The chosen company is not in the allowed companies for this user', ['company_id', 'company_ids']),
]
_sql_constraints = [
('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !')
]
    def _get_company(self,cr, uid, context=None, uid2=False):
        """Default company: the company of user ``uid2`` (or ``uid`` when not given)."""
        if not uid2:
            uid2 = uid
        # Use read() to compute default company, and pass load=_classic_write to
        # avoid useless name_get() calls. This will avoid prefetching fields
        # while computing default values for new db columns, as the
        # db backend may not be fully initialized yet.
        user_data = self.pool['res.users'].read(cr, uid, uid2, ['company_id'],
                                                context=context, load='_classic_write')
        comp_id = user_data['company_id']
        return comp_id or False
def _get_companies(self, cr, uid, context=None):
c = self._get_company(cr, uid, context)
if c:
return [c]
return False
def _get_group(self,cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
result = []
try:
dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user')
result.append(group_id)
dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager')
result.append(group_id)
except ValueError:
# If these groups does not exists anymore
pass
return result
_defaults = {
'password': '',
'active': True,
'customer': False,
'company_id': _get_company,
'company_ids': _get_companies,
'groups_id': _get_group,
}
# User can write on a few of his own fields (but not his groups for example)
SELF_WRITEABLE_FIELDS = ['password', 'signature', 'action_id', 'company_id', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz']
# User can read a few of his own fields
SELF_READABLE_FIELDS = ['signature', 'company_id', 'login', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz', 'tz_offset', 'groups_id', 'partner_id', '__last_update', 'action_id']
    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """Override read so users may read their own whitelisted fields
        without access rights, and so private fields (e.g. password) are
        masked for anyone lacking write access on res.users."""
        def override_password(o):
            # mask private fields in-place, except on the user's own record
            if ('id' not in o or o['id'] != uid):
                for f in USER_PRIVATE_FIELDS:
                    if f in o:
                        o[f] = '********'
            return o

        if fields and (ids == [uid] or ids == uid):
            for key in fields:
                if not (key in self.SELF_READABLE_FIELDS or key.startswith('context_')):
                    break
            else:
                # safe fields only, so we read as super-user to bypass access rights
                uid = SUPERUSER_ID

        result = super(res_users, self).read(cr, uid, ids, fields=fields, context=context, load=load)
        canwrite = self.pool['ir.model.access'].check(cr, uid, 'res.users', 'write', False)
        if not canwrite:
            if isinstance(ids, (int, long)):
                result = override_password(result)
            else:
                result = map(override_password, result)
        return result
    def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
        """Forbid grouping on private fields for non-superusers: group sizes
        would otherwise leak information about e.g. passwords."""
        if uid != SUPERUSER_ID:
            groupby_fields = set([groupby] if isinstance(groupby, basestring) else groupby)
            if groupby_fields.intersection(USER_PRIVATE_FIELDS):
                raise openerp.exceptions.AccessError('Invalid groupby')
        return super(res_users, self).read_group(
            cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
    def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
        """Forbid searching on private fields for non-superusers: search
        results would otherwise allow guessing e.g. passwords."""
        if user != SUPERUSER_ID and args:
            domain_terms = [term for term in args if isinstance(term, (tuple, list))]
            domain_fields = set(left for (left, op, right) in domain_terms)
            if domain_fields.intersection(USER_PRIVATE_FIELDS):
                raise openerp.exceptions.AccessError('Invalid search criterion')
        return super(res_users, self)._search(
            cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count,
            access_rights_uid=access_rights_uid)
def create(self, cr, uid, vals, context=None):
user_id = super(res_users, self).create(cr, uid, vals, context=context)
user = self.browse(cr, uid, user_id, context=context)
if user.partner_id.company_id:
user.partner_id.write({'company_id': user.company_id.id})
return user_id
def write(self, cr, uid, ids, values, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
if values.get('active') == False:
for current_id in ids:
if current_id == SUPERUSER_ID:
raise UserError(_("You cannot unactivate the admin user."))
elif current_id == uid:
raise UserError(_("You cannot unactivate the user you're currently logged in as."))
if ids == [uid]:
for key in values.keys():
if not (key in self.SELF_WRITEABLE_FIELDS or key.startswith('context_')):
break
else:
if 'company_id' in values:
user = self.browse(cr, SUPERUSER_ID, uid, context=context)
if not (values['company_id'] in user.company_ids.ids):
del values['company_id']
uid = 1 # safe fields only, so we write as super-user to bypass access rights
res = super(res_users, self).write(cr, uid, ids, values, context=context)
if 'company_id' in values:
for user in self.browse(cr, uid, ids, context=context):
# if partner is global we keep it that way
if user.partner_id.company_id and user.partner_id.company_id.id != values['company_id']:
user.partner_id.write({'company_id': user.company_id.id})
# clear default ir values when company changes
self.pool['ir.values'].get_defaults_dict.clear_cache(self.pool['ir.values'])
# clear caches linked to the users
self.pool['ir.model.access'].call_cache_clearing_methods(cr)
clear = partial(self.pool['ir.rule'].clear_cache, cr)
map(clear, ids)
db = cr.dbname
if db in self.__uid_cache:
for id in ids:
if id in self.__uid_cache[db]:
del self.__uid_cache[db][id]
self.context_get.clear_cache(self)
self.has_group.clear_cache(self)
return res
def unlink(self, cr, uid, ids, context=None):
if 1 in ids:
raise UserError(_('You can not remove the admin user as it is used internally for resources created by Odoo (updates, module installation, ...)'))
db = cr.dbname
if db in self.__uid_cache:
for id in ids:
if id in self.__uid_cache[db]:
del self.__uid_cache[db][id]
return super(res_users, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if not context:
context={}
ids = []
if name and operator in ['=', 'ilike']:
ids = self.search(cr, user, [('login','=',name)]+ args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('name',operator,name)]+ args, limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
user2copy = self.read(cr, uid, [id], ['login','name'])[0]
default = dict(default or {})
if ('name' not in default) and ('partner_id' not in default):
default['name'] = _("%s (copy)") % user2copy['name']
if 'login' not in default:
default['login'] = _("%s (copy)") % user2copy['login']
return super(res_users, self).copy(cr, uid, id, default, context)
    @tools.ormcache('uid')
    def context_get(self, cr, uid, context=None):
        """Build the user's session context: 'lang', 'tz' and every field
        whose name starts with 'context_' (stored under the suffix key).
        Cached per uid; invalidated by write()."""
        user = self.browse(cr, SUPERUSER_ID, uid, context)
        result = {}
        for k in self._fields:
            if k.startswith('context_'):
                context_key = k[8:]  # strip the 'context_' prefix
            elif k in ['lang', 'tz']:
                context_key = k
            else:
                context_key = False
            if context_key:
                res = getattr(user, k) or False
                if isinstance(res, models.BaseModel):
                    # relational value: keep only the id
                    res = res.id
                result[context_key] = res or False
        return result
def action_get(self, cr, uid, context=None):
dataobj = self.pool['ir.model.data']
data_id = dataobj._get_id(cr, SUPERUSER_ID, 'base', 'action_res_users_my')
return dataobj.browse(cr, uid, data_id, context=context).res_id
    def check_super(self, passwd):
        # delegate to the module-level check_super() helper
        return check_super(passwd)
    def check_credentials(self, cr, uid, password):
        """ Override this method to plug additional authentication methods"""
        # NOTE(review): this base implementation compares the stored value
        # directly via a search; hashing is expected to come from overriding
        # modules — confirm auth_crypt (or equivalent) is installed
        res = self.search(cr, SUPERUSER_ID, [('id','=',uid),('password','=',password)])
        if not res:
            raise openerp.exceptions.AccessDenied()
    def _update_last_login(self, cr, uid):
        """Record a successful login in res.users.log."""
        # only create new records to avoid any side-effect on concurrent transactions
        # extra records will be deleted by the periodical garbage collection
        self.pool['res.users.log'].create(cr, uid, {}) # populated by defaults
    def _login(self, db, login, password):
        """Return the uid matching (login, password), or False.

        Runs in its own cursor; an authentication failure is logged and
        swallowed (AccessDenied) so the caller only sees False."""
        if not password:
            return False
        user_id = False
        try:
            with self.pool.cursor() as cr:
                res = self.search(cr, SUPERUSER_ID, [('login','=',login)])
                if res:
                    user_id = res[0]
                    # raises AccessDenied on a wrong password
                    self.check_credentials(cr, user_id, password)
                    self._update_last_login(cr, user_id)
        except openerp.exceptions.AccessDenied:
            _logger.info("Login failed for db:%s login:%s", db, login)
            user_id = False
        return user_id
    def authenticate(self, db, login, password, user_agent_env):
        """Verifies and returns the user ID corresponding to the given
          ``login`` and ``password`` combination, or False if there was
          no matching user.

           :param str db: the database on which user is trying to authenticate
           :param str login: username
           :param str password: user password
           :param dict user_agent_env: environment dictionary describing any
               relevant environment attributes
        """
        uid = self._login(db, login, password)
        if uid == openerp.SUPERUSER_ID:
            # Successfully logged in as admin!
            # Attempt to guess the web base url...
            if user_agent_env and user_agent_env.get('base_location'):
                cr = self.pool.cursor()
                try:
                    base = user_agent_env['base_location']
                    ICP = self.pool['ir.config_parameter']
                    # 'web.base.url.freeze' blocks automatic updates of the url
                    if not ICP.get_param(cr, uid, 'web.base.url.freeze'):
                        ICP.set_param(cr, uid, 'web.base.url', base)
                    cr.commit()
                except Exception:
                    # best effort only: never fail the login for this
                    _logger.exception("Failed to update web.base.url configuration parameter")
                finally:
                    cr.close()
        return uid
    def check(self, db, uid, passwd):
        """Verifies that the given (uid, password) is authorized for the database ``db`` and
           raise an exception if it is not."""
        if not passwd:
            # empty passwords disallowed for obvious security reasons
            raise openerp.exceptions.AccessDenied()
        if self.__uid_cache.setdefault(db, {}).get(uid) == passwd:
            # same password already validated for this uid: skip the db check
            return
        cr = self.pool.cursor()
        try:
            self.check_credentials(cr, uid, passwd)
            # cache only after a successful check; invalidated by write()/unlink()
            self.__uid_cache[db][uid] = passwd
        finally:
            cr.close()
    def change_password(self, cr, uid, old_passwd, new_passwd, context=None):
        """Change current user password. Old password must be provided explicitly
        to prevent hijacking an existing user session, or for cases where the cleartext
        password is not used to authenticate requests.

        :return: True
        :raise: openerp.exceptions.AccessDenied when old password is wrong
        :raise: except_osv when new password is not set or empty
        """
        # re-authenticate before touching the password
        self.check(cr.dbname, uid, old_passwd)
        if new_passwd:
            return self.write(cr, uid, uid, {'password': new_passwd})
        raise UserError(_("Setting empty passwords is not allowed for security reasons!"))
def preference_save(self, cr, uid, ids, context=None):
return {
'type': 'ir.actions.client',
'tag': 'reload_context',
}
def preference_change_password(self, cr, uid, ids, context=None):
return {
'type': 'ir.actions.client',
'tag': 'change_password',
'target': 'new',
}
    @tools.ormcache('uid', 'group_ext_id')
    def has_group(self, cr, uid, group_ext_id):
        """Checks whether user belongs to given group.

        :param str group_ext_id: external ID (XML ID) of the group.
           Must be provided in fully-qualified form (``module.ext_id``), as there
           is no implicit module to use.
        :return: True if the current user is a member of the group with the
           given external ID (XML ID), else False.
        """
        assert group_ext_id and '.' in group_ext_id, "External ID must be fully qualified"
        module, ext_id = group_ext_id.split('.')
        # raw SQL (no ORM) — result is kept in the ormcache above
        cr.execute("""SELECT 1 FROM res_groups_users_rel WHERE uid=%s AND gid IN
                        (SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""",
                   (uid, module, ext_id))
        return bool(cr.fetchone())
    @api.multi
    def _is_admin(self):
        # superuser, or member of base.group_erp_manager (checked as this
        # user himself via sudo(self))
        return self.id == openerp.SUPERUSER_ID or self.sudo(self).has_group('base.group_erp_manager')
def get_company_currency_id(self, cr, uid, context=None):
return self.browse(cr, uid, uid, context=context).company_id.currency_id.id
#----------------------------------------------------------
# Implied groups
#
# Extension of res.groups and res.users with a relation for "implied"
# or "inherited" groups. Once a user belongs to a group, it
# automatically belongs to the implied groups (transitively).
#----------------------------------------------------------
class cset(object):
    """ A cset (constrained set) is a set of elements that may be constrained to
        be a subset of other csets. Elements added to a cset are automatically
        propagated to its supersets. Cycles in the subset constraints are
        supported: propagation stops as soon as nothing new is added.
    """
    def __init__(self, xs):
        self.supersets = set()      # csets that must contain all our elements
        self.elements = set(xs)

    def subsetof(self, other):
        """ Constrain this cset to be a subset of `other`. """
        if other is not self:
            self.supersets.add(other)
            other.update(self.elements)

    def update(self, xs):
        """ Add elements here and in every (transitive) superset. """
        fresh = set(xs) - self.elements
        if fresh:  # becomes empty on a cycle, terminating the recursion
            self.elements |= fresh
            for parent in self.supersets:
                parent.update(fresh)

    def __iter__(self):
        return iter(self.elements)
concat = itertools.chain.from_iterable
class groups_implied(osv.osv):
    """Extension of res.groups with 'implied' groups: users of a group
    automatically belong (transitively) to all the groups it implies."""
    _inherit = 'res.groups'

    def _get_trans_implied(self, cr, uid, ids, field, arg, context=None):
        "computes the transitive closure of relation implied_ids"
        memo = {} # use a memo for performance and cycle avoidance

        def computed_set(g):
            # cset of all groups transitively implied by g (cycle-safe)
            if g not in memo:
                memo[g] = cset(g.implied_ids)
                for h in g.implied_ids:
                    computed_set(h).subsetof(memo[g])
            return memo[g]

        res = {}
        for g in self.browse(cr, SUPERUSER_ID, ids, context):
            res[g.id] = map(int, computed_set(g))
        return res

    _columns = {
        'implied_ids': fields.many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid',
            string='Inherits', help='Users of this group automatically inherit those groups'),
        'trans_implied_ids': fields.function(_get_trans_implied,
            type='many2many', relation='res.groups', string='Transitively inherits'),
    }

    def create(self, cr, uid, values, context=None):
        # create the group without users first, then add them via write() so
        # that the implied-groups propagation below is triggered
        users = values.pop('users', None)
        gid = super(groups_implied, self).create(cr, uid, values, context)
        if users:
            # delegate addition of users to add implied groups
            self.write(cr, uid, [gid], {'users': users}, context)
        return gid

    def write(self, cr, uid, ids, values, context=None):
        res = super(groups_implied, self).write(cr, uid, ids, values, context)
        if values.get('users') or values.get('implied_ids'):
            # add all implied groups (to all users of each group)
            for g in self.browse(cr, uid, ids, context=context):
                gids = map(int, g.trans_implied_ids)
                vals = {'users': [(4, u.id) for u in g.users]}
                super(groups_implied, self).write(cr, uid, gids, vals, context)
        return res
class users_implied(osv.osv):
    """Extension of res.users: adding groups to a user also adds every group
    they transitively imply."""
    _inherit = 'res.users'

    def create(self, cr, uid, values, context=None):
        # create the user without groups, then add them via write() so that
        # the implied groups are added as well
        groups = values.pop('groups_id', None)
        user_id = super(users_implied, self).create(cr, uid, values, context)
        if groups:
            # delegate addition of groups to add implied groups
            self.write(cr, uid, [user_id], {'groups_id': groups}, context)
            # group membership may change which views the user sees
            self.pool['ir.ui.view'].clear_cache()
        return user_id

    def write(self, cr, uid, ids, values, context=None):
        if not isinstance(ids,list):
            ids = [ids]
        res = super(users_implied, self).write(cr, uid, ids, values, context)
        if values.get('groups_id'):
            # add implied groups for all users
            for user in self.browse(cr, uid, ids):
                gs = set(concat(g.trans_implied_ids for g in user.groups_id))
                vals = {'groups_id': [(4, g.id) for g in gs]}
                super(users_implied, self).write(cr, uid, [user.id], vals, context)
            self.pool['ir.ui.view'].clear_cache()
        return res
#----------------------------------------------------------
# Virtual checkbox and selection fields for the res.users form view
#
# Extension of res.groups and res.users for the special groups view in the users
# form. This extension presents groups with selection and boolean widgets:
# - Groups are shown by application, with boolean and/or selection fields.
# Selection fields typically defines a role "Name" for the given application.
# - Uncategorized groups are presented as boolean fields and grouped in a
# section "Others".
#
# The user form view is modified by an inherited view (base.user_groups_view);
# the inherited view replaces the field 'groups_id' by a set of reified group
# fields (boolean or selection fields). The arch of that view is regenerated
# each time groups are changed.
#
# Naming conventions for reified groups fields:
# - boolean field 'in_group_ID' is True iff
# ID is in 'groups_id'
# - selection field 'sel_groups_ID1_..._IDk' is ID iff
# ID is in 'groups_id' and ID is maximal in the set {ID1, ..., IDk}
#----------------------------------------------------------
def name_boolean_group(id):
    """Name of the reified boolean field for group `id`."""
    return 'in_group_%s' % id
def name_selection_groups(ids):
    """Name of the reified selection field for the group ids `ids`."""
    return 'sel_groups_' + '_'.join(str(gid) for gid in ids)
def is_boolean_group(name):
    """Whether `name` is a reified boolean group field name."""
    return name[:9] == 'in_group_'
def is_selection_groups(name):
    """Whether `name` is a reified selection group field name."""
    return name[:11] == 'sel_groups_'
def is_reified_group(name):
    """Whether `name` is any reified group field name (boolean or selection)."""
    return name.startswith(('in_group_', 'sel_groups_'))
def get_boolean_group(name):
    """Extract the group id from a reified boolean group field name."""
    return int(name[len('in_group_'):])
def get_selection_groups(name):
    """Extract the group ids from a reified selection group field name."""
    return [int(part) for part in name[len('sel_groups_'):].split('_')]
def partition(f, xs):
    "return a pair equivalent to (filter(f, xs), filter(lambda x: not f(x), xs))"
    matching, rest = [], []
    for item in xs:
        if f(item):
            matching.append(item)
        else:
            rest.append(item)
    return matching, rest
def parse_m2m(commands):
    "return a list of ids corresponding to a many2many value"
    ids = []
    for command in commands:
        if not isinstance(command, (tuple, list)):
            # a bare id
            ids.append(command)
        elif command[0] in (1, 4):
            # link/update an existing record
            ids.append(command[1])
        elif command[0] == 5:
            # unlink all
            ids = []
        elif command[0] == 6:
            # replace with an explicit list of ids
            ids = list(command[2])
        # commands 0 (create), 2 (delete), 3 (unlink) contribute no ids
    return ids
class groups_view(osv.osv):
    # regenerates the reified user-groups form view whenever groups change
    _inherit = 'res.groups'
def create(self, cr, uid, values, context=None):
res = super(groups_view, self).create(cr, uid, values, context)
self.update_user_groups_view(cr, uid, context)
# ir_values.get_actions() depends on action records
self.pool['ir.values'].clear_caches()
return res
def write(self, cr, uid, ids, values, context=None):
res = super(groups_view, self).write(cr, uid, ids, values, context)
self.update_user_groups_view(cr, uid, context)
# ir_values.get_actions() depends on action records
self.pool['ir.values'].clear_caches()
return res
def unlink(self, cr, uid, ids, context=None):
res = super(groups_view, self).unlink(cr, uid, ids, context)
self.update_user_groups_view(cr, uid, context)
# ir_values.get_actions() depends on action records
self.pool['ir.values'].clear_caches()
return res
    def update_user_groups_view(self, cr, uid, context=None):
        """Regenerate the arch of the view 'base.user_groups_view', which
        inherits the user form and replaces the raw groups_id field by
        reified boolean/selection fields, one per application."""
        # the view with id 'base.user_groups_view' inherits the user form view,
        # and introduces the reified group fields
        # we have to try-catch this, because at first init the view does not exist
        # but we are already creating some basic groups
        user_context = dict(context or {})
        if user_context.get('install_mode'):
            # use installation/admin language for translatable names in the view
            user_context.update(self.pool['res.users'].context_get(cr, uid))
        view = self.pool['ir.model.data'].xmlid_to_object(cr, SUPERUSER_ID, 'base.user_groups_view', context=user_context)
        if view and view.exists() and view._name == 'ir.ui.view':
            group_no_one = view.env.ref('base.group_no_one')
            # xml1 collects selection fields, xml2 collects boolean fields
            xml1, xml2 = [], []
            xml1.append(E.separator(string=_('Application'), colspan="2"))
            for app, kind, gs in self.get_groups_by_application(cr, uid, user_context):
                # hide groups in category 'Hidden' (except to group_no_one)
                attrs = {'groups': 'base.group_no_one'} if app and (app.xml_id == 'base.module_category_hidden' or app.xml_id == 'base.module_category_extra') else {}
                if kind == 'selection':
                    # application name with a selection field
                    field_name = name_selection_groups(map(int, gs))
                    xml1.append(E.field(name=field_name, **attrs))
                    xml1.append(E.newline())
                else:
                    # application separator with boolean fields
                    app_name = app and app.name or _('Other')
                    xml2.append(E.separator(string=app_name, colspan="4", **attrs))
                    for g in gs:
                        field_name = name_boolean_group(g.id)
                        if g == group_no_one:
                            # make the group_no_one invisible in the form view
                            xml2.append(E.field(name=field_name, invisible="1", **attrs))
                        else:
                            xml2.append(E.field(name=field_name, **attrs))

            # a dict child sets attributes on the enclosing element (lxml E-maker)
            xml2.append({'class': "o_label_nowrap"})
            xml = E.field(E.group(*(xml1), col="2"), E.group(*(xml2), col="4"), name="groups_id", position="replace")
            xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
            xml_content = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="utf-8")
            # write without a language so the arch is stored untranslated
            view.with_context(context, lang=None).write({'arch': xml_content})
        return True
def get_application_groups(self, cr, uid, domain=None, context=None):
if domain is None:
domain = []
domain.append(('share', '=', False))
return self.search(cr, uid, domain, context=context)
    def get_groups_by_application(self, cr, uid, context=None):
        """ return all groups classified by application (module category), as a list of pairs:
                [(app, kind, [group, ...]), ...],
            where app and group are browse records, and kind is either 'boolean' or 'selection'.
            Applications are given in sequence order.  If kind is 'selection', the groups are
            given in reverse implication order.
        """
        def linearized(gs):
            # return gs ordered so that each group follows the groups it
            # implies, or None when that order is not total (ambiguous)
            gs = set(gs)
            # determine sequence order: a group should appear after its implied groups
            order = dict.fromkeys(gs, 0)
            for g in gs:
                for h in gs.intersection(g.trans_implied_ids):
                    order[h] -= 1
            # check whether order is total, i.e., sequence orders are distinct
            if len(set(order.itervalues())) == len(gs):
                return sorted(gs, key=lambda g: order[g])
            return None

        # classify all groups by application
        gids = self.get_application_groups(cr, uid, context=context)
        by_app, others = {}, []
        for g in self.browse(cr, uid, gids, context):
            if g.category_id:
                by_app.setdefault(g.category_id, []).append(g)
            else:
                others.append(g)
        # build the result
        res = []
        apps = sorted(by_app.iterkeys(), key=lambda a: a.sequence or 0)
        for app in apps:
            gs = linearized(by_app[app])
            if gs:
                # total implication order: render as a selection field
                res.append((app, 'selection', gs))
            else:
                res.append((app, 'boolean', by_app[app]))
        if others:
            # uncategorized groups end up in the 'Other' section
            res.append((False, 'boolean', others))
        return res
class users_view(osv.osv):
    # maps the reified group fields back and forth onto groups_id
    _inherit = 'res.users'
def create(self, cr, uid, values, context=None):
values = self._remove_reified_groups(values)
return super(users_view, self).create(cr, uid, values, context)
def write(self, cr, uid, ids, values, context=None):
values = self._remove_reified_groups(values)
return super(users_view, self).write(cr, uid, ids, values, context)
def _remove_reified_groups(self, values):
""" return `values` without reified group fields """
add, rem = [], []
values1 = {}
for key, val in values.iteritems():
if is_boolean_group(key):
(add if val else rem).append(get_boolean_group(key))
elif is_selection_groups(key):
rem += get_selection_groups(key)
if val:
add.append(val)
else:
values1[key] = val
if 'groups_id' not in values and (add or rem):
# remove group ids in `rem` and add group ids in `add`
values1['groups_id'] = zip(repeat(3), rem) + zip(repeat(4), add)
return values1
def default_get(self, cr, uid, fields, context=None):
group_fields, fields = partition(is_reified_group, fields)
fields1 = (fields + ['groups_id']) if group_fields else fields
values = super(users_view, self).default_get(cr, uid, fields1, context)
self._add_reified_groups(group_fields, values)
# add "default_groups_ref" inside the context to set default value for group_id with xml values
if 'groups_id' in fields and isinstance(context.get("default_groups_ref"), list):
groups = []
ir_model_data = self.pool.get('ir.model.data')
for group_xml_id in context["default_groups_ref"]:
group_split = group_xml_id.split('.')
if len(group_split) != 2:
raise UserError(_('Invalid context default_groups_ref value (model.name_id) : "%s"') % group_xml_id)
try:
temp, group_id = ir_model_data.get_object_reference(cr, uid, group_split[0], group_split[1])
except ValueError:
group_id = False
groups += [group_id]
values['groups_id'] = groups
return values
    def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
        """Override read to compute reified group fields on the fly from
        groups_id (temporarily added to the query when needed)."""
        # determine whether reified groups fields are required, and which ones
        fields1 = fields or self.fields_get(cr, uid, context=context).keys()
        group_fields, other_fields = partition(is_reified_group, fields1)

        # read regular fields (other_fields); add 'groups_id' if necessary
        drop_groups_id = False
        if group_fields and fields:
            if 'groups_id' not in other_fields:
                other_fields.append('groups_id')
                drop_groups_id = True
        else:
            other_fields = fields

        res = super(users_view, self).read(cr, uid, ids, other_fields, context=context, load=load)

        # post-process result to add reified group fields
        if group_fields:
            for values in (res if isinstance(res, list) else [res]):
                self._add_reified_groups(group_fields, values)
                if drop_groups_id:
                    values.pop('groups_id', None)
        return res
def _add_reified_groups(self, fields, values):
""" add the given reified group fields into `values` """
gids = set(parse_m2m(values.get('groups_id') or []))
for f in fields:
if is_boolean_group(f):
values[f] = get_boolean_group(f) in gids
elif is_selection_groups(f):
selected = [gid for gid in get_selection_groups(f) if gid in gids]
values[f] = selected and selected[-1] or False
    def fields_get(self, cr, uid, allfields=None, context=None, write_access=True, attributes=None):
        """Add pseudo field definitions for the reified group fields
        (visible to administrators only)."""
        res = super(users_view, self).fields_get(cr, uid, allfields, context, write_access, attributes)
        # add reified groups fields
        if not self.pool['res.users']._is_admin(cr, uid, [uid]):
            return res
        for app, kind, gs in self.pool['res.groups'].get_groups_by_application(cr, uid, context):
            if kind == 'selection':
                # selection group field
                tips = ['%s: %s' % (g.name, g.comment) for g in gs if g.comment]
                res[name_selection_groups(map(int, gs))] = {
                    'type': 'selection',
                    'string': app and app.name or _('Other'),
                    'selection': [(False, '')] + [(g.id, g.name) for g in gs],
                    'help': '\n'.join(tips),
                    'exportable': False,
                    'selectable': False,
                }
            else:
                # boolean group fields
                for g in gs:
                    res[name_boolean_group(g.id)] = {
                        'type': 'boolean',
                        'string': g.name,
                        'help': g.comment,
                        'exportable': False,
                        'selectable': False,
                    }
        return res
#----------------------------------------------------------
# change password wizard
#----------------------------------------------------------
class change_password_wizard(osv.TransientModel):
    """
        A wizard to manage the change of users' passwords
    """
    _name = "change.password.wizard"
    _description = "Change Password Wizard"
    _columns = {
        # one line per user whose password is being changed
        'user_ids': fields.one2many('change.password.user', 'wizard_id', string='Users'),
    }

    def _default_user_ids(self, cr, uid, context=None):
        # pre-fill one wizard line per user selected in the list view
        if context is None:
            context = {}
        user_model = self.pool['res.users']
        user_ids = context.get('active_model') == 'res.users' and context.get('active_ids') or []
        return [
            (0, 0, {'user_id': user.id, 'user_login': user.login})
            for user in user_model.browse(cr, uid, user_ids, context=context)
        ]

    _defaults = {
        'user_ids': _default_user_ids,
    }

    def change_password_button(self, cr, uid, ids, context=None):
        """Apply the new passwords; reload the web client when the current
        user's own password changed (his session credentials became stale)."""
        wizard = self.browse(cr, uid, ids, context=context)[0]
        need_reload = any(uid == user.user_id.id for user in wizard.user_ids)
        line_ids = [user.id for user in wizard.user_ids]

        self.pool.get('change.password.user').change_password_button(cr, uid, line_ids, context=context)

        if need_reload:
            return {
                'type': 'ir.actions.client',
                'tag': 'reload'
            }

        return {'type': 'ir.actions.act_window_close'}
class change_password_user(osv.TransientModel):
    """One line of the change-password wizard: a user plus the new
    password to assign to him.
    """
    _name = 'change.password.user'
    _description = 'Change Password Wizard User'
    _columns = {
        'wizard_id': fields.many2one('change.password.wizard', string='Wizard', required=True),
        'user_id': fields.many2one('res.users', string='User', required=True),
        'user_login': fields.char('User Login', readonly=True),
        'new_passwd': fields.char('New Password'),
    }
    _defaults = {
        'new_passwd': '',
    }
    def change_password_button(self, cr, uid, ids, context=None):
        lines = self.browse(cr, uid, ids, context=context)
        for line in lines:
            line.user_id.write({'password': line.new_passwd})
        # Wipe the plaintext passwords as soon as they have been applied.
        self.write(cr, uid, ids, {'new_passwd': False}, context=context)
| syci/OCB | openerp/addons/base/res/res_users.py | Python | agpl-3.0 | 46,553 |
"""
Context dictionary for templates that use the ace_common base template.
"""
from __future__ import absolute_import
from django.conf import settings
from django.core.urlresolvers import NoReverseMatch
from django.urls import reverse
from edxmako.shortcuts import marketing_link
from openedx.core.djangoapps.theming.helpers import get_config_value_from_site_or_settings
def get_base_template_context(site):
    """
    Dict with entries needed for all templates that use the base template.
    """
    def _site_value(name, config_name=None):
        # Look the value up on the site configuration, falling back to
        # Django settings; only forward site_config_name when given so
        # the helper's own default applies otherwise.
        if config_name is None:
            return get_config_value_from_site_or_settings(name, site=site)
        return get_config_value_from_site_or_settings(
            name, site=site, site_config_name=config_name)

    # When on LMS and a dashboard is available, use that as the dashboard url.
    # Otherwise, use the home url instead.
    try:
        dashboard_url = reverse('dashboard')
    except NoReverseMatch:
        dashboard_url = reverse('home')

    return {
        # Platform information
        'homepage_url': marketing_link('ROOT'),
        'dashboard_url': dashboard_url,
        'template_revision': getattr(settings, 'EDX_PLATFORM_REVISION', None),
        'platform_name': _site_value('PLATFORM_NAME', 'platform_name'),
        'contact_email': _site_value('CONTACT_EMAIL', 'contact_email'),
        'contact_mailing_address': _site_value('CONTACT_MAILING_ADDRESS',
                                               'contact_mailing_address'),
        'social_media_urls': _site_value('SOCIAL_MEDIA_FOOTER_URLS'),
        'mobile_store_urls': _site_value('MOBILE_STORE_URLS'),
    }
| ESOedX/edx-platform | openedx/core/djangoapps/ace_common/template_context.py | Python | agpl-3.0 | 1,658 |
from flask import (
Flask,
escape,
flash,
request,
jsonify,
redirect,
url_for,
render_template,
send_file,
)
import numpy as np
import os
# sklearn.externals.joblib was deprecated in scikit-learn 0.21 and removed
# in 0.23; prefer the standalone joblib package, falling back for old envs.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib

# Pre-trained k-nearest-neighbours classifier for the Iris dataset.
knn = joblib.load("knn.pkl")

app = Flask(__name__)
@app.route("/")
def hello():
    """Greet the caller, using the optional ``name`` query parameter."""
    print("Started hello")
    visitor = request.args.get("name", "my friend")
    return f"<h2>Hello, {escape(visitor)}!</h2>"
@app.route("/square/<username>")
def squar_val(username):
    """Return the square of the number given in the URL, as a string."""
    value = float(username)
    return str(value * value)
def average(lst):
    """Return the arithmetic mean of the numbers in *lst*.

    Raises:
        ValueError: if *lst* is empty (previously this surfaced as an
            obscure ZeroDivisionError).
    """
    if not lst:
        raise ValueError("average() requires at least one value")
    return sum(lst) / len(lst)
@app.route("/avg/<nums>")
def avg(nums):
    """Average a comma-separated list of numbers taken from the URL."""
    values = [float(item) for item in nums.split(",")]
    return str(average(values))
@app.route("/iris/<params>")
def fit_predict_iris(params):
    """Classify an iris flower from four comma-separated measurements."""
    features = np.array([float(item) for item in params.split(",")])
    features = features.reshape(1, -1)
    print("Input params:", features)
    prediction = knn.predict(features)
    img_path = '<br><img src="/static/setosa.jpg" alt="Setoca iris flower" width="500" height="600">'
    return str(prediction) + img_path
@app.route("/show_image")
def show_image():
    """Serve a static page containing the sample iris image."""
    print("image loaded")
    html = '<img src="/static/setosa.jpg" alt="Setoca iris flower" width="500" height="600">'
    return html
@app.route("/iris_post", methods=["POST"])
def add_message():
    """Classify an iris flower from a JSON body: {"flower": "a,b,c,d"}.

    Redirects to the 400 endpoint when the payload is malformed.
    """
    try:
        content = request.get_json()
        params = content["flower"].split(",")
        params = np.array([float(num) for num in params]).reshape(1, -1)
        print("Input params:", params)
        predict = {"class": str(knn.predict(params)[0])}
    except (TypeError, KeyError, ValueError):
        # Bug fix: the previous bare `except:` swallowed *every* exception
        # (including SystemExit/KeyboardInterrupt); only catch the errors a
        # malformed payload can actually produce — no JSON body (TypeError),
        # missing "flower" key (KeyError) or non-numeric values (ValueError).
        return redirect(url_for("bad_request"))
    return jsonify(predict)
from flask import abort
@app.route("/badrequest400")
def bad_request():
    # Redirect target used by other endpoints to report a 400 Bad Request.
    abort(400)
from flask_wtf import FlaskForm
from wtforms import StringField, FileField
from werkzeug.utils import secure_filename
from wtforms.validators import DataRequired
import pandas as pd
# Directory where uploaded files are stored ("" means the current directory).
UPLOAD_FOLDER = ""
# File extensions accepted by the upload endpoints (set literal instead of
# the redundant set([...]) constructor).
ALLOWED_EXTENSIONS = {"txt", "pdf", "png", "jpg", "jpeg", "gif"}
# NOTE(review): hard-coded secrets must not ship to production; load
# SECRET_KEY / WTF_CSRF_SECRET_KEY from the environment instead.
app.config.update(
    dict(SECRET_KEY="powerful secretkey", WTF_CSRF_SECRET_KEY="a csrf secret key")
)
app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
class MyForm(FlaskForm):
    """Form used by /submit: output file base name plus the CSV to classify."""
    # "name" becomes the base name of the generated <name>.csv result file.
    name = StringField("name", validators=[DataRequired()])
    file = FileField()
@app.route("/submit", methods=("GET", "POST"))
def submit():
    """Render the upload form; on a valid POST, classify the uploaded CSV.

    The uploaded file is read as a headerless CSV (one flower per row);
    the predictions are written to ``<name>.csv`` and sent back to the
    client as a download.  (Removed a block of commented-out dead code.)
    """
    form = MyForm()
    if form.validate_on_submit():
        uploaded = form.file.data
        filename = form.name.data + ".csv"
        df = pd.read_csv(uploaded, header=None)
        print(df.head())
        predict = knn.predict(df)
        result = pd.DataFrame(predict)
        result.to_csv(filename, index=False)
        return send_file(
            filename,
            mimetype="text/csv",
            attachment_filename=filename,
            as_attachment=True,
        )
    return render_template("submit.html", form=form)
def allowed_file(filename):
    """Return True if *filename* has an extension we accept for upload."""
    if "." not in filename:
        return False
    extension = filename.rsplit(".", 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route("/upload", methods=["GET", "POST"])
def upload_file():
    """Plain HTML upload form; on POST, save an allowed file to UPLOAD_FOLDER."""
    if request.method == "POST":
        # check if the post request has the file part
        if "file" not in request.files:
            flash("No file part")
            return redirect(request.url)
        file = request.files["file"]
        # if user does not select file, browser also
        # submit an empty part without filename
        if file.filename == "":
            flash("No selected file")
            return redirect(request.url)
        if file and allowed_file(file.filename):
            # secure_filename() strips path separators and unsafe characters
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config["UPLOAD_FOLDER"], filename))
            return "file uploaded"
    return """
    <!doctype html>
    <title>Upload new File</title>
    <h1>Upload new File</h1>
    <form method=post enctype=multipart/form-data>
      <input type=file name=file>
      <input type=submit value=Upload>
    </form>
    """
| Diyago/Machine-Learning-scripts | deployment/docker flask fit predict/hello.py | Python | apache-2.0 | 4,068 |
# Copyright (c) 2015 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import ddt
import mock
from manila.common import constants
from manila import exception
from manila.share import configuration
from manila.share.drivers.windows import windows_smb_helper
from manila.share.drivers.windows import windows_utils
from manila import test
from oslo_config import cfg
CONF = cfg.CONF
CONF.import_opt('share_mount_path',
'manila.share.drivers.generic')
@ddt.ddt
class WindowsSMBHelperTestCase(test.TestCase):
    """Unit tests for the Windows SMB share helper.

    The remote PowerShell executor is replaced with a mock, so each test
    checks the exact command line the helper builds and how it interprets
    the command output.
    """
    _FAKE_SERVER = {'public_address': mock.sentinel.public_address}
    _FAKE_SHARE_NAME = "fake_share_name"
    # UNC path of the exported share: \\<public address>\<share name>.
    _FAKE_SHARE = "\\\\%s\\%s" % (_FAKE_SERVER['public_address'],
                                  _FAKE_SHARE_NAME)
    _FAKE_SHARE_LOCATION = os.path.join(
        configuration.Configuration(None).share_mount_path,
        _FAKE_SHARE_NAME)
    _FAKE_ACCOUNT_NAME = 'FakeDomain\\FakeUser'
    # Access rule granting read/write to the fake account.
    _FAKE_RW_ACC_RULE = {
        'access_to': _FAKE_ACCOUNT_NAME,
        'access_level': constants.ACCESS_LEVEL_RW,
        'access_type': 'user',
    }
    def setUp(self):
        # The helper receives this mock instead of a real WMI/PS executor.
        self._remote_exec = mock.Mock()
        fake_conf = configuration.Configuration(None)
        self._win_smb_helper = windows_smb_helper.WindowsSMBHelper(
            self._remote_exec, fake_conf)
        super(WindowsSMBHelperTestCase, self).setUp()
    def test_init_helper(self):
        self._win_smb_helper.init_helper(mock.sentinel.server)
        self._remote_exec.assert_called_once_with(mock.sentinel.server,
                                                  "Get-SmbShare")
    @ddt.data(True, False)
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_share_exists')
    def test_create_export(self, share_exists, mock_share_exists):
        # A New-SmbShare command is issued only when the share is missing.
        mock_share_exists.return_value = share_exists
        result = self._win_smb_helper.create_export(self._FAKE_SERVER,
                                                    self._FAKE_SHARE_NAME)
        if not share_exists:
            cmd = ['New-SmbShare', '-Name', self._FAKE_SHARE_NAME, '-Path',
                   self._win_smb_helper._windows_utils.normalize_path(
                       self._FAKE_SHARE_LOCATION),
                   '-ReadAccess', "*%s" % self._win_smb_helper._NULL_SID]
            self._remote_exec.assert_called_once_with(self._FAKE_SERVER, cmd)
        else:
            self.assertFalse(self._remote_exec.called)
        self.assertEqual(self._FAKE_SHARE, result)
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_share_exists')
    def test_remove_export(self, mock_share_exists):
        mock_share_exists.return_value = True
        self._win_smb_helper.remove_export(mock.sentinel.server,
                                           mock.sentinel.share_name)
        cmd = ['Remove-SmbShare', '-Name', mock.sentinel.share_name, "-Force"]
        self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd)
    @mock.patch.object(windows_utils.WindowsUtils,
                       'get_volume_path_by_mount_path')
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       '_get_share_path_by_name')
    def test_get_volume_path_by_share_name(self, mock_get_share_path,
                                           mock_get_vol_path):
        mock_get_share_path.return_value = self._FAKE_SHARE_LOCATION
        volume_path = self._win_smb_helper._get_volume_path_by_share_name(
            mock.sentinel.server, self._FAKE_SHARE_NAME)
        mock_get_share_path.assert_called_once_with(mock.sentinel.server,
                                                    self._FAKE_SHARE_NAME)
        mock_get_vol_path.assert_called_once_with(mock.sentinel.server,
                                                  self._FAKE_SHARE_LOCATION)
        self.assertEqual(mock_get_vol_path.return_value, volume_path)
    @ddt.data({'raw_out': '', 'expected': []},
              {'raw_out': '{"key": "val"}',
               'expected': [{"key": "val"}]},
              {'raw_out': '[{"key": "val"}, {"key2": "val2"}]',
               'expected': [{"key": "val"}, {"key2": "val2"}]})
    @ddt.unpack
    def test_get_acls_helper(self, raw_out, expected):
        # _get_acls must normalize empty / single-object / list JSON output.
        self._remote_exec.return_value = (raw_out, mock.sentinel.err)
        rules = self._win_smb_helper._get_acls(mock.sentinel.server,
                                               self._FAKE_SHARE_NAME)
        self.assertEqual(expected, rules)
        expected_cmd = (
            'Get-SmbShareAccess -Name %s | '
            'Select-Object @("Name", "AccountName", '
            '"AccessControlType", "AccessRight") | '
            'ConvertTo-JSON -Compress') % self._FAKE_SHARE_NAME
        self._remote_exec.assert_called_once_with(mock.sentinel.server,
                                                  expected_cmd)
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       '_get_acls')
    def test_get_access_rules(self, mock_get_acls):
        helper = self._win_smb_helper
        valid_acl = {
            'AccountName': self._FAKE_ACCOUNT_NAME,
            'AccessRight': helper._WIN_ACCESS_RIGHT_FULL,
            'AccessControlType': helper._WIN_ACL_ALLOW,
        }
        valid_acls = [valid_acl,
                      dict(valid_acl,
                           AccessRight=helper._WIN_ACCESS_RIGHT_CHANGE),
                      dict(valid_acl,
                           AccessRight=helper._WIN_ACCESS_RIGHT_READ)]
        # Those are rules that were not added by us and are expected to
        # be ignored. When encountering such a rule, a warning message
        # will be logged.
        ignored_acls = [
            dict(valid_acl, AccessRight=helper._WIN_ACCESS_RIGHT_CUSTOM),
            dict(valid_acl, AccessControlType=helper._WIN_ACL_DENY)]
        mock_get_acls.return_value = valid_acls + ignored_acls
        # There won't be multiple access rules for the same account,
        # but we'll ignore this fact for the sake of this test.
        expected_rules = [self._FAKE_RW_ACC_RULE, self._FAKE_RW_ACC_RULE,
                          dict(self._FAKE_RW_ACC_RULE,
                               access_level=constants.ACCESS_LEVEL_RO)]
        rules = helper.get_access_rules(mock.sentinel.server,
                                        mock.sentinel.share_name)
        self.assertEqual(expected_rules, rules)
        mock_get_acls.assert_called_once_with(mock.sentinel.server,
                                              mock.sentinel.share_name)
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_refresh_acl')
    def test_grant_share_access(self, mock_refresh_acl):
        self._win_smb_helper._grant_share_access(mock.sentinel.server,
                                                 mock.sentinel.share_name,
                                                 constants.ACCESS_LEVEL_RW,
                                                 mock.sentinel.username)
        cmd = ["Grant-SmbShareAccess", "-Name", mock.sentinel.share_name,
               "-AccessRight", "Change",
               "-AccountName", "'%s'" % mock.sentinel.username, "-Force"]
        self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd)
        mock_refresh_acl.assert_called_once_with(mock.sentinel.server,
                                                 mock.sentinel.share_name)
    def test_refresh_acl(self):
        self._win_smb_helper._refresh_acl(mock.sentinel.server,
                                          mock.sentinel.share_name)
        cmd = ['Set-SmbPathAcl', '-ShareName', mock.sentinel.share_name]
        self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd)
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper, '_refresh_acl')
    def test_revoke_share_access(self, mock_refresh_acl):
        self._win_smb_helper._revoke_share_access(mock.sentinel.server,
                                                  mock.sentinel.share_name,
                                                  mock.sentinel.username)
        cmd = ["Revoke-SmbShareAccess", "-Name", mock.sentinel.share_name,
               "-AccountName", '"%s"' % mock.sentinel.username, "-Force"]
        self._remote_exec.assert_called_once_with(mock.sentinel.server, cmd)
        mock_refresh_acl.assert_called_once_with(mock.sentinel.server,
                                                 mock.sentinel.share_name)
    def test_update_access_invalid_type(self):
        # Only 'user' access rules are supported by the SMB helper.
        invalid_access_rule = dict(self._FAKE_RW_ACC_RULE,
                                   access_type='ip')
        self.assertRaises(
            exception.InvalidShareAccess,
            self._win_smb_helper.update_access,
            mock.sentinel.server, mock.sentinel.share_name,
            [invalid_access_rule], [], [])
    def test_update_access_invalid_level(self):
        invalid_access_rule = dict(self._FAKE_RW_ACC_RULE,
                                   access_level='fake_level')
        self.assertRaises(
            exception.InvalidShareAccessLevel,
            self._win_smb_helper.update_access,
            mock.sentinel.server, mock.sentinel.share_name,
            [], [invalid_access_rule], [])
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       '_revoke_share_access')
    def test_update_access_deleting_invalid_rule(self, mock_revoke):
        # We want to make sure that we allow deleting invalid rules.
        invalid_access_rule = dict(self._FAKE_RW_ACC_RULE,
                                   access_level='fake_level')
        delete_rules = [invalid_access_rule, self._FAKE_RW_ACC_RULE]
        self._win_smb_helper.update_access(
            mock.sentinel.server, mock.sentinel.share_name,
            [], [], delete_rules)
        mock_revoke.assert_called_once_with(
            mock.sentinel.server, mock.sentinel.share_name,
            self._FAKE_RW_ACC_RULE['access_to'])
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       'validate_access_rules')
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       'get_access_rules')
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       '_grant_share_access')
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       '_revoke_share_access')
    def test_update_access(self, mock_revoke, mock_grant,
                           mock_get_access_rules, mock_validate):
        added_rules = [mock.MagicMock(), mock.MagicMock()]
        deleted_rules = [mock.MagicMock(), mock.MagicMock()]
        self._win_smb_helper.update_access(
            mock.sentinel.server, mock.sentinel.share_name,
            [], added_rules, deleted_rules)
        mock_revoke.assert_has_calls(
            [mock.call(mock.sentinel.server, mock.sentinel.share_name,
                       deleted_rule['access_to'])
             for deleted_rule in deleted_rules])
        mock_grant.assert_has_calls(
            [mock.call(mock.sentinel.server, mock.sentinel.share_name,
                       added_rule['access_level'], added_rule['access_to'])
             for added_rule in added_rules])
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       '_get_rule_updates')
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       'validate_access_rules')
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       'get_access_rules')
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       '_grant_share_access')
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       '_revoke_share_access')
    def test_update_access_maintenance(
            self, mock_revoke, mock_grant,
            mock_get_access_rules, mock_validate,
            mock_get_rule_updates):
        # Maintenance mode: no explicit add/delete lists, so the helper must
        # diff the requested rules against the rules currently applied.
        all_rules = mock.MagicMock()
        added_rules = [mock.MagicMock(), mock.MagicMock()]
        deleted_rules = [mock.MagicMock(), mock.MagicMock()]
        mock_get_rule_updates.return_value = [
            added_rules, deleted_rules]
        self._win_smb_helper.update_access(
            mock.sentinel.server, mock.sentinel.share_name,
            all_rules, [], [])
        mock_get_access_rules.assert_called_once_with(
            mock.sentinel.server, mock.sentinel.share_name)
        mock_get_rule_updates.assert_called_once_with(
            existing_rules=mock_get_access_rules.return_value,
            requested_rules=all_rules)
        mock_revoke.assert_has_calls(
            [mock.call(mock.sentinel.server, mock.sentinel.share_name,
                       deleted_rule['access_to'])
             for deleted_rule in deleted_rules])
        mock_grant.assert_has_calls(
            [mock.call(mock.sentinel.server, mock.sentinel.share_name,
                       added_rule['access_level'], added_rule['access_to'])
             for added_rule in added_rules])
    def test_get_rule_updates(self):
        # Account names differing only by case must be treated as the same
        # account (curr_rule_0 vs req_rule_0).
        req_rule_0 = self._FAKE_RW_ACC_RULE
        req_rule_1 = dict(self._FAKE_RW_ACC_RULE,
                          access_to='fake_acc')
        curr_rule_0 = dict(self._FAKE_RW_ACC_RULE,
                           access_to=self._FAKE_RW_ACC_RULE[
                               'access_to'].upper())
        curr_rule_1 = dict(self._FAKE_RW_ACC_RULE,
                           access_to='fake_acc2')
        curr_rule_2 = dict(req_rule_1,
                           access_level=constants.ACCESS_LEVEL_RO)
        expected_added_rules = [req_rule_1]
        expected_deleted_rules = [curr_rule_1, curr_rule_2]
        existing_rules = [curr_rule_0, curr_rule_1, curr_rule_2]
        requested_rules = [req_rule_0, req_rule_1]
        (added_rules,
         deleted_rules) = self._win_smb_helper._get_rule_updates(
            existing_rules, requested_rules)
        self.assertEqual(expected_added_rules, added_rules)
        self.assertEqual(expected_deleted_rules, deleted_rules)
    def test_get_share_name(self):
        result = self._win_smb_helper._get_share_name(self._FAKE_SHARE)
        self.assertEqual(self._FAKE_SHARE_NAME, result)
    def test_exports_for_share(self):
        result = self._win_smb_helper.get_exports_for_share(
            self._FAKE_SERVER, self._FAKE_SHARE_LOCATION)
        self.assertEqual([self._FAKE_SHARE], result)
    def test_get_share_path_by_name(self):
        self._remote_exec.return_value = (self._FAKE_SHARE_LOCATION,
                                          mock.sentinel.std_err)
        result = self._win_smb_helper._get_share_path_by_name(
            mock.sentinel.server,
            mock.sentinel.share_name)
        cmd = ('Get-SmbShare -Name %s | '
               'Select-Object -ExpandProperty Path' % mock.sentinel.share_name)
        self._remote_exec.assert_called_once_with(mock.sentinel.server,
                                                  cmd,
                                                  check_exit_code=True)
        self.assertEqual(self._FAKE_SHARE_LOCATION, result)
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       '_get_share_path_by_name')
    def test_get_share_path_by_export_location(self,
                                               mock_get_share_path_by_name):
        mock_get_share_path_by_name.return_value = mock.sentinel.share_path
        result = self._win_smb_helper.get_share_path_by_export_location(
            mock.sentinel.server, self._FAKE_SHARE)
        mock_get_share_path_by_name.assert_called_once_with(
            mock.sentinel.server, self._FAKE_SHARE_NAME)
        self.assertEqual(mock.sentinel.share_path, result)
    @mock.patch.object(windows_smb_helper.WindowsSMBHelper,
                       '_get_share_path_by_name')
    def test_share_exists(self, mock_get_share_path_by_name):
        result = self._win_smb_helper._share_exists(mock.sentinel.server,
                                                    mock.sentinel.share_name)
        mock_get_share_path_by_name.assert_called_once_with(
            mock.sentinel.server,
            mock.sentinel.share_name,
            ignore_missing=True)
        self.assertTrue(result)
| NetApp/manila | manila/tests/share/drivers/windows/test_windows_smb_helper.py | Python | apache-2.0 | 16,797 |
from xml.dom.minidom import parseString
from xml.dom import minidom
from MocNode import MocNode
import os
import sys
radioTypes = ['GSM', 'UMTS', 'TD', 'AG', 'MCE']  # version codes classified as "radio" (everything else is "ground")
def getMocOrder(path):
    """Generate a moc-order XML file for every *-mocinfo-sdrm.xml in *path*."""
    for afile in os.listdir(path):
        if not afile.lower().endswith("-mocinfo-sdrm.xml"):
            continue
        # Bug fix: the file lives in *path*, not in the current working
        # directory, and the handle must be closed deterministically.
        with open(os.path.join(path, afile), "r") as handle:
            mocinfoXML = handle.read()
        # minidom cannot parse these exports' GBK encoding declaration.
        mocinfoXML = mocinfoXML.replace('encoding="GBK"', 'encoding="utf-8"')
        mocOrder = MocOrder(mocinfoXML, path)
        mocOrder.generateMocOrderXML()
def getType(version):
    """Map a version code to "radio" or "ground"."""
    return "radio" if version in radioTypes else "ground"
class MocOrder:
    """Parses a *-mocinfo-sdrm.xml export and writes an ordered MOC list.

    MOCs are reordered so that referenced MOCs precede their referrers
    (unless the referenced MOC is a child of the referrer).  Print
    statements were converted to the function form (valid on both
    Python 2 and 3) and the deprecated ``file()`` builtin replaced by
    ``open()`` in a context manager.
    """
    def __init__(self, mocinfoxml, path):
        self.mocinfoxml = mocinfoxml  # raw XML text of the mocinfo export
        self.filePath = path          # directory where the result is written
        self.radioType = ''           # "version" attribute of the document
        self.mocs = []                # MOC names, in (re)ordered sequence
        self.mocNodes = {}            # MOC name -> MocNode
    def parsexml(self):
        """Fill radioType, mocs and mocNodes from the mocinfo XML."""
        domtree = parseString(self.mocinfoxml)
        domcument = domtree.documentElement
        if domcument.hasAttribute("version"):
            self.radioType = domcument.getAttribute("version")
        print("radioType is %s" % self.radioType)
        mocinfos = domcument.getElementsByTagName("mocinfo")
        for mocinfo in mocinfos:
            mocNode = MocNode()
            name = mocinfo.getAttribute('name')
            parent = mocinfo.getAttribute('parent')
            self.mocs.append(name)
            mocNode.setName(name)
            mocNode.setParent(parent)
            print("mocInfo name is %s parent is %s " % (name, parent))
            fields = mocinfo.getElementsByTagName("field")
            refmocs = []
            for field in fields:
                fieldName = field.getAttribute('name')
                # Fields named "ref<Moc>" or "ref<digit><Moc>" reference
                # another MOC.
                if fieldName[0 : 3] == "ref":
                    if fieldName[3].isnumeric():
                        refmoc = fieldName[4:]
                    else:
                        refmoc = fieldName[3:]
                    if refmoc not in refmocs:
                        refmocs.append(refmoc)
            print(" ref field is %s" % refmocs)
            mocNode.setRefmoc(refmocs)
            self.mocNodes[name] = mocNode
    def dealWithMocs(self):
        """Reorder self.mocs so referenced MOCs come before their referrers."""
        for mocNode in self.mocNodes:
            name = self.mocNodes[mocNode].name
            refmocs = self.mocNodes[mocNode].refmoc
            if len(refmocs) == 0:
                continue
            for refmoc in refmocs:
                if refmoc not in self.mocs:
                    continue
                if self.mocs.index(refmoc) < self.mocs.index(name):
                    continue
                # A MOC referencing its own child keeps its position.
                if self.mocNodes[refmoc].parent == name:
                    continue
                # Swap the two entries so the referenced MOC comes first.
                selfIndex = self.mocs.index(name)
                refIndex = self.mocs.index(refmoc)
                self.mocs.remove(name)
                self.mocs.insert(selfIndex, refmoc)
                self.mocs.remove(refmoc)
                self.mocs.insert(refIndex, name)
        self.mocs.reverse()
    def generateMocOrderXML(self):
        """Parse, order and write the <version>[-<type>]-cm-mocorder.xml file."""
        self.parsexml()
        self.dealWithMocs()
        doc = minidom.Document()
        root = doc.createElement("root")
        doc.appendChild(root)
        orderedMocList = doc.createElement("orderedMocList")
        orderedMocList.setAttribute("mocType", getType(self.radioType))
        orderedMocList.setAttribute("version", self.radioType)
        for mo in self.mocs:
            moc = doc.createElement("moc")
            moc.setAttribute("name", mo)
            orderedMocList.appendChild(moc)
        root.appendChild(orderedMocList)
        if self.radioType in radioTypes:
            xmlName = self.radioType + "-" + getType(self.radioType) + "-cm-mocorder.xml"
        else:
            xmlName = self.radioType + "-cm-mocorder.xml"
        absoluteFilePath = self.filePath + "/" + xmlName
        # Bug fix: use open() with a context manager instead of the
        # deprecated file() builtin, guaranteeing the handle is closed.
        with open(absoluteFilePath, "w") as xmlFile:
            doc.writexml(xmlFile, "\t", " ", "\n", "UTF-8")
if __name__ == "__main__":
    # Expects one argument: the directory holding the mocinfo exports.
    # print statement converted to a call (works on Python 2 and 3).
    print('arguments passed is: ' + sys.argv[1])
    getMocOrder(sys.argv[1])
| liyueshining/moon | mocorderconverter/MocOrder.py | Python | mit | 4,028 |
from django.core.management.base import BaseCommand, CommandError
from mangaki.models import Work, Rating
from django.db import connection
from django.db.models import Count
from collections import Counter
import sys
class Command(BaseCommand):
    args = ''
    help = 'Lookup some work'

    def handle(self, *args, **options):
        """Print the most-rated work matching the query, then its rating counts."""
        matches = (Work.objects.filter(title__icontains=args[0])
                   .annotate(Count('rating'))
                   .order_by('-rating__count'))
        work = matches[0]
        print(work.title, work.id)
        counts = Counter()
        for rating in Rating.objects.filter(work=work):
            counts[rating.choice] += 1
        print(counts)
| RaitoBezarius/mangaki | mangaki/mangaki/management/commands/lookup.py | Python | agpl-3.0 | 616 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema for the statistics app: creates the DailyCount model
    # used to aggregate per-day counters.
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="DailyCount",
            fields=[
                ("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
                ("day", models.DateField(help_text="The day this count is for")),
                ("item_type", models.CharField(help_text="The thing being counted", max_length=1)),
                ("scope", models.CharField(help_text="The scope in which it is being counted", max_length=32)),
                ("count", models.PositiveIntegerField()),
            ],
        ),
        # Composite index matching the "item_type in scope on day" lookup.
        migrations.AlterIndexTogether(name="dailycount", index_together=set([("item_type", "scope", "day")])),
    ]
| praekelt/casepro | casepro/statistics/migrations/0001_initial.py | Python | bsd-3-clause | 884 |
from django.db import models
from django.contrib.auth.models import User
from django.utils.encoding import python_2_unicode_compatible
class ActiveProfileManager(models.Manager):
    """Manager exposing only profiles whose user account is active."""

    def get_queryset(self):
        base = super(ActiveProfileManager, self).get_queryset()
        return base.filter(user__is_active=True)
@python_2_unicode_compatible
class CNHProfile(models.Model):
    """Per-user profile extending the built-in Django ``User`` model."""
    # One profile per user, reachable as user.profile.
    user = models.OneToOneField(User, related_name="profile", null=False)
    nickname = models.CharField(
        max_length=16,
        null=True,
        blank=True,
        help_text='What is your nickname'
    )
    website = models.URLField(
        blank=True,
        help_text='What is your website URL?'
    )
    # Default manager plus an "active" manager limited to active accounts.
    objects = models.Manager()
    active = ActiveProfileManager()
    def __str__(self):
        return self.user.username
    @property
    def is_active(self):
        # Mirror the activity flag of the underlying auth user.
        return self.user.is_active
| kaka0525/Copy-n-Haste | CopyHaste/cnh_profile/models.py | Python | mit | 906 |
#
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.urls import reverse
from weblate.fonts.models import Font, FontGroup
from weblate.fonts.tests.utils import FONT, FontTestCase
from weblate.lang.models import Language
class FontViewTest(FontTestCase):
    """Exercise the font management views: listing, upload, groups, overrides."""
    @property
    def fonts_url(self):
        # Font management page of the test project.
        return reverse("fonts", kwargs=self.kw_project)
    def test_noperm(self):
        # Unprivileged users can see fonts but get no management form.
        font = self.add_font()
        response = self.client.get(self.fonts_url)
        self.assertContains(response, font.family)
        self.assertNotContains(response, "Add font")
    def test_manage(self):
        # Full management workflow as a superuser: upload a font, build a
        # group, add/remove a language override, then clean everything up.
        self.user.is_superuser = True
        self.user.save()
        # Validate the form is there
        response = self.client.get(self.fonts_url)
        self.assertContains(response, "Add font")
        # Upload font
        with open(FONT, "rb") as handle:
            response = self.client.post(self.fonts_url, {"font": handle}, follow=True)
        self.assertContains(response, "Droid Sans Fallback")
        font = Font.objects.get()
        self.assertContains(
            self.client.get(font.get_absolute_url()), "Droid Sans Fallback"
        )
        # Create font group
        response = self.client.post(
            self.fonts_url, {"name": "font-group", "font": font.pk}, follow=True
        )
        self.assertContains(response, "font-group")
        group = FontGroup.objects.get()
        self.assertContains(self.client.get(group.get_absolute_url()), "font-group")
        # Add override
        language = Language.objects.get(code="zh_Hant")
        response = self.client.post(
            group.get_absolute_url(),
            {"language": language.pk, "font": font.pk},
            follow=True,
        )
        self.assertContains(response, language.name)
        override = group.fontoverride_set.get()
        # Remove override
        self.client.post(
            group.get_absolute_url(), {"override": override.pk}, follow=True
        )
        self.assertEqual(group.fontoverride_set.count(), 0)
        # Remove group
        self.client.post(group.get_absolute_url())
        self.assertEqual(FontGroup.objects.count(), 0)
        # Remove font
        self.client.post(font.get_absolute_url())
        self.assertEqual(Font.objects.count(), 0)
| phw/weblate | weblate/fonts/tests/test_views.py | Python | gpl-3.0 | 3,012 |
# Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from __future__ import division
import copy
from array import array
from whoosh import matching
from whoosh.compat import u
from whoosh.reading import TermNotFound
from whoosh.compat import methodcaller
# Exceptions
class QueryError(Exception):
    """Raised when an error is encountered while running a query."""
# Functions
def error_query(msg, q=None):
    """Return *q* (or a fresh :class:`NullQuery` when *q* is not given)
    with its ``error`` attribute set to *msg*.
    """
    target = _NullQuery() if q is None else q
    target.error = msg
    return target
def token_lists(q, phrases=True):
    """Returns the terms in the query tree, with the query hierarchy
    represented as nested lists.

    When ``phrases`` is False, terms belonging to :class:`Phrase` leaves
    are excluded.
    """
    if q.is_leaf():
        from whoosh.query import Phrase
        if phrases or not isinstance(q, Phrase):
            return list(q.tokens())
        # Bug fix: this branch previously fell through and returned None,
        # which made the recursive len(t) call below raise TypeError; an
        # excluded phrase simply contributes no tokens.
        return []
    else:
        ls = []
        for qq in q.children():
            t = token_lists(qq, phrases=phrases)
            if len(t) == 1:
                t = t[0]
            if t:
                ls.append(t)
        return ls
# Utility classes
class Lowest(object):
    """A sentinel value that compares lower than any other object except
    itself.
    """

    def __cmp__(self, other):
        # Python 2 rich-comparison fallback.
        if other.__class__ is Lowest:
            return 0
        return -1

    def __eq__(self, other):
        return type(other) is self.__class__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return type(other) is not self.__class__

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)
class Highest(object):
    """A sentinel value that compares higher than any other object except
    itself.
    """

    def __cmp__(self, other):
        # Python 2 rich-comparison fallback.
        if other.__class__ is Highest:
            return 0
        return 1

    def __eq__(self, other):
        return type(other) is self.__class__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        # NOTE(review): quirk preserved from the original — a Highest
        # instance compares "less than" another Highest instance.
        return type(other) is self.__class__

    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __ge__(self, other):
        return self.__gt__(other) or self.__eq__(other)
# Replace both classes with their sole instances: only the singleton
# sentinels are needed for comparisons.
Lowest = Lowest()
Highest = Highest()
# Base classes
class Query(object):
    """Abstract base class for all queries.

    Note that this base class implements __or__, __and__, and __sub__ to allow
    slightly more convenient composition of query objects::

        >>> Term("content", u"a") | Term("content", u"b")
        Or([Term("content", u"a"), Term("content", u"b")])

        >>> Term("content", u"a") & Term("content", u"b")
        And([Term("content", u"a"), Term("content", u"b")])

        >>> Term("content", u"a") - Term("content", u"b")
        And([Term("content", u"a"), Not(Term("content", u"b"))])
    """

    # For queries produced by the query parser, record where in the user
    # query this object originated
    startchar = endchar = None
    # For queries produced by the query parser, records an error that resulted
    # in this query
    error = None

    def __or__(self, query):
        """Allows you to use | between query objects to wrap them in an Or
        query.
        """
        from whoosh.query import Or
        return Or([self, query]).normalize()

    def __and__(self, query):
        """Allows you to use & between query objects to wrap them in an And
        query.
        """
        from whoosh.query import And
        return And([self, query]).normalize()

    def __sub__(self, query):
        """Allows you to use - between query objects to add the right-hand
        query as a "NOT" query.
        """
        from whoosh.query import And, Not
        return And([self, Not(query)]).normalize()

    def __hash__(self):
        raise NotImplementedError

    def __ne__(self, other):
        return not self.__eq__(other)

    def is_leaf(self):
        """Returns True if this is a leaf node in the query tree, or False if
        this query has sub-queries.
        """
        return True

    def children(self):
        """Returns an iterator of the subqueries of this object.
        """
        return iter([])

    def is_range(self):
        """Returns True if this object searches for values within a range.
        """
        return False

    def has_terms(self):
        """Returns True if this specific object represents a search for a
        specific term (as opposed to a pattern, as in Wildcard and Prefix) or
        terms (i.e., whether the ``replace()`` method does something
        meaningful on this instance).
        """
        return False

    def apply(self, fn):
        """If this query has children, calls the given function on each child
        and returns a new copy of this node with the new children returned by
        the function. If this is a leaf node, simply returns this object.

        This is useful for writing functions that transform a query tree. For
        example, this function changes all Term objects in a query tree into
        Variations objects::

            def term2var(q):
                if isinstance(q, Term):
                    return Variations(q.fieldname, q.text)
                else:
                    return q.apply(term2var)

            q = And([Term("f", "alfa"),
                     Or([Term("f", "bravo"),
                         Not(Term("f", "charlie"))])])
            q = term2var(q)

        Note that this method does not automatically create copies of nodes.
        To avoid modifying the original tree, your function should call the
        :meth:`Query.copy` method on nodes before changing their attributes.
        """
        return self

    def accept(self, fn):
        """Applies the given function to this query's subqueries (if any) and
        then to this query itself::

            def boost_phrases(q):
                if isintance(q, Phrase):
                    q.boost *= 2.0
                return q

            myquery = myquery.accept(boost_phrases)

        This method automatically creates copies of the nodes in the original
        tree before passing them to your function, so your function can change
        attributes on nodes without altering the original tree.

        This method is less flexible than using :meth:`Query.apply` (in fact
        it's implemented using that method) but is often more straightforward.
        """
        def fn_wrapper(q):
            q = q.apply(fn_wrapper)
            return fn(q)

        return fn_wrapper(self)

    def replace(self, fieldname, oldtext, newtext):
        """Returns a copy of this query with oldtext replaced by newtext (if
        oldtext was anywhere in this query).

        Note that this returns a *new* query with the given text replaced. It
        *does not* modify the original query "in place".
        """
        # The default implementation uses the apply method to "pass down" the
        # replace() method call
        if self.is_leaf():
            return copy.copy(self)
        else:
            return self.apply(methodcaller("replace", fieldname, oldtext,
                                           newtext))

    def copy(self):
        """Deprecated, just use ``copy.deepcopy``.
        """
        return copy.deepcopy(self)

    def all_terms(self, termset=None, phrases=True):
        """Returns a set of all terms in this query tree.

        This method exists for backwards compatibility. For more flexibility
        use the :meth:`Query.iter_all_terms` method instead, which simply
        yields the terms in the query.

        :param phrases: Whether to add words found in Phrase queries.
        :rtype: set
        """
        from whoosh.query import Phrase

        # Bug fix: use an identity test (was ``if not termset``) so that an
        # empty set passed in by the caller is filled in place instead of
        # being silently replaced -- consistent with _existing_terms_helper().
        if termset is None:
            termset = set()
        for q in self.leaves():
            if q.has_terms():
                if phrases or not isinstance(q, Phrase):
                    termset.update(q.terms())
        return termset

    def _existing_terms_helper(self, ixreader, termset, reverse):
        if termset is None:
            termset = set()
        if reverse:
            test = lambda t: t not in ixreader
        else:
            test = lambda t: t in ixreader

        return termset, test

    def existing_terms(self, ixreader, termset=None, reverse=False,
                       phrases=True, expand=False):
        """Returns a set of all terms in this query tree that exist in the
        given ixreaderder.

        This method exists for backwards compatibility. For more flexibility
        use the :meth:`Query.iter_all_terms` method instead, which simply
        yields the terms in the query.

        :param ixreader: A :class:`whoosh.reading.IndexReader` object.
        :param reverse: If True, this method adds *missing* terms rather than
            *existing* terms to the set.
        :param phrases: Whether to add words found in Phrase queries.
        :param expand: If True, queries that match multiple terms
            (such as :class:`Wildcard` and :class:`Prefix`) will return all
            matching expansions.
        :rtype: set
        """
        # By default, this method calls all_terms() and then filters based on
        # the contents of the reader. Subclasses that need to use the reader to
        # generate the terms (i.e. MultiTerm) need to override this
        # implementation
        termset, test = self._existing_terms_helper(ixreader, termset, reverse)
        if self.is_leaf():
            gen = self.all_terms(phrases=phrases)
            termset.update(t for t in gen if test(t))
        else:
            for q in self.children():
                q.existing_terms(ixreader, termset, reverse, phrases, expand)
        return termset

    def leaves(self):
        """Returns an iterator of all the leaf queries in this query tree as a
        flat series.
        """
        if self.is_leaf():
            yield self
        else:
            for q in self.children():
                for qq in q.leaves():
                    yield qq

    def iter_all_terms(self):
        """Returns an iterator of ("fieldname", "text") pairs for all terms in
        this query tree.

        >>> qp = qparser.QueryParser("text", myindex.schema)
        >>> q = myparser.parse("alfa bravo title:charlie")
        >>> # List the terms in a query
        >>> list(q.iter_all_terms())
        [("text", "alfa"), ("text", "bravo"), ("title", "charlie")]
        >>> # Get a set of all terms in the query that don't exist in the index
        >>> r = myindex.reader()
        >>> missing = set(t for t in q.iter_all_terms() if t not in r)
        set([("text", "alfa"), ("title", "charlie")])
        >>> # All terms in the query that occur in fewer than 5 documents in
        >>> # the index
        >>> [t for t in q.iter_all_terms() if r.doc_frequency(t[0], t[1]) < 5]
        [("title", "charlie")]
        """
        for q in self.leaves():
            if q.has_terms():
                for t in q.terms():
                    yield t

    def all_tokens(self, boost=1.0):
        """Returns an iterator of :class:`analysis.Token` objects corresponding
        to all terms in this query tree. The Token objects will have the
        ``fieldname``, ``text``, and ``boost`` attributes set. If the query
        was built by the query parser, they Token objects will also have
        ``startchar`` and ``endchar`` attributes indexing into the original
        user query.
        """
        if self.is_leaf():
            for token in self.tokens(boost):
                yield token
        else:
            # Compound queries multiply their own boost into the boost passed
            # down to each child.
            boost *= self.boost if hasattr(self, "boost") else 1.0
            for child in self.children():
                for token in child.all_tokens(boost):
                    yield token

    def terms(self):
        """Yields zero or more ("fieldname", "text") pairs searched for by this
        query object. You can check whether a query object targets specific
        terms before you call this method using :meth:`Query.has_terms`.

        To get all terms in a query tree, use :meth:`Query.iter_all_terms`.
        """
        for token in self.tokens():
            yield (token.fieldname, token.text)

    def tokens(self, boost=1.0):
        """Yields zero or more :class:`analysis.Token` objects corresponding to
        the terms searched for by this query object. You can check whether a
        query object targets specific terms before you call this method using
        :meth:`Query.has_terms`.

        The Token objects will have the ``fieldname``, ``text``, and ``boost``
        attributes set. If the query was built by the query parser, they Token
        objects will also have ``startchar`` and ``endchar`` attributes
        indexing into the original user query.

        To get all tokens for a query tree, use :meth:`Query.all_tokens`.
        """
        return []

    def requires(self):
        """Returns a set of queries that are *known* to be required to match
        for the entire query to match. Note that other queries might also turn
        out to be required but not be determinable by examining the static
        query.

        >>> a = Term("f", u"a")
        >>> b = Term("f", u"b")
        >>> And([a, b]).requires()
        set([Term("f", u"a"), Term("f", u"b")])
        >>> Or([a, b]).requires()
        set([])
        >>> AndMaybe(a, b).requires()
        set([Term("f", u"a")])
        >>> a.requires()
        set([Term("f", u"a")])
        """
        # Subclasses should implement the _add_required_to(qset) method
        return set([self])

    def field(self):
        """Returns the field this query matches in, or None if this query does
        not match in a single field.
        """
        return self.fieldname

    def with_boost(self, boost):
        """Returns a COPY of this query with the boost set to the given value.

        If a query type does not accept a boost itself, it will try to pass the
        boost on to its children, if any.
        """
        q = self.copy()
        q.boost = boost
        return q

    def estimate_size(self, ixreader):
        """Returns an estimate of how many documents this query could
        potentially match (for example, the estimated size of a simple term
        query is the document frequency of the term). It is permissible to
        overestimate, but not to underestimate.
        """
        raise NotImplementedError

    def estimate_min_size(self, ixreader):
        """Returns an estimate of the minimum number of documents this query
        could potentially match.
        """
        return self.estimate_size(ixreader)

    def matcher(self, searcher, weighting=None):
        """Returns a :class:`~whoosh.matching.Matcher` object you can use to
        retrieve documents and scores matching this query.

        :rtype: :class:`whoosh.matching.Matcher`
        """
        raise NotImplementedError

    def docs(self, searcher):
        """Returns an iterator of docnums matching this query.

        >>> with my_index.searcher() as searcher:
        ...     list(my_query.docs(searcher))
        [10, 34, 78, 103]

        :param searcher: A :class:`whoosh.searching.Searcher` object.
        """
        try:
            return self.matcher(searcher).all_ids()
        except TermNotFound:
            return iter([])

    def deletion_docs(self, searcher):
        """Returns an iterator of docnums matching this query for the purpose
        of deletion. The :meth:`~whoosh.writing.IndexWriter.delete_by_query`
        method will use this method when deciding what documents to delete,
        allowing special queries (e.g. nested queries) to override what
        documents are deleted. The default implementation just forwards to
        :meth:`Query.docs`.
        """
        return self.docs(searcher)

    def normalize(self):
        """Returns a recursively "normalized" form of this query. The
        normalized form removes redundancy and empty queries. This is called
        automatically on query trees created by the query parser, but you may
        want to call it yourself if you're writing your own parser or building
        your own queries.

        >>> q = And([And([Term("f", u"a"),
        ...               Term("f", u"b")]),
        ...          Term("f", u"c"), Or([])])
        >>> q.normalize()
        And([Term("f", u"a"), Term("f", u"b"), Term("f", u"c")])

        Note that this returns a *new, normalized* query. It *does not* modify
        the original query "in place".
        """
        return self

    def simplify(self, ixreader):
        """Returns a recursively simplified form of this query, where
        "second-order" queries (such as Prefix and Variations) are re-written
        into lower-level queries (such as Term and Or).
        """
        return self
# Null query
class _NullQuery(Query):
    "Represents a query that won't match anything."

    # A null query carries a neutral boost so code that reads .boost works.
    boost = 1.0
    def __call__(self):
        # Calling the singleton hands back the singleton itself.
        return self
    def __repr__(self):
        return "<%s>" % (self.__class__.__name__,)
    def __eq__(self, other):
        # All null queries (including subclass instances) compare equal.
        return isinstance(other, _NullQuery)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        return id(self)
    # Copying (shallow or deep) returns the same object, preserving the
    # singleton property of the module-level NullQuery instance.
    def __copy__(self):
        return self
    def __deepcopy__(self, memo):
        return self
    def field(self):
        # A null query is not bound to any field.
        return None
    def estimate_size(self, ixreader):
        # Matches nothing, so the size estimate is always zero.
        return 0
    def normalize(self):
        return self
    def simplify(self, ixreader):
        return self
    def docs(self, searcher):
        return []
    def matcher(self, searcher, weighting=None):
        return matching.NullMatcher()
# Module-level singleton; use this instance rather than constructing new
# _NullQuery objects.
NullQuery = _NullQuery()
# Every
class Every(Query):
    """A query that matches every document containing any term in a given
    field. If you don't specify a field, the query matches every document.

    >>> # Match any documents with something in the "path" field
    >>> q = Every("path")
    >>> # Matcher every document
    >>> q = Every()

    The unfielded form (matching every document) is efficient.

    The fielded is more efficient than a prefix query with an empty prefix or a
    '*' wildcard, but it can still be very slow on large indexes. It requires
    the searcher to read the full posting list of every term in the given
    field.

    Instead of using this query it is much more efficient when you create the
    index to include a single term that appears in all documents that have the
    field you want to match.

    For example, instead of this::

        # Match all documents that have something in the "path" field
        q = Every("path")

    Do this when indexing::

        # Add an extra field that indicates whether a document has a path
        schema = fields.Schema(path=fields.ID, has_path=fields.ID)

        # When indexing, set the "has_path" field based on whether the document
        # has anything in the "path" field
        writer.add_document(text=text_value1)
        writer.add_document(text=text_value2, path=path_value2, has_path="t")

    Then to find all documents with a path::

        q = Term("has_path", "t")
    """

    def __init__(self, fieldname=None, boost=1.0):
        """
        :param fieldname: the name of the field to match, or ``None`` or ``*``
            to match all documents.
        """
        # Normalize the "match everything" spellings to None.
        if not fieldname or fieldname == "*":
            fieldname = None
        self.fieldname = fieldname
        self.boost = boost

    def __repr__(self):
        return "%s(%r, boost=%s)" % (self.__class__.__name__, self.fieldname,
                                     self.boost)

    def __eq__(self, other):
        # Bug fix: previously written as ``other and ...``, which returned
        # the falsy ``other`` itself (e.g. None) instead of False. The class
        # identity check alone safely guards the attribute accesses.
        return (self.__class__ is other.__class__
                and self.fieldname == other.fieldname
                and self.boost == other.boost)

    def __unicode__(self):
        return u("%s:*") % self.fieldname

    __str__ = __unicode__

    def __hash__(self):
        # Hash only the fieldname; equal objects (same fieldname and boost)
        # therefore always hash equal, as required.
        return hash(self.fieldname)

    def estimate_size(self, ixreader):
        return ixreader.doc_count()

    def matcher(self, searcher, weighting=None):
        fieldname = self.fieldname
        reader = searcher.reader()

        if fieldname in (None, "", "*"):
            # This takes into account deletions
            doclist = array("I", reader.all_doc_ids())
        elif (reader.supports_caches()
              and reader.fieldcache_available(fieldname)):
            # If the reader has a field cache, use it to quickly get the list
            # of documents that have a value for this field
            fc = reader.fieldcache(self.fieldname)
            doclist = array("I", (docnum for docnum, ordinal in fc.ords()
                                  if ordinal != 0))
        else:
            # This is a hacky hack, but just create an in-memory set of all the
            # document numbers of every term in the field. This is SLOOOW for
            # large indexes
            doclist = set()
            for text in searcher.lexicon(fieldname):
                pr = searcher.postings(fieldname, text)
                doclist.update(pr.all_ids())
            doclist = sorted(doclist)

        return matching.ListMatcher(doclist, all_weights=self.boost)
| mozilla/popcorn_maker | vendor-local/lib/python/whoosh/query/qcore.py | Python | bsd-3-clause | 23,233 |
from django.conf import settings
from django.contrib import admin
from wagtail.documents.models import Document
# Register the stock Document model with the Django admin only when the
# project has not swapped in a custom document model; otherwise the unused
# wagtaildocs.Document class would just cause confusion in the admin.
document_model = getattr(
    settings, 'WAGTAILDOCS_DOCUMENT_MODEL', 'wagtaildocs.Document')
if document_model == 'wagtaildocs.Document':
    admin.site.register(Document)
| zerolab/wagtail | wagtail/documents/admin.py | Python | bsd-3-clause | 446 |
# -*- coding: utf-8 -*-
#Stage 2 Update (Python 3)
from __future__ import unicode_literals
from builtins import str
from builtins import map
from builtins import range
import ast, datetime, importlib, json, logging, scrapy
from jsonpath_rw import jsonpath, parse
from jsonpath_rw.lexer import JsonPathLexerError
from scrapy.selector import Selector
from scrapy.http import Request, FormRequest
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Join, TakeFirst
from scrapy.exceptions import CloseSpider
from django.db.models.signals import post_save
from django.utils.encoding import smart_text
from dynamic_scraper.spiders.django_base_spider import DjangoBaseSpider
from dynamic_scraper.models import ScraperElem
from dynamic_scraper.utils.loader import JsonItemLoader
from dynamic_scraper.utils.scheduler import Scheduler
from dynamic_scraper.utils import processors
class DjangoSpider(DjangoBaseSpider):
tmp_non_db_results = {}
non_db_results = {}
current_output_num_mp_response_bodies = 0
current_output_num_dp_response_bodies = 0
    def __init__(self, *args, **kwargs):
        """Require the Django model and Scrapy item classes, then delegate
        to the base spider initialization."""
        # These attributes must be provided by the concrete spider subclass;
        # the base class validates all names listed in mandatory_vars.
        self.mandatory_vars.append('scraped_obj_class')
        self.mandatory_vars.append('scraped_obj_item_class')
        # NOTE(review): ``self`` is passed twice here -- once via the bound
        # super() call and once as an explicit positional argument. This
        # looks like a bug; confirm against DjangoBaseSpider.__init__'s
        # signature before changing it.
        super(DjangoSpider, self).__init__(self, *args, **kwargs)
    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        """Scrapy factory hook: construct the spider and run the full DDS
        setup sequence (config, request kwargs, custom processor check,
        signal wiring, start URLs, scheduler and run-state counters)."""
        spider = cls(*args, **kwargs)
        spider._set_crawler(crawler)
        spider._set_config(**kwargs)
        spider._set_request_kwargs()
        # Early import check only: warn up front if a configured custom
        # processors module cannot be imported (the result is discarded;
        # _get_processors re-imports per element later).
        for cp_path in spider.conf['CUSTOM_PROCESSORS']:
            try:
                custom_processors = importlib.import_module(cp_path)
            except ImportError:
                msg = "Custom processors from {path} could not be imported, processors won't be applied".format(
                    path=cp_path,
                )
                spider.log(msg, logging.WARNING)
        # Run post-save bookkeeping whenever a scraped object is stored.
        post_save.connect(spider._post_save_tasks, sender=spider.scraped_obj_class)
        spider._set_start_urls(spider.scrape_url)
        spider.scheduler = Scheduler(spider.scraper.scraped_obj_class.scraper_scheduler_conf)
        # Per-run state: current page type and item loaders/counters.
        spider.from_page = 'MP'
        spider.loader = None
        spider.dummy_loader = None
        spider.items_read_count = 0
        spider.items_save_count = 0
        msg = 'Spider for {roc} "{ro}" ({pk}) initialized.'.format(
            roc=spider.ref_object.__class__.__name__,
            ro=str(spider.ref_object),
            pk=str(spider.ref_object.pk),
        )
        spider.log(msg, logging.INFO)
        return spider
def output_usage_help(self):
out = (
'',
'DDS Usage',
'=========',
' scrapy crawl [scrapy_options] SPIDERNAME -a id=REF_OBJECT_ID [dds_options]',
'',
'Options',
'-------',
'-a do_action=(yes|no) Save output to DB, default: no (Test Mode or File Output)',
'-L LOG_LEVEL (scrapy option) Setting the log level for both Scrapy and DDS',
'-a run_type|rt=(TASK|SHELL) Simulate task based scraper run, default: SHELL',
'-a max_items_read|mir=[Int] Limit number of items to read',
'-a max_items_save|mis=[Int] Limit number of items to save',
'-a max_pages_read|mpr=[Int] Limit number of pages to read (static pagination)',
'-a start_page|sp=[PAGE] Start at page PAGE, e.g. 5, F (static pagination)',
'-a end_page|ep=[PAGE] End scraping at page PAGE, e.g. 10, M (static pagination)',
'-a num_pages_follow|npf=[Int] Number of pages to follow (dynamic pagination)',
'-a output_num_mp_response_bodies|omp=[Int] Output response body content of MP for debugging',
'-a output_num_dp_response_bodies|odb=[Int] Output response body content of DP for debugging',
'',
)
for out_str in out:
self.dds_logger.info(out_str)
def _set_request_kwargs(self):
super(DjangoSpider, self)._set_request_kwargs()
for rpt in self.scraper.requestpagetype_set.all():
if rpt.form_data != '':
try:
form_data = json.loads(rpt.form_data)
except ValueError:
msg = "Incorrect form_data attribute ({pt}): not a valid JSON dict!".format(pt=rpt.page_type)
self.dds_logger.error(msg)
raise CloseSpider()
if not isinstance(form_data, dict):
msg = "Incorrect form_data attribute ({pt}): not a valid JSON dict!".format(pt=rpt.page_type)
self.dds_logger.error(msg)
raise CloseSpider()
def _set_config(self, **kwargs):
log_msg = ""
#max_items_read|mir
if 'mir' in kwargs:
kwargs['max_items_read'] = kwargs['mir']
if 'max_items_read' in kwargs:
try:
self.conf['MAX_ITEMS_READ'] = int(kwargs['max_items_read'])
except ValueError:
msg = "You have to provide an integer value as max_items_read parameter!"
self.dds_logger.error(msg)
raise CloseSpider()
if len(log_msg) > 0:
log_msg += ", "
log_msg += "max_items_read " + str(self.conf['MAX_ITEMS_READ'])
else:
self.conf['MAX_ITEMS_READ'] = self.scraper.max_items_read
#max_items_save|mis
if 'mis' in kwargs:
kwargs['max_items_save'] = kwargs['mis']
if 'max_items_save' in kwargs:
try:
self.conf['MAX_ITEMS_SAVE'] = int(kwargs['max_items_save'])
except ValueError:
msg = "You have to provide an integer value as max_items_save parameter!"
self.dds_logger.error(msg)
raise CloseSpider()
if len(log_msg) > 0:
log_msg += ", "
log_msg += "max_items_save " + str(self.conf['MAX_ITEMS_SAVE'])
else:
self.conf['MAX_ITEMS_SAVE'] = self.scraper.max_items_save
#max_pages_read|mpr
if 'mpr' in kwargs:
kwargs['max_pages_read'] = kwargs['mpr']
if 'max_pages_read' in kwargs:
try:
self.conf['MAX_PAGES_READ'] = int(kwargs['max_pages_read'])
except ValueError:
msg = "You have to provide an integer value as max_pages_read parameter!"
self.dds_logger.error(msg)
raise CloseSpider()
if len(log_msg) > 0:
log_msg += ", "
log_msg += "max_pages_read " + str(self.conf['MAX_PAGES_READ'])
else:
self.conf['MAX_PAGES_READ'] = None
#start_page|sp
if 'sp' in kwargs:
kwargs['start_page'] = kwargs['sp']
if 'start_page' in kwargs:
self.conf['START_PAGE'] = kwargs['start_page']
else:
self.conf['START_PAGE'] = None
#end_page|ep
if 'ep' in kwargs:
kwargs['end_page'] = kwargs['ep']
if 'end_page' in kwargs:
self.conf['END_PAGE'] = kwargs['end_page']
else:
self.conf['END_PAGE'] = None
#num_pages_follow|npf
if 'npf' in kwargs:
kwargs['num_pages_follow'] = kwargs['npf']
if 'num_pages_follow' in kwargs:
try:
self.conf['NUM_PAGES_FOLLOW'] = int(kwargs['num_pages_follow'])
except ValueError:
msg = "You have to provide an integer value as num_pages_follow parameter!"
self.dds_logger.error(msg)
raise CloseSpider()
if len(log_msg) > 0:
log_msg += ", "
log_msg += "num_pages_follow " + str(self.conf['NUM_PAGES_FOLLOW'])
else:
self.conf['NUM_PAGES_FOLLOW'] = self.scraper.num_pages_follow
#output_num_mp_response_bodies|omp
if 'omp' in kwargs:
kwargs['output_num_mp_response_bodies'] = kwargs['omp']
if 'output_num_mp_response_bodies' in kwargs:
try:
self.conf['OUTPUT_NUM_MP_RESPONSE_BODIES'] = int(kwargs['output_num_mp_response_bodies'])
except ValueError:
msg = "You have to provide an integer value as output_num_mp_response_bodies parameter!"
self.dds_logger.error(msg)
raise CloseSpider()
if len(log_msg) > 0:
log_msg += ", "
log_msg += "output_num_mp_response_bodies " + str(self.conf['OUTPUT_NUM_MP_RESPONSE_BODIES'])
else:
self.conf['OUTPUT_NUM_MP_RESPONSE_BODIES'] = 0
#output_num_dp_response_bodies|odp
if 'odp' in kwargs:
kwargs['output_num_dp_response_bodies'] = kwargs['odp']
if 'output_num_dp_response_bodies' in kwargs:
try:
self.conf['OUTPUT_NUM_DP_RESPONSE_BODIES'] = int(kwargs['output_num_dp_response_bodies'])
except ValueError:
msg = "You have to provide an integer value as output_num_dp_response_bodies parameter!"
self.dds_logger.error(msg)
raise CloseSpider()
if len(log_msg) > 0:
log_msg += ", "
log_msg += "output_num_dp_response_bodies " + str(self.conf['OUTPUT_NUM_DP_RESPONSE_BODIES'])
else:
self.conf['OUTPUT_NUM_DP_RESPONSE_BODIES'] = 0
super(DjangoSpider, self)._set_config(log_msg, **kwargs)
def limit_page_nums(self, pages):
if self.conf['START_PAGE']:
index = 0
exists = False
for page in pages:
if str(page) == self.conf['START_PAGE']:
pages = pages[index:]
exists = True
break
index += 1
if not exists:
msg = "The provided start page doesn't exist in the range of page values!"
self.dds_logger.error(msg)
raise CloseSpider()
if self.conf['END_PAGE']:
index = 0
exists = False
for page in pages:
if str(page) == self.conf['END_PAGE']:
pages = pages[:index+1]
exists = True
break
index += 1
if not exists:
msg = "The provided end page doesn't exist in the range of page values!"
self.dds_logger.error(msg)
raise CloseSpider()
return pages
    def _set_start_urls(self, scrape_url):
        """Build ``self.start_urls`` (and the parallel ``self.pages`` list)
        from the scraper's static pagination settings.

        Pagination types: 'R' (range function args), 'F' (free list),
        'N'/'O' (no static pagination -- single start URL).
        """
        self.start_urls = []
        if self.scraper.pagination_type in ['R', 'F',]:
            if not self.scraper.pagination_page_replace:
                msg = 'Please provide a pagination_page_replace context corresponding to pagination_type!'
                self.dds_logger.error(msg)
                raise CloseSpider()
        if self.scraper.pagination_type == 'R':
            try:
                # "start, stop[, step]" string -> python range() arguments
                pages = self.scraper.pagination_page_replace
                pages = pages.split(',')
                if len(pages) > 3:
                    raise Exception
                pages = list(range(*list(map(int, pages))))
            except Exception:
                msg = 'Pagination_page_replace for pagination_type "RANGE_FUNCT" ' +\
                      'has to be provided as python range function arguments ' +\
                      '[start], stop[, step] (e.g. "1, 50, 10", no brackets)!'
                self.dds_logger.error(msg)
                raise CloseSpider()
            pages = self.limit_page_nums(pages)
        if self.scraper.pagination_type == 'F':
            try:
                # Comma-separated literal list; parsed safely as a python list
                pages = self.scraper.pagination_page_replace
                pages = pages.strip(', ')
                pages = ast.literal_eval("[" + pages + ",]")
            except:
                msg = 'Wrong pagination_page_replace format for pagination_type "FREE_LIST", ' +\
                      "Syntax: 'Replace string 1', 'Another replace string 2', 'A number 3', ..."
                self.dds_logger.error(msg)
                raise CloseSpider()
            pages = self.limit_page_nums(pages)
        if self.scraper.pagination_type in ['R', 'F',]:
            append_str = self.scraper.pagination_append_str
            # Avoid a double slash when joining base URL and append string
            if scrape_url[-1:] == '/' and append_str[0:1] == '/':
                append_str = append_str[1:]
            self.pages = pages
            if self.conf['MAX_PAGES_READ']:
                self.pages = self.pages[0:self.conf['MAX_PAGES_READ']]
            for page in self.pages:
                url = scrape_url + append_str.format(page=page)
                self.start_urls.append(url)
            # Optionally scrape the bare base URL first (unless an explicit
            # start page was requested)
            if not self.scraper.pagination_on_start and not self.conf['START_PAGE']:
                self.start_urls.insert(0, scrape_url)
                self.pages.insert(0, "")
        if self.scraper.pagination_type in ['N', 'O',]:
            self.start_urls.append(scrape_url)
            self.pages = ["",]
        num = len(self.start_urls)
        if (num == 1):
            url_str = 'URL'
        else:
            url_str = 'URLs'
        self.log("Scraper set to run on {num} start {url_str}.".format(
            num=num, url_str=url_str), logging.INFO)
    def _prepare_mp_req_data(self, kwargs_orig, form_data_orig, page, follow_page=''):
        """Return (request kwargs, form data) for a main page request, with
        the ``{page}``/``{follow_page}`` placeholders substituted in the
        headers, body, cookies and form data.

        Substitution is done via a JSON dump/load round trip so nested
        structures are handled; the originals are not modified.
        """
        kwargs = kwargs_orig.copy()
        if 'meta' not in kwargs:
            kwargs['meta'] = {}
        form_data = None
        if form_data_orig:
            form_data = json.loads(form_data_orig).copy()
        if 'headers' in kwargs:
            kwargs['headers'] = json.loads(json.dumps(kwargs['headers']).replace('{page}', str(page)))
            kwargs['headers'] = json.loads(json.dumps(kwargs['headers']).replace('{follow_page}', str(follow_page)))
        if 'body' in kwargs:
            # body is a plain string; replace directly
            kwargs['body'] = kwargs['body'].replace('{page}', str(page))
            kwargs['body'] = kwargs['body'].replace('{follow_page}', str(follow_page))
        if 'cookies' in kwargs:
            kwargs['cookies'] = json.loads(json.dumps(kwargs['cookies']).replace('{page}', str(page)))
            kwargs['cookies'] = json.loads(json.dumps(kwargs['cookies']).replace('{follow_page}', str(follow_page)))
        if form_data:
            form_data = json.loads(json.dumps(form_data).replace('{page}', str(page)))
            form_data = json.loads(json.dumps(form_data).replace('{follow_page}', str(follow_page)))
        return kwargs, form_data
    def _log_page_info(self, page_num, follow_page_num, url, rpt, form_data, kwargs):
        """Log a framed info banner for the page about to be scraped."""
        self.dds_logger.info('')
        self.dds_logger.info(self.bcolors['BOLD'] + '======================================================================================' + self.bcolors['ENDC'])
        self.struct_log("{es}{es2}Scraping data from page {p}({fp}).{ec}{ec}".format(
            p=page_num, fp=follow_page_num, es=self.bcolors['BOLD'], es2=self.bcolors['HEADER'], ec=self.bcolors['ENDC']))
        self.struct_log("URL     : {url}".format(url=url))
        # Request details (page type, method, form data, kwargs)
        self._log_request_info(rpt, form_data, kwargs)
        self.dds_logger.info(self.bcolors['BOLD'] + '======================================================================================' + self.bcolors['ENDC'])
    def start_requests(self):
        """Yield one Request (or FormRequest) per start URL, carrying the
        page bookkeeping in the request meta."""
        index = 0
        rpt = self.scraper.get_main_page_rpt()
        follow_page_num = 0
        for url in self.start_urls:
            self._set_meta_splash_args()
            page_num = index + 1
            kwargs, form_data = self._prepare_mp_req_data(self.mp_request_kwargs, self.scraper.get_main_page_rpt().form_data, self.pages[index])
            kwargs['meta']['page_num'] = page_num
            kwargs['meta']['follow_page_num'] = follow_page_num
            kwargs['meta']['rpt'] = rpt
            self._log_page_info(page_num, follow_page_num, url, rpt, form_data, kwargs)
            index += 1
            # 'R' = plain request; anything else sends a form request with
            # the (placeholder-substituted) form data
            if rpt.request_type == 'R':
                yield Request(url, callback=self.parse, method=rpt.method, dont_filter=rpt.dont_filter, **kwargs)
            else:
                yield FormRequest(url, callback=self.parse, method=rpt.method, formdata=form_data, dont_filter=rpt.dont_filter, **kwargs)
def _check_for_double_item(self, item):
idf_elems = self.scraper.get_id_field_elems()
num_item_idfs = 0
for idf_elem in idf_elems:
idf_name = idf_elem.scraped_obj_attr.name
if idf_name in item:
num_item_idfs += 1
cnt_double = 0
if len(idf_elems) > 0 and num_item_idfs == len(idf_elems):
qs = self.scraped_obj_class.objects
for idf_elem in idf_elems:
idf_name = idf_elem.scraped_obj_attr.name
qs = qs.filter(**{idf_name:item[idf_name]})
cnt_double = qs.count()
# Mark item as DOUBLE item
if cnt_double > 0:
item._is_double = True
return item, True
else:
item._is_double = False
return item, False
    def _get_processors(self, scraper_elem):
        """Resolve the processor callables configured for a scraper element.

        Built-in processors (dynamic_scraper.utils.processors) are checked
        first, then every module listed in conf['CUSTOM_PROCESSORS'].
        Unknown names are logged as errors and skipped.

        NOTE(review): returns a list when no processors string is set, but a
        tuple otherwise -- callers apparently only iterate/unpack, so both
        work; confirm before normalizing.
        """
        procs_str = scraper_elem.processors
        attr_type = scraper_elem.scraped_obj_attr.attr_type
        # Default pipeline: take the first scraped value and strip whitespace
        if scraper_elem.use_default_procs:
            procs = [TakeFirst(), processors.string_strip,]
        else:
            procs = []
        if not procs_str:
            return procs
        procs_tmp = list(procs_str.split(','))
        for p in procs_tmp:
            p = p.strip()
            added = False
            if hasattr(processors, p):
                procs.append(getattr(processors, p))
                added = True
            # Fall through to custom processor modules (a name present in
            # several modules is appended once per module that defines it)
            for cp_path in self.conf['CUSTOM_PROCESSORS']:
                try:
                    custom_processors = importlib.import_module(cp_path)
                    if hasattr(custom_processors, p):
                        procs.append(getattr(custom_processors, p))
                        added = True
                except ImportError:
                    pass
            if not added:
                self.log("Processor '{p}' is not defined!".format(p=p), logging.ERROR)
        procs = tuple(procs)
        return procs
def _set_loader_context(self, context_str):
try:
context_str = context_str.strip(', ')
context = ast.literal_eval("{" + context_str + "}")
context['spider'] = self
self.loader.context = context
self.dummy_loader.context = context
except SyntaxError:
self.log("Wrong context definition format: " + context_str, logging.ERROR)
    def _scrape_item_attr(self, scraper_elem, response, from_page, item_num):
        """Scrape a single attribute into the active item loader and log the
        collected value.

        Only runs when the element's configured request page type matches
        the page currently being parsed (main page elements also fire for
        follow pages). Attributes with save_to_db=False are loaded into the
        dummy loader under the name 'tmp_field' and copied to
        tmp_non_db_results instead of the DB-backed item.
        """
        if(from_page == scraper_elem.request_page_type or
           (from_page == 'FP' and scraper_elem.request_page_type == 'MP')):
            procs = self._get_processors(scraper_elem)
            self._set_loader_context(scraper_elem.proc_ctxt)
            if not scraper_elem.scraped_obj_attr.save_to_db:
                name = 'tmp_field'
                loader = self.dummy_loader
            else:
                name = scraper_elem.scraped_obj_attr.name
                loader = self.loader
            static_ctxt = loader.context.get('static', '')
            self.log("Applying the following processors: {p_list}".format(
                p_list=str([p.__name__ if hasattr(p, '__name__') else type(p).__name__ for p in procs])),
                logging.DEBUG)
            # A 'static' processor with a static context value short-circuits
            # the xpath extraction entirely
            if processors.static in procs and static_ctxt:
                loader.add_value(name, static_ctxt)
            elif(scraper_elem.reg_exp):
                loader.add_xpath(name, scraper_elem.x_path, *procs, re=scraper_elem.reg_exp)
            else:
                loader.add_xpath(name, scraper_elem.x_path, *procs)
            if not scraper_elem.scraped_obj_attr.save_to_db:
                # Copy the non-DB value over to the per-item temp results
                item = loader.load_item()
                if name in item:
                    self.tmp_non_db_results[item_num][scraper_elem.scraped_obj_attr.name] = item[name]
            # Build the "PAGE TYPE  CONTENT-TYPE|METHOD  name page(num)" log line
            rpt = self.scraper.requestpagetype_set.filter(page_type=from_page)[0]
            rpt_str = rpt.get_content_type_display()
            if rpt.render_javascript:
                rpt_str += '-JS'
            rpt_str += '|' + rpt.method
            page_str = str(response.request.meta['page_num'])
            page_str += '(' + str(response.request.meta['follow_page_num']) + ')-'
            msg = '{page_type: <4} {rpt_str: <13} {cs}{name: <20}{ce} {page}{num} '.format(page=page_str, num=str(item_num), name=name, rpt_str=rpt_str, page_type=from_page, cs=self.bcolors["BOLD"], ce=self.bcolors["ENDC"])
            c_values = loader.get_collected_values(name)
            if len(c_values) > 0:
                val_str = c_values[0]
                # Truncate long values unless running at DEBUG level
                if self.conf['CONSOLE_LOG_LEVEL'] != 'DEBUG':
                    val_str = (val_str[:400] + '..') if len(val_str) > 400 else val_str
                msg += smart_text(val_str)
            else:
                msg += 'None'
            self.log(msg, logging.INFO)
def _set_loader(self, response, from_page, xs, item):
self.from_page = from_page
rpt = self.scraper.get_rpt(from_page)
if not (self.from_page == 'MP' or self.from_page == 'FP'):
item = response.request.meta['item']
if rpt.content_type == 'J':
json_resp = json.loads(response.body_as_unicode())
self.loader = JsonItemLoader(item=item, selector=json_resp)
else:
self.loader = ItemLoader(item=item, response=response)
else:
if rpt.content_type == 'J':
self.loader = JsonItemLoader(item=item, selector=xs)
else:
self.loader = ItemLoader(item=item, selector=xs)
self.loader.default_output_processor = TakeFirst()
self.loader.log = self.log
def _set_dummy_loader(self, response, from_page, xs, item):
self.from_page = from_page
rpt = self.scraper.get_rpt(from_page)
if not (self.from_page == 'MP' or self.from_page == 'FP'):
item = response.request.meta['item']
if rpt.content_type == 'J':
json_resp = json.loads(response.body_as_unicode())
self.dummy_loader = JsonItemLoader(item=DummyItem(), selector=json_resp)
else:
self.dummy_loader = ItemLoader(item=DummyItem(), response=response)
else:
if rpt.content_type == 'J':
self.dummy_loader = JsonItemLoader(item=DummyItem(), selector=xs)
else:
self.dummy_loader = ItemLoader(item=DummyItem(), selector=xs)
self.dummy_loader.default_output_processor = TakeFirst()
self.dummy_loader.log = self.log
    def parse_item(self, response, xs=None, from_page=None, item_num=None):
        """Scrape all configured attributes from a response into one item.

        Called directly from ``parse`` for main/follow pages (with ``xs`` set
        to the base-object selector) and as a request callback for detail
        pages (then ``from_page``/``item_num`` come from the request meta).
        Returns the loaded item; for detail pages the double-check is applied
        first and, on the last detail request, the non-DB results are stored.
        """
        #logging.info(str(response.request.meta))
        #logging.info(response.body_as_unicode())
        if not from_page:
            # Detail-page callback: parameters arrive via the request meta.
            from_page = response.request.meta['from_page']
            item_num = response.request.meta['item_num']
        self._set_loader(response, from_page, xs, self.scraped_obj_item_class())
        self._set_dummy_loader(response, from_page, xs, self.scraped_obj_item_class())
        if from_page == 'MP' or from_page == 'FP':
            self.items_read_count += 1
        else:
            # Optionally dump the first N detail page response bodies.
            if self.current_output_num_dp_response_bodies < self.conf['OUTPUT_NUM_DP_RESPONSE_BODIES']:
                self.current_output_num_dp_response_bodies += 1
                self.log("Response body ({url})\n\n***** RP_DP_{num}_START *****\n{resp_body}\n***** RP_DP_{num}_END *****\n\n".format(
                    url=response.url,
                    resp_body=response.body,
                    num=self.current_output_num_dp_response_bodies), logging.INFO)
        elems = self.scraper.get_scrape_elems()
        for elem in elems:
            if not elem.scraped_obj_attr.save_to_db:
                # Fresh dummy loader per non-DB attribute so values don't leak
                # between elements.
                self._set_dummy_loader(response, from_page, xs, self.scraped_obj_item_class())
            self._scrape_item_attr(elem, response, from_page, item_num)
        # Dealing with Django Char- and TextFields defining blank field as null
        item = self.loader.load_item()
        for key, value in list(item.items()):
            if value == None and \
               self.scraped_obj_class()._meta.get_field(key).blank and \
               not self.scraped_obj_class()._meta.get_field(key).null:
                item[key] = ''
        if not (from_page == 'MP' or from_page == 'FP'):
            item, is_double = self._check_for_double_item(item)
            if response.request.meta['last']:
                # Last detail request for this item: persist its non-DB values.
                self.non_db_results[id(item)] = self.tmp_non_db_results[item_num].copy()
            return item
        else:
            return item
def _replace_placeholders(self, text_str, item, item_num, only_mp):
applied = []
if type(text_str).__name__ != 'str' and type(text_str).__name__ != 'unicode':
return text_str, applied
standard_elems = self.scraper.get_standard_elems()
for scraper_elem in standard_elems:
if not only_mp or scraper_elem.request_page_type == 'MP':
name = scraper_elem.scraped_obj_attr.name
placeholder = '{' + name + '}'
if not scraper_elem.scraped_obj_attr.save_to_db:
if name in self.tmp_non_db_results[item_num] and \
self.tmp_non_db_results[item_num][name] != None and \
placeholder in text_str:
text_str = text_str.replace(placeholder, self.tmp_non_db_results[item_num][name])
applied.append(placeholder)
else:
if name in item and \
item[name] != None and \
placeholder in text_str:
text_str = text_str.replace(placeholder, item[name])
applied.append(placeholder)
return text_str, applied
def _do_req_info_replacements(self, item, item_num, page, json_dict, info_str):
json_dict = json.loads(json.dumps(json_dict).replace('{page}', str(page)))
for key, value in list(json_dict.items()):
new_value, applied = self._replace_placeholders(value, item, item_num, True)
json_dict[key] = new_value
if len(applied) > 0:
msg = "Request info placeholder(s) applied (item {id}): {a}".format(
a=str(applied), id=item._dds_id_str)
self.log(msg, logging.DEBUG)
self.log(info_str + " [" + str(key) + "] before: " + str(value), logging.DEBUG)
self.log(info_str + " [" + str(key) + "] after : " + str(new_value), logging.DEBUG)
return json_dict
    def parse(self, response):
        """Entry point for main/follow page responses.

        Extracts the base objects (XPath, or JsonPath for JSON pages), builds
        one item per base object via ``parse_item``, then either yields the
        item directly or schedules the configured detail-page requests for
        it. Finally follows the pagination link if one is configured and the
        page/item limits have not been reached.
        """
        xs = Selector(response)
        base_objects = []
        base_elem = self.scraper.get_base_elem()
        rpt = response.request.meta['rpt']
        page_num = response.request.meta['page_num']
        page = self.pages[page_num - 1]
        follow_page_num = response.request.meta['follow_page_num']
        # Optionally dump the first N main-page response bodies for debugging.
        if rpt.page_type == 'MP':
            if self.current_output_num_mp_response_bodies < self.conf['OUTPUT_NUM_MP_RESPONSE_BODIES']:
                self.current_output_num_mp_response_bodies += 1
                self.log("Response body ({url})\n\n***** RP_MP_{num}_START *****\n{resp_body}\n***** RP_MP_{num}_END *****\n\n".format(
                    url=response.url,
                    resp_body=response.body,
                    num=self.current_output_num_mp_response_bodies), logging.INFO)
        # Collect base objects: JsonPath for JSON pages, XPath otherwise.
        if rpt.content_type == 'J':
            json_resp = None
            try:
                json_resp = json.loads(response.body_as_unicode())
            except ValueError:
                msg = "JSON response for MP could not be parsed!"
                self.log(msg, logging.ERROR)
            if json_resp:
                try:
                    jsonpath_expr = parse(base_elem.x_path)
                except JsonPathLexerError:
                    msg = "JsonPath for base elem could not be processed!"
                    self.dds_logger.error(msg)
                    raise CloseSpider()
                base_objects = [match.value for match in jsonpath_expr.find(json_resp)]
                if len(base_objects) > 0:
                    base_objects = base_objects[0]
        else:
            base_objects = response.xpath(base_elem.x_path)
        if(len(base_objects) == 0):
            self.log("{cs}No base objects found.{ce}".format(
                cs=self.bcolors["INFO"], ce=self.bcolors["ENDC"]), logging.ERROR)
        # Honour the MAX_ITEMS_READ limit across all pages.
        if(self.conf['MAX_ITEMS_READ']):
            items_left = min(len(base_objects), self.conf['MAX_ITEMS_READ'] - self.items_read_count)
            base_objects = base_objects[0:items_left]
        for obj in base_objects:
            item_num = self.items_read_count + 1
            self.tmp_non_db_results[item_num] = {}
            page_str = str(page_num) + '(' + str(follow_page_num) + ')'
            self.dds_logger.info("")
            self.dds_logger.info(self.bcolors['BOLD'] + '--------------------------------------------------------------------------------------' + self.bcolors['ENDC'])
            self.struct_log("{cs}Starting to crawl item {i} from page {p}.{ce}".format(
                i=str(item_num), p=page_str, cs=self.bcolors["HEADER"], ce=self.bcolors["ENDC"]))
            self.dds_logger.info(self.bcolors['BOLD'] + '--------------------------------------------------------------------------------------' + self.bcolors['ENDC'])
            item = self.parse_item(response, obj, rpt.page_type, item_num)
            # Bookkeeping attributes used in log messages and result tracking.
            item._dds_item_page = page
            item._dds_item_page_num = page_num
            item._dds_item_follow_page_num = follow_page_num
            item._dds_item_id = item_num
            item._dds_id_str = str(item._dds_item_page_num) + '(' + str(item._dds_item_follow_page_num) + ')-' + str(item._dds_item_id)
            if item:
                # Double-check can be done here only if every ID field comes
                # from the main page.
                only_main_page_idfs = True
                idf_elems = self.scraper.get_id_field_elems()
                for idf_elem in idf_elems:
                    if idf_elem.request_page_type != 'MP':
                        only_main_page_idfs = False
                is_double = False
                if only_main_page_idfs:
                    item, is_double = self._check_for_double_item(item)
                # Don't go on reading detail pages when...
                # No detail page URLs defined or
                # DOUBLE item with only main page IDFs and no standard update elements to be scraped from detail pages or
                # generally no attributes scraped from detail pages
                cnt_sue_detail = self.scraper.get_standard_update_elems_from_detail_pages().count()
                cnt_detail_scrape = self.scraper.get_from_detail_pages_scrape_elems().count()
                if self.scraper.get_detail_page_url_elems().count() == 0 or \
                   (is_double and cnt_sue_detail == 0) or cnt_detail_scrape == 0:
                    self.non_db_results[id(item)] = self.tmp_non_db_results[item_num].copy()
                    yield item
                else:
                    #self.run_detail_page_request()
                    url_elems = self.scraper.get_detail_page_url_elems()
                    for url_elem in url_elems:
                        # Resolve placeholders in the detail page URL, reading
                        # from / writing back to the right store (DB vs non-DB).
                        if not url_elem.scraped_obj_attr.save_to_db:
                            url_before = self.tmp_non_db_results[item_num][url_elem.scraped_obj_attr.name]
                            url, applied = self._replace_placeholders(url_before, item, item_num, True)
                            self.tmp_non_db_results[item_num][url_elem.scraped_obj_attr.name] = url
                        else:
                            url_before = item[url_elem.scraped_obj_attr.name]
                            url, applied = self._replace_placeholders(url_before, item, item_num, True)
                            item[url_elem.scraped_obj_attr.name] = url
                        if len(applied) > 0:
                            msg = "Detail page URL placeholder(s) applied (item {id}): {a}".format(
                                a=str(applied), id=item._dds_id_str)
                            self.log(msg, logging.DEBUG)
                            self.log("URL before: " + url_before, logging.DEBUG)
                            self.log("URL after : " + url, logging.DEBUG)
                        dp_rpt = self.scraper.get_rpt_for_scraped_obj_attr(url_elem.scraped_obj_attr)
                        kwargs = self.dp_request_kwargs[dp_rpt.page_type].copy()
                        if 'meta' not in kwargs:
                            kwargs['meta'] = {}
                        kwargs['meta']['page_num'] = page_num
                        kwargs['meta']['follow_page_num'] = follow_page_num
                        kwargs['meta']['item'] = item
                        kwargs['meta']['from_page'] = dp_rpt.page_type
                        kwargs['meta']['item_num'] = item_num
                        kwargs['meta']['rpt'] = dp_rpt
                        # Placeholder substitution in the extra request info.
                        if 'headers' in kwargs:
                            kwargs['headers'] = self._do_req_info_replacements(item, item_num, page, kwargs['headers'], "HEADERS")
                        if 'body' in kwargs:
                            body_before = kwargs['body']
                            kwargs['body'] = kwargs['body'].replace('{page}', str(page))
                            kwargs['body'], applied = self._replace_placeholders(kwargs['body'], item, item_num, True)
                            if len(applied) > 0:
                                msg = "Request info placeholder(s) applied (item {id}): {a}".format(
                                    a=str(applied), id=item._dds_id_str)
                                self.log(msg, logging.DEBUG)
                                self.log("BODY before: " + body_before, logging.DEBUG)
                                self.log("BODY after : " + kwargs['body'], logging.DEBUG)
                        if 'cookies' in kwargs:
                            kwargs['cookies'] = self._do_req_info_replacements(item, item_num, page, kwargs['cookies'], "COOKIES")
                        form_data = None
                        if dp_rpt.request_type == 'F' and dp_rpt.form_data:
                            form_data = json.loads(dp_rpt.form_data).copy()
                            form_data = self._do_req_info_replacements(item, item_num, page, form_data, "FORM DATA")
                        # Mark the last detail request so parse_item knows when
                        # to persist the non-DB results for this item.
                        if url_elem == url_elems[len(url_elems)-1]:
                            kwargs['meta']['last'] = True
                        else:
                            kwargs['meta']['last'] = False
                        self._set_meta_splash_args()
                        #logging.info(str(kwargs))
                        self.log(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", logging.INFO)
                        msg = "{cs}Calling {dp} URL for item {id}...{ce}".format(
                            dp=dp_rpt.page_type, id=item._dds_id_str,
                            cs=self.bcolors["HEADER"], ce=self.bcolors["ENDC"])
                        self.log(msg, logging.INFO)
                        msg = "URL : {url}".format(url=url)
                        self.log(msg, logging.INFO)
                        self._log_request_info(dp_rpt, form_data, kwargs)
                        self.log(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>", logging.INFO)
                        if dp_rpt.request_type == 'R':
                            yield response.follow(url, callback=self.parse_item, method=dp_rpt.method, dont_filter=dp_rpt.dont_filter, **kwargs)
                        else:
                            yield FormRequest(url, callback=self.parse_item, method=dp_rpt.method, formdata=form_data, dont_filter=dp_rpt.dont_filter, **kwargs)
                    # Apply the {page} placeholder to the scraped values.
                    for key, value in list(item.items()):
                        #Fixing some extremely weird Python 2 encoding failure, 2017-06-29
                        if type(value).__name__ == 'str':
                            try:
                                value = value.decode('utf-8')
                            except AttributeError:
                                pass
                        if value and (type(value).__name__ in ['str', 'unicode']) and '{page}' in value:
                            msg = "Applying page placeholder on {k}...".format(k=key)
                            self.log(msg, logging.DEBUG)
                            self.log("Value before: " + value, logging.DEBUG)
                            value = value.replace('{page}', str(page))
                            item[key] = value
                            self.log("Value after: " + value, logging.DEBUG)
            else:
                self.log("Item could not be read!", logging.ERROR)
        # Pagination: follow the next-page link unless a limit was reached.
        mir_reached = False
        if self.conf['MAX_ITEMS_READ'] and (self.conf['MAX_ITEMS_READ'] - self.items_read_count <= 0):
            mir_reached = True
        if self.scraper.follow_pages_url_xpath and not mir_reached:
            if not self.conf['NUM_PAGES_FOLLOW'] or follow_page_num < self.conf['NUM_PAGES_FOLLOW']:
                url = response.xpath(self.scraper.follow_pages_url_xpath).extract_first()
                if url is not None:
                    self._set_meta_splash_args()
                    follow_page = ''
                    if self.scraper.follow_pages_page_xpath:
                        follow_page = response.xpath(self.scraper.follow_pages_page_xpath).extract_first()
                    # Prefer a dedicated follow-page RPT, fall back to the MP one.
                    form_data_orig = None
                    if self.scraper.get_follow_page_rpts().count() > 0:
                        f_rpt = self.scraper.get_follow_page_rpts()[0]
                        form_data_orig = self.scraper.get_follow_page_rpts()[0].form_data
                    else:
                        f_rpt = self.scraper.get_main_page_rpt()
                        form_data_orig = self.scraper.get_main_page_rpt().form_data
                    kwargs, form_data = self._prepare_mp_req_data(self.fp_request_kwargs, form_data_orig, page, follow_page)
                    follow_page_num += 1
                    kwargs['meta']['page_num'] = page_num
                    kwargs['meta']['follow_page_num'] = follow_page_num
                    kwargs['meta']['rpt'] = f_rpt
                    self._log_page_info(page_num, follow_page_num, url, f_rpt, form_data, kwargs)
                    if f_rpt.request_type == 'R':
                        yield response.follow(url, callback=self.parse, method=f_rpt.method, dont_filter=f_rpt.dont_filter, **kwargs)
                    else:
                        url = response.urljoin(url)
                        yield FormRequest(url, callback=self.parse, method=f_rpt.method, formdata=form_data, dont_filter=f_rpt.dont_filter, **kwargs)
def _log_request_info(self, rpt, form_data, kwargs):
level = logging.DEBUG
extra_info = False
if 'headers' in kwargs:
self.log("HEADERS : " + str(kwargs['headers']), level)
extra_info = True
if 'body' in kwargs:
self.log("BODY : " + str(kwargs['body']), level)
extra_info = True
if 'cookies' in kwargs:
self.log("COOKIES : " + str(kwargs['cookies']), level)
extra_info = True
if rpt.request_type == 'F' and form_data:
self.log("FORM DATA : " + str(form_data), level)
extra_info = True
if not extra_info:
self.log("No additional request information sent.", level)
def _post_save_tasks(self, sender, instance, created, **kwargs):
if instance and created:
self.scraper.last_scraper_save = datetime.datetime.now()
self.scraper.save()
class DummyItem(scrapy.Item):
    # Throwaway item used by the dummy loader for attributes that are
    # scraped but not persisted to the database (save_to_db=False).
    tmp_field = scrapy.Field()
| holgerd77/django-dynamic-scraper | dynamic_scraper/spiders/django_spider.py | Python | bsd-3-clause | 40,962 |
#------------------------------------------------------------------------------
# Name: pychrono example
# Purpose:
#
# Author: Lijing Yang
#
# Created: 6/12/2020
# Copyright: (c) ProjectChrono 2019
#------------------------------------------------------------------------------
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
import math
print ("Example: demonstration of using friction models")
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
# chrono.SetChronoDataPath('../../../../data/')
# Helper class to define a cylindrical shape
class MyObstacle:
    """Cylindrical obstacle described by a radius and a center position."""
    def __init__(self, r, pos):
        self.radius = r
        self.center = pos
    def GetVisualization(self):
        """Return a ChAssetLevel holding the cylinder shape and its color."""
        level = chrono.ChAssetLevel()
        cyl = chrono.ChCylinderShape()
        cyl.GetCylinderGeometry().rad = self.radius
        # Cylinder axis runs vertically from the center up to height 1.1.
        cyl.GetCylinderGeometry().p1 = self.center + chrono.ChVectorD(0, 0, 0)
        cyl.GetCylinderGeometry().p2 = self.center + chrono.ChVectorD(0, 1.1, 0)
        level.AddAsset(cyl)
        level.AddAsset(chrono.ChColorAsset(0.6, 0.3, 0.0))
        return level
# Custom collision detection callback class
class MyCustomCollisionDetection(chrono.CustomCollisionCallback):
    """Analytic ball-vs-cylinder collision detection.

    Works entirely in the horizontal (XZ) plane: the obstacle is treated as
    an infinite vertical cylinder, so only the planar distance between ball
    center and cylinder axis decides whether a contact is generated.
    """
    def __init__(self, ball, ground,
                 ball_mat, obst_mat,
                 ball_radius, obstacle):
        super().__init__()
        self.m_ball = ball
        self.m_ground = ground
        self.m_ball_mat = ball_mat
        self.m_obst_mat = obst_mat
        self.m_ball_radius = ball_radius
        self.m_obst_radius = obstacle.radius
        self.m_obst_center = obstacle.center
    def OnCustomCollision(self, sys):
        # super().OnCustomCollision(sys)
        r_sum = self.m_ball_radius + self.m_obst_radius
        # Get current ball position and project on horizontal plane.
        b_pos = self.m_ball.GetPos()
        b_center = chrono.ChVectorD(b_pos.x, 0.0, b_pos.z)
        # Check collision with obstacle (working in the horizontal plane).
        o_center = chrono.ChVectorD(self.m_obst_center.x, 0.0, self.m_obst_center.z)
        delta = o_center - b_center
        # Get the squared euclidean norm
        dist2 = delta.Length2()
        if dist2 >= r_sum * r_sum:
            # Centers farther apart than the radii sum: no contact this step.
            return
        # Find collision points on the ball and obstacle and the contact normal.
        dist = math.sqrt(dist2)
        normal = delta / dist
        pt_ball = b_center + normal * self.m_ball_radius
        pt_obst = o_center - normal * self.m_obst_radius
        # Populate the collision info object (express all vectors in 3D).
        # We pass null pointers to collision shapes.
        contact = chrono.ChCollisionInfo()
        contact.modelA = self.m_ball.GetCollisionModel()
        contact.modelB = self.m_ground.GetCollisionModel()
        contact.shapeA = None
        contact.shapeB = None
        contact.vN = chrono.ChVectorD(normal.x, 0.0, normal.z)
        contact.vpA = chrono.ChVectorD(pt_ball.x, b_pos.y, pt_ball.z)
        contact.vpB = chrono.ChVectorD(pt_obst.x, b_pos.y, pt_obst.z)
        # Negative distance means penetration depth for the solver.
        contact.distance = dist - r_sum
        sys.GetContactContainer().AddContact(contact, self.m_ball_mat, self.m_obst_mat)
# ---------------------------------------------------------------------
#
#  Create the simulation system and add items
#

# Change use_NSC to specify different contact method
# (0 = SMC / penalty-based, nonzero = NSC / complementarity-based)
use_NSC = 0

ball_radius = 0.5
obst_radius = 2.0
obst_center = chrono.ChVectorD(2.9, 0, 2.9)
obstacle = MyObstacle(obst_radius, obst_center)

# Create the system and the various contact materials.
# Each branch also picks the integration step and render interval suited
# to its contact method (SMC needs a much smaller step).
if use_NSC:
    sys = chrono.ChSystemNSC()
    g_mat = chrono.ChMaterialSurfaceNSC()
    g_mat.SetRestitution(0.9)
    g_mat.SetFriction(0.4)
    b_mat = chrono.ChMaterialSurfaceNSC()
    b_mat.SetRestitution(0.9)
    b_mat.SetFriction(0.5)
    o_mat = chrono.ChMaterialSurfaceNSC()
    o_mat.SetRestitution(0.9)
    o_mat.SetFriction(0.4)

    ground_mat = g_mat
    ball_mat = b_mat
    obst_mat = o_mat

    time_step = 1e-3
    frame_skip = 10

else: # use SMC contact method
    sys = chrono.ChSystemSMC()

    g_mat = chrono.ChMaterialSurfaceSMC()
    g_mat.SetRestitution(0.9)
    g_mat.SetFriction(0.4)
    b_mat = chrono.ChMaterialSurfaceSMC()
    b_mat.SetRestitution(0.9)
    b_mat.SetFriction(0.5)
    o_mat = chrono.ChMaterialSurfaceSMC()
    o_mat.SetRestitution(0.9)
    o_mat.SetFriction(0.4)

    ground_mat = g_mat
    ball_mat = b_mat
    obst_mat = o_mat

    time_step = 1e-4
    frame_skip = 100

sys.Set_G_acc(chrono.ChVectorD(0, -9.8, 0))
# Create the ground body with a plate and side walls (both collision and visualization).
ground = chrono.ChBody()
sys.AddBody(ground)
ground.SetCollide(True)
ground.SetBodyFixed(True)

# Collision model: a floor plate plus four thin walls enclosing a 10x10 arena
# (AddBox takes half-dimensions and the box center).
ground.GetCollisionModel().ClearModel()
ground.GetCollisionModel().AddBox(ground_mat, 5.0, 1.0, 5.0, chrono.ChVectorD(0, -1, 0))
ground.GetCollisionModel().AddBox(ground_mat, 0.1, 1.0, 5.1, chrono.ChVectorD(-5, 0, 0))
ground.GetCollisionModel().AddBox(ground_mat, 0.1, 1.0, 5.1, chrono.ChVectorD( 5, 0, 0))
ground.GetCollisionModel().AddBox(ground_mat, 5.1, 1.0, 0.1, chrono.ChVectorD(0, 0, -5))
# Bugfix: this wall was centered at y=1, mismatching both its visualization
# shape (vshape_5, centered at y=0) and the other three walls; center it at
# y=0 like the rest so collision and rendering agree.
ground.GetCollisionModel().AddBox(ground_mat, 5.1, 1.0, 0.1, chrono.ChVectorD(0, 0, 5))
ground.GetCollisionModel().BuildModel()

# Visualization boxes mirror the collision boxes (full lengths = 2 * half-dims).
vshape_1 = chrono.ChBoxShape()
vshape_1.GetBoxGeometry().SetLengths(chrono.ChVectorD(10, 2, 10))
vshape_1.GetBoxGeometry().Pos = chrono.ChVectorD(0, -1, 0)
ground.AddAsset(vshape_1)

vshape_2 = chrono.ChBoxShape()
vshape_2.GetBoxGeometry().SetLengths(chrono.ChVectorD(0.2, 2, 10.2))
vshape_2.GetBoxGeometry().Pos = chrono.ChVectorD(-5, 0, 0)
ground.AddAsset(vshape_2)

vshape_3 = chrono.ChBoxShape()
vshape_3.GetBoxGeometry().SetLengths(chrono.ChVectorD(0.2, 2, 10.2))
vshape_3.GetBoxGeometry().Pos = chrono.ChVectorD(5, 0, 0)
ground.AddAsset(vshape_3)

vshape_4 = chrono.ChBoxShape()
vshape_4.GetBoxGeometry().SetLengths(chrono.ChVectorD(10.2, 2, 0.2))
vshape_4.GetBoxGeometry().Pos = chrono.ChVectorD(0, 0, -5)
ground.AddAsset(vshape_4)

vshape_5 = chrono.ChBoxShape()
vshape_5.GetBoxGeometry().SetLengths(chrono.ChVectorD(10.2, 2, 0.2))
vshape_5.GetBoxGeometry().Pos = chrono.ChVectorD(0, 0, 5)
ground.AddAsset(vshape_5)

ground.AddAsset(chrono.ChTexture(chrono.GetChronoDataFile("textures/blue.png")))

# Add obstacle visualization (in a separate level with a different color).
ground.AddAsset(obstacle.GetVisualization())
# Create the falling ball: mass 10, solid-sphere-like inertia, launched
# diagonally toward the obstacle with an initial horizontal velocity.
ball = chrono.ChBody()
sys.AddBody(ball)
ball.SetMass(10)
comp = 4 * ball_radius * ball_radius
ball.SetInertiaXX(chrono.ChVectorD(comp, comp, comp))
# Start just above the floor in the corner opposite the obstacle.
ball.SetPos(chrono.ChVectorD(-3, 1.2 * ball_radius, -3))
ball.SetPos_dt(chrono.ChVectorD(5, 0, 5))
ball.SetCollide(True)

ball.GetCollisionModel().ClearModel()
ball.GetCollisionModel().AddSphere(ball_mat, ball_radius)
ball.GetCollisionModel().BuildModel()

vshape_s = chrono.ChSphereShape()
vshape_s.GetSphereGeometry().rad = ball_radius
vshape_s.GetSphereGeometry().Pos = ball.GetPos()
ball.AddAsset(vshape_s)
ball.AddAsset(chrono.ChTexture(chrono.GetChronoDataFile("textures/bluewhite.png")))

# Create a custom collision detection callback object and register it with the system
my_collision = MyCustomCollisionDetection(ball, ground, ball_mat, obst_mat, ball_radius, obstacle)
sys.RegisterCustomCollisionCallback(my_collision)
# ---------------------------------------------------------------------
#
#  Create an Irrlicht application to visualize the system
#

myapplication = chronoirr.ChIrrApp(sys, 'PyChrono example: Custom contact', chronoirr.dimension2du(1024,768))
myapplication.AddTypicalSky()
myapplication.AddTypicalLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
myapplication.AddTypicalCamera(chronoirr.vector3df(8, 8, -6))
myapplication.AddTypicalLights()

# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need a finer control on which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem) on a per-item basis.
myapplication.AssetBindAll()

# ==IMPORTANT!== Use this function for 'converting' into Irrlicht meshes the assets
# that you added to the bodies into 3D shapes, they can be visualized by Irrlicht!
myapplication.AssetUpdateAll()

# ---------------------------------------------------------------------
#
#  Run the simulation
#

# Bugfix: honor the contact-method-dependent step size and render interval
# chosen in the setup above (NSC: 1e-3 / 10, SMC: 1e-4 / 100) instead of the
# previously hard-coded SMC values, which silently ignored the NSC settings.
myapplication.SetTimestep(time_step)
myapplication.SetTryRealtime(True)

frame = 0

while(myapplication.GetDevice().run()):
    # Render only every frame_skip-th step; advance the physics every step.
    if frame % frame_skip == 0:
        myapplication.BeginScene()
        myapplication.DrawAll()
        myapplication.EndScene()

    myapplication.DoStep()
    frame += 1
| rserban/chrono | src/demos/python/irrlicht/demo_IRR_custom_contact.py | Python | bsd-3-clause | 8,891 |
# This file is part of puppet-panel.
#
# puppet-panel is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# puppet-panel is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with puppet-panel. If not, see <http://www.gnu.org/licenses/>.
from django.core import exceptions
from rest_framework import serializers
import rest_framework.exceptions
import requests.exceptions
import models
import utils
# A serializer that validate its fields using the model 'clean()' method
class ValidatedSerializer(serializers.ModelSerializer):
    """ModelSerializer that delegates field validation to the model's clean()."""
    def validate(self, attrs):
        # Validate on an empty model if it is going to be created
        instance = self.instance if self.instance else self.Meta.model()

        # Add write-only fields for validation
        # (missing fields are blanked so clean() sees a complete object)
        for field in self.Meta.fields:
            setattr(instance, field, attrs[field] if field in attrs else '')

        try:
            instance.clean()
        except exceptions.ValidationError as e:
            # Re-raise as a DRF error; keep per-field detail when available.
            raise serializers.ValidationError(e.error_dict if hasattr(e, 'error_dict') else e)

        # Return the (possibly normalized by clean()) attribute values.
        return {field:getattr(instance, field) for field in self.Meta.fields}
# Classes
class ClassSerializer(serializers.ModelSerializer):
    """Puppet class: name plus whether it is applied by default."""
    class Meta:
        model = models.Class
        fields = ('name', 'default')
# Reports
class ReportSerializer_Light(serializers.Serializer):
    """Compact view of a PuppetDB report (no logs, events or metrics)."""
    transaction = serializers.CharField(allow_blank=False, trim_whitespace=False, required=True)
    node = serializers.CharField(allow_blank=False, trim_whitespace=False, required=True)
    agent_version = serializers.CharField(allow_blank=False, trim_whitespace=False, required=True)
    status = serializers.CharField(allow_blank=False, trim_whitespace=False, required=True)
    start = serializers.DateTimeField(required=True)
    end = serializers.DateTimeField(required=True)
    run_time = serializers.SerializerMethodField()

    # Method fields
    def get_run_time(self, obj):
        # Expose the duration in seconds (a timedelta is not JSON friendly).
        return obj.run_time.total_seconds()
class ReportSerializer_Full(ReportSerializer_Light):
    """Full report view: adds logs, resource events and grouped metrics."""
    logs = serializers.SerializerMethodField()
    events = serializers.SerializerMethodField()
    metrics = serializers.SerializerMethodField()

    # Method fields
    def get_logs(self, obj):
        # Flatten each log entry; file location is rendered as "file:line"
        # only when both parts are present.
        return [{
            'source': log['source'],
            'level': log['level'],
            'time': log['time'],
            'message': log['message'],
            'file': '%s:%s' % (log['file'], log['line']) if log['file'] and log['line'] else ''
        } for log in obj.logs]

    def get_events(self, obj):
        # Render each event as "Type[title]" plus its message and status.
        return [{
            'resource': '%s[%s]' % (event.item['type'], event.item['title']),
            'message': event.item['message'],
            'status': event.status
        } for event in obj.events()]

    def get_metrics(self, obj):
        # Group metric name/value pairs by their category.
        metrics = {}
        for metric in obj.metrics:
            if not metric['category'] in metrics:
                metrics[metric['category']] = []
            metrics[metric['category']].append({'name': metric['name'], 'value': metric['value']})

        return metrics
# Parameters (used in global listing)
class ParameterSerializer(serializers.Serializer):
    """Flat parameter row for the global listing (group or node scoped)."""
    group = serializers.CharField(allow_blank=False, trim_whitespace=False, required=True)
    node = serializers.CharField(allow_blank=False, trim_whitespace=False, required=True)
    name = serializers.CharField(allow_blank=False, trim_whitespace=False, required=True)
    value = serializers.CharField(allow_blank=False, trim_whitespace=False, required=True)
    encrypted = serializers.BooleanField(default=False)

    class Meta:
        fields = ('group', 'node', 'name', 'value', 'encrypted')
# Groups
class GroupParameterSerializer(ValidatedSerializer):
    """Parameter attached to a group; validated via the model's clean()."""
    encrypted = serializers.BooleanField(default=False)

    class Meta:
        model = models.GroupParameter
        fields = ('name', 'value', 'encryption_key', 'encrypted')
class GroupSerializer_Light(serializers.ModelSerializer):
    """Group summary: related classes and parent groups referenced by name."""
    classes = serializers.SlugRelatedField(slug_field='name', queryset=models.Class.objects.all(), many=True, required=False)
    parents = serializers.SlugRelatedField(slug_field='name', queryset=models.Group.objects.all(), many=True, required=False)

    class Meta:
        model = models.Group
        fields = ('name', 'default', 'parents', 'classes')
        read_only_fields = ()
class GroupSerializer_Full(GroupSerializer_Light):
    """Group detail: adds (read-only) parameters and member nodes."""
    parameters = GroupParameterSerializer(many=True, read_only=True)
    nodes = serializers.SlugRelatedField(slug_field='name', queryset=models.Node.objects.all(), many=True, required=False)

    class Meta(GroupSerializer_Light.Meta):
        fields = GroupSerializer_Light.Meta.fields + ('parameters', 'nodes')
        read_only_fields = GroupSerializer_Light.Meta.read_only_fields + ('parameters', 'nodes')
# Nodes
class NodeParameterSerializer(ValidatedSerializer):
    """Parameter attached to a node; validated via the model's clean()."""
    encrypted = serializers.BooleanField(default=False)

    class Meta:
        model = models.NodeParameter
        fields = ('name', 'value', 'encryption_key', 'encrypted')
class NodeSerializer_Light(serializers.ModelSerializer):
    """Node summary enriched with live status/timestamps from PuppetDB."""
    status = serializers.SerializerMethodField()
    report_timestamp = serializers.SerializerMethodField()
    catalog_timestamp = serializers.SerializerMethodField()
    facts_timestamp = serializers.SerializerMethodField()

    class Meta:
        model = models.Node
        fields = ('name', 'status', 'report_timestamp', 'catalog_timestamp', 'facts_timestamp')
        read_only_fields = ('status', 'report_timestamp', 'catalog_timestamp', 'facts_timestamp')

    def get_node(self, name):
        """Return the PuppetDB node object for *name*, or None if unknown.

        The full node list is fetched once per serializer instance and
        memoized in ``self.node`` so repeated field lookups stay cheap.
        """
        # Load the nodes
        if not hasattr(self, 'node'):
            self.node = {}

            try:
                db = utils.puppetdb_connect()
                for node in db.nodes(with_status=True):
                    self.node[node.name] = node
            except Exception as e:
                raise rest_framework.exceptions.APIException('Can\'t get node from PuppetDB: %s' % e)

        # Not found node
        if not name in self.node:
            self.node[name] = None

        return self.node[name]

    # Method fields
    def get_status(self, obj):
        node = self.get_node(obj.name)
        return node.status if node else 'unknown'

    def get_report_timestamp(self, obj):
        node = self.get_node(obj.name)
        return node.report_timestamp if node else None

    def get_catalog_timestamp(self, obj):
        node = self.get_node(obj.name)
        return node.catalog_timestamp if node else None

    def get_facts_timestamp(self, obj):
        node = self.get_node(obj.name)
        return node.facts_timestamp if node else None
class NodeSerializer_Full(NodeSerializer_Light):
    """Node detail: adds groups, classes, parameters, PuppetDB reports and
    the PuppetCA certificate status."""
    classes = serializers.SlugRelatedField(slug_field='name', queryset=models.Class.objects.all(), many=True, required=False)
    groups = serializers.SlugRelatedField(slug_field='name', queryset=models.Group.objects.all(), many=True, required=False)
    parameters = NodeParameterSerializer(many=True, read_only=True)
    reports = serializers.SerializerMethodField()
    certificate = serializers.SerializerMethodField()

    class Meta(NodeSerializer_Light.Meta):
        fields = NodeSerializer_Light.Meta.fields + ('groups', 'classes', 'parameters', 'reports', 'certificate')
        read_only_fields = NodeSerializer_Light.Meta.read_only_fields + ('parameters', 'reports', 'certificate')

    # Method fields
    def get_reports(self, obj):
        node = self.get_node(obj.name)
        return ReportSerializer_Light(node.reports(), many=True).data if node else []

    def get_certificate(self, obj):
        # Best effort: any CA query failure is reported as "no certificate".
        try:
            result = utils.puppetca_query('GET', 'certificate_status/%s' % obj.name)
            return result.json()
        except Exception as e:
            return None
class NodeSerializer_Enc(serializers.Serializer):
    """Minimal node view for the Puppet ENC: class names and parameters."""
    classes = serializers.StringRelatedField(many=True)
    parameters = NodeParameterSerializer(many=True)
# Orphans
class OrphanSerializer(serializers.Serializer):
    """Entry known to one backend but not the panel (name + its source)."""
    name = serializers.CharField(allow_blank=False, trim_whitespace=False, required=True)
    source = serializers.CharField(allow_blank=False, trim_whitespace=False, required=True)
# Nodes status
class StatusSerializer(serializers.Serializer):
    """Aggregated node counts per Puppet run status."""
    unchanged = serializers.IntegerField(min_value=0, required=True)
    changed = serializers.IntegerField(min_value=0, required=True)
    failed = serializers.IntegerField(min_value=0, required=True)
    unreported = serializers.IntegerField(min_value=0, required=True)
    unknown = serializers.IntegerField(min_value=0, required=True)
    total = serializers.IntegerField(min_value=0, required=True)
# Certificates
class CertificateSerializer_Read(serializers.Serializer):
    """Read-only representation of a PuppetCA certificate."""
    name = serializers.CharField(allow_blank=False, trim_whitespace=False)
    # Bugfix: ListField takes its element serializer via the 'child' keyword
    # only; passed positionally it was forwarded to Field.__init__ (where it
    # bound the read_only parameter) and the element spec was ignored.
    dns_alt_names = serializers.ListField(child=serializers.CharField(allow_blank=False, trim_whitespace=False))
    state = serializers.CharField(allow_blank=False, trim_whitespace=False)
    fingerprint = serializers.CharField(allow_blank=False, trim_whitespace=False)
class CertificateSerializer_Write(serializers.Serializer):
    """Writable certificate view: only the desired state can be set."""
    state = serializers.ChoiceField(['signed', 'revoked'], allow_blank=False)
| ybulach/puppet-panel | puppet/serializers.py | Python | lgpl-3.0 | 9,664 |
#coding=utf-8
"""
Algorithms about probability
Author: 段凯强
"""
import math
from collections import Counter
def entropyOfList(ls):
    """
    Compute the Shannon entropy (natural log) of a list of hashable items.

    The entropy is sum(-p[i] * log(p[i])) over every unique element i in the
    list, where p[i] is the element's relative frequency. An empty list has
    entropy 0.
    """
    counts = Counter(ls)
    length = float(len(ls))
    # One term per distinct element; Counter replaces the manual tally dict.
    return sum(-c / length * math.log(c / length) for c in counts.values())
| dnlp/ChineseWordSegmentation | probability.py | Python | mit | 481 |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 10:06:20 2015
@author: jpk
ToDo: automate the subsystems check. A query that checks all the subsystems in
case things change in the future should prevent issues with the pis chart
colours
"""
import sys
import os
import pandas as pd
import pandas.io.sql as psql
import MySQLdb
import matplotlib.pyplot as pl
import report_queries as rq
import numpy as np
import matplotlib.dates as mdates
def priority_breakdown_pie_chart(x, ds, dirname='./logs/'):
    '''
    Draw a pie chart of observing time per proposal priority and save it as
    a PNG in dirname.

    x  : DataFrame with 'Priority', 'No. Blocks' and 'Tsec' columns.
    ds : date-range string (e.g. '20150301 - 20150307'); used in the chart
         title and in the output filename.
    '''
    priorities = list(x['Priority'])
    # list() around map so indexing also works on Python 3 (map is lazy there)
    no_blocks = list(map(int, list(x['No. Blocks'])))
    labels = ['P' + str(p) + ' - ' + str(n) for p, n in zip(priorities, no_blocks)]
    values = list(x['Tsec'])

    # set colours for the priorities (P0..P4)
    colours = ['b', 'c', 'g', 'm', 'r']

    fig = pl.figure(facecolor='w', figsize=[5, 5])
    ax = fig.add_subplot(111)
    # NOTE(review): this assigns over the set_aspect method instead of calling
    # it; kept as-is to preserve the produced charts — confirm intent.
    ax.set_aspect = 1
    ax.pie(values,
           colors=colours,
           pctdistance=0.8,
           radius=0.95,
           autopct='%1.1f%%',
           textprops={'fontsize': 10,
                      'color': 'w'},
           wedgeprops={'edgecolor': 'white'})
    ax.legend(labels=labels, frameon=False, loc=(-0.15, 0.7), fontsize=8)

    title_txt = 'Weekly Priority Breakdown - ' + str(int(x['No. Blocks'].sum())) + ' Blocks Total' + '\n {}'.format(ds)
    ax.set_title(title_txt, fontsize=12)

    # Filename carries start/end dates: priority_breakdown_pie_chart_YYYYMMDD-YYYYMMDD.png
    filename = dirname + 'priority_breakdown_pie_chart_' + '-'.join([ds.split()[0].replace('-', ''), ds.split()[2].replace('-', '')]) + '.png'
    pl.savefig(filename, dpi=100)
#    pl.show()
# pl.show()
def weekly_total_time_breakdown_pie_chart(x, ds, dirname='./logs/'):
    '''
    Make a pie chart of the week's total time split into Science /
    Engineering / Weather / Problems.

    x expects one row with human-readable '...Time' columns for the labels
    and numeric 'Science'/'Engineering'/'Weather'/'Problems' columns for the
    wedge sizes; ds is the date-range string used in the title and filename.
    '''
    labels = ['Science - {}'.format(x['ScienceTime'][0]),
              'Engineering - {}'.format(x['EngineeringTime'][0]),
              'Weather - {}'.format(x['TimeLostToWeather'][0]),
              'Problems - {}'.format(x['TimeLostToProblems'][0])]
    values = [int(x['Science']),
              int(x['Engineering']),
              int(x['Weather']),
              int(x['Problems'])]
    colours = ['b','c','g','r']
    fig = pl.figure(facecolor='w', figsize=[5, 5])
    ax = fig.add_subplot(111)
    # NOTE(review): assigns over the Axes.set_aspect method instead of
    # calling it; has no plot effect.
    ax.set_aspect=1
    pie_wedge_collection = ax.pie(values,
                                  colors=colours,
                                  pctdistance=0.8,
                                  radius = 0.95,
                                  autopct='%1.1f%%',
                                  textprops = {'fontsize':10,
                                               'color':'w'},
                                  wedgeprops = {'edgecolor':'white'})
    ax.legend(labels=labels, frameon=False, loc=(-0.15,0.8), fontsize=8)
    title_txt = 'Weekly Time Breakdown - {} Total\n{}'.format(x['NightLength'][0], ds)
    ax.set_title(title_txt, fontsize=12)
    filename = 'weekly_total_time_breakdown_pie_chart_' + '-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
    pl.savefig(dirname+filename, dpi=100)
def weekly_subsystem_breakdown_pie_chart(x, y, col_dict, ds, dirname='./logs/'):
    '''
    Make a pie chart of problem time per SALT subsystem.

    x        -- dataframe with 'SaltSubsystem', 'TotalTime' (string) and
                'Time' (numeric) columns, one row per subsystem
    y        -- dataframe whose 'TotalTime'[0] is the grand total for the title
    col_dict -- maps subsystem name -> plot colour, so colours stay stable
                across reports
    ds       -- date-range string used in the title and filename
    '''
    subsystem = list(x['SaltSubsystem'])
    time = list(x['TotalTime'])
    labels = [subsystem[i] + ' - ' + time[i] for i in range(0,len(subsystem))]
    values = list(x['Time'])
    colours = [col_dict[i] for i in subsystem]
    fig = pl.figure(facecolor='w', figsize=[5, 5])
    ax = fig.add_subplot(111)
    # NOTE(review): assigns over the Axes.set_aspect method instead of
    # calling it; has no plot effect.
    ax.set_aspect=1
    pie_wedge_collection = ax.pie(values,
                                  colors=colours,
                                  pctdistance=0.8,
                                  radius = 0.95,
                                  autopct='%1.1f%%',
                                  textprops = {'fontsize':10,
                                               'color':'k'},
                                  wedgeprops = {'edgecolor':'white'})
    ax.legend(labels=labels, frameon=False, loc=(-0.15,0.65), fontsize=8)
    title_txt = 'Weekly Problems Breakdown - {}\n{}'.format(y['TotalTime'][0], ds)
    ax.set_title(title_txt, fontsize=12)
    filename = 'weekly_subsystem_breakdown_pie_chart_'+'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
    pl.savefig(dirname+filename, dpi=100)
def weekly_time_breakdown(x, ds, dirname='./logs/'):
    '''
    produce a bar stacked bar chart plot of the time breakdown per day for the
    past week.

    x  -- dataframe with a 'Date' column and numeric 'Science',
          'Engineering', 'Weather' and 'Problems' columns (hours per night)
    ds -- date-range string used in the title and filename
    '''
    fig = pl.figure(figsize=(10,4),facecolor='w')
    ax = fig.add_subplot(111)
    width = 0.55
    ax.grid(which='major', axis='y')
    # science time per day (bottom of each stack)
    s = ax.bar(x['Date'],
               x['Science'],
               width,
               color = 'b',
               edgecolor='w')
    # engineering time per day, stacked on science
    e = ax.bar(x['Date'],
               x['Engineering'],
               width,
               bottom = x['Science'],
               color = 'c',
               edgecolor='w')
    # weather time per day, stacked on science + engineering
    w = ax.bar(x['Date'],
               x['Weather'],
               width,
               bottom = x['Science'] + x['Engineering'],
               color = 'g',
               edgecolor='w')
    # problem time per day, stacked on top of everything else
    p = ax.bar(x['Date'],
               x['Problems'],
               width,
               bottom = x['Science'] + x['Engineering'] + x['Weather'],
               color = 'r',
               edgecolor='w')
    ax.set_ylabel('Hours', fontsize=11)
    ax.set_xlabel('Date', fontsize=11)
    fig.legend((s[0], e[0], w[0], p[0]),
               ('Science Time',
                'Engineering Time',
                'Time lost to Weather',
                'Time lost to Problems'),
                frameon=False,
                fontsize=10,
                loc=(0.0,0.70))
    title_txt = 'Weekly Time Breakdown - {}'.format(ds)
    ax.set_title(title_txt, fontsize=11)
    # Treat the x axis as dates and label each bar with weekday + ISO date.
    ax.xaxis_date()
    date_formatter = mdates.DateFormatter('%a \n %Y-%m-%d')
    ax.xaxis.set_major_formatter(date_formatter)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(8)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(8)
    fig.autofmt_xdate(rotation=0, ha = 'left')
    fig.subplots_adjust(left=0.22, bottom=0.20, right=0.96, top=None,
                    wspace=None, hspace=None)
    pl.autoscale()
    filename = 'weekly_time_breakdown_'+'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
    pl.savefig(dirname+filename, dpi=100)
# pl.show()
if __name__=='__main__':
    # Usage: script.py <obsdate YYYYMMDD> <interval-in-days>
    # set the colours for all the subsystems:
    subsystems_list = ['BMS', 'DOME', 'TC', 'PMAS', 'SCAM', 'TCS', 'STRUCT',
                       'TPC', 'HRS', 'PFIS','Proposal', 'Operations',
                       'ELS', 'ESKOM']
    cmap = pl.cm.jet
    # Spread the jet colormap evenly over the subsystems so each gets a
    # stable, distinct colour across reports.
    colour_map = cmap(np.linspace(0.0, 1.0, len(subsystems_list)))
    col_dict = {}
    for i in range(0, len(subsystems_list)):
        col_dict[subsystems_list[i]] = colour_map[i]
    # open mysql connection to the sdb
    # NOTE(review): credentials come from the SDBUSER/SDBPASS environment
    # variables; a KeyError here means they are not set.
    mysql_con = MySQLdb.connect(host='sdb.cape.saao.ac.za',
                                port=3306,user=os.environ['SDBUSER'],
                                passwd=os.environ['SDBPASS'], db='sdb')
    obsdate = sys.argv[1]
    # Convert YYYYMMDD to the ISO YYYY-MM-DD form the queries expect.
    date = '{}-{}-{}'.format(obsdate[0:4], obsdate[4:6], obsdate[6:8])
    interval = sys.argv[2]
    # use the connection to get the required data: _d
    dr_d = rq.date_range(mysql_con, date, interval=interval)
    wpb_d = rq.weekly_priority_breakdown(mysql_con, date, interval=interval)
    wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
    wttb_d = rq.weekly_total_time_breakdown(mysql_con, date, interval=interval)
    wsb_d = rq.weekly_subsystem_breakdown(mysql_con, date, interval=interval)
    wsbt_d = rq.weekly_subsystem_breakdown_total(mysql_con, date, interval=interval)
    # NOTE(review): duplicate of the wtb_d query three lines up -- the
    # second call re-runs the same query and overwrites the same variable.
    wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
    date_string = '{} - {}'.format(dr_d['StartDate'][0], dr_d['EndDate'][0])
    # testing the pie_chart method
    priority_breakdown_pie_chart(wpb_d, date_string)
    weekly_total_time_breakdown_pie_chart(wttb_d, date_string)
    weekly_subsystem_breakdown_pie_chart(wsb_d, wsbt_d, col_dict, date_string)
    weekly_time_breakdown(wtb_d, date_string)
    # NOTE(review): not wrapped in try/finally, so the connection leaks if
    # any plot call raises.
    mysql_con.close()
| dr-jpk/saltefficiency | weekly/weekly_summary_plots.py | Python | bsd-3-clause | 8,536 |
"""Added index for itemrevision created_date
Revision ID: 6b9d673d8e30
Revises: 2ea41f4610fd
Create Date: 2016-04-01 09:39:35.148502
"""
# revision identifiers, used by Alembic.
revision = '6b9d673d8e30'
down_revision = '2ea41f4610fd'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add an index on itemrevision.date_created to speed up date-ordered queries."""
    op.create_index('ix_itemrevision_date_created', 'itemrevision', ['date_created'], unique=False)
def downgrade():
    """Drop the itemrevision.date_created index added by this revision."""
    op.drop_index('ix_itemrevision_date_created', table_name='itemrevision')
| stackArmor/security_monkey | migrations/versions/6b9d673d8e30_added_index_for_itemrevision_created.py | Python | apache-2.0 | 499 |
# -*- coding: utf-8 -*-
"""Class to trap stdout and stderr and log them separately.
$Id: OutputTrap.py 958 2005-12-27 23:17:51Z fperez $"""
#*****************************************************************************
# Copyright (C) 2001-2004 Fernando Perez <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
from IPython import Release
__author__ = '%s <%s>' % Release.authors['Fernando']
__license__ = Release.license
import exceptions
import sys
from cStringIO import StringIO
class OutputTrapError(exceptions.Exception):
    """Exception raised by OutputTrap on double-trap / double-release misuse."""
    # Python 2 style: derives from the `exceptions` module's Exception.
    def __init__(self,args=None):
        exceptions.Exception.__init__(self)
        # Stored so str()/repr() of the exception shows the message.
        self.args = args
class OutputTrap:
    """Class to trap standard output and standard error. They get logged in
    StringIO objects which are available as <instance>.out and
    <instance>.err. The class also offers summary methods which format this
    data a bit.
    A word of caution: because it blocks messages, using this class can make
    debugging very tricky. If you are having bizarre problems silently, try
    turning your output traps off for a while. You can call the constructor
    with the parameter debug=1 for these cases. This turns actual trapping
    off, but you can keep the rest of your code unchanged (this has already
    been a life saver).
    Example:
    # config: trapper with a line of dots as log separator (final '\\n' needed)
    config = OutputTrap('Config','Out ','Err ','.'*80+'\\n')
    # start trapping output
    config.trap_all()
    # now all output is logged ...
    # do stuff...
    # output back to normal:
    config.release_all()
    # print all that got logged:
    print config.summary()
    # print individual raw data:
    print config.out.getvalue()
    print config.err.getvalue()
    """
    def __init__(self,name='Generic Output Trap',
                 out_head='Standard Output. ',err_head='Standard Error. ',
                 sum_sep='\n',debug=0,trap_out=0,trap_err=0,
                 quiet_out=0,quiet_err=0):
        # name/headers/separator control how the summary_* methods format output.
        self.name = name
        self.out_head = out_head
        self.err_head = err_head
        self.sum_sep = sum_sep
        # In-memory logs; raw text available via .getvalue().
        self.out = StringIO()
        self.err = StringIO()
        # NOTE(review): these two attributes are write-only -- the trap/release
        # methods actually use self._out_save / self._err_save (with a leading
        # underscore).  Kept as-is to avoid any behaviour change.
        self.out_save = None
        self.err_save = None
        # debug=1 disables actual stream swapping while keeping the API usable.
        self.debug = debug
        # quiet_* suppress the headers in the summaries.
        self.quiet_out = quiet_out
        self.quiet_err = quiet_err
        if trap_out:
            self.trap_out()
        if trap_err:
            self.trap_err()
    def trap_out(self):
        """Trap and log stdout."""
        # Guard against double-trapping, which would lose the real stdout.
        if sys.stdout is self.out:
            raise OutputTrapError,'You are already trapping stdout.'
        if not self.debug:
            self._out_save = sys.stdout
            sys.stdout = self.out
    def release_out(self):
        """Release stdout."""
        if not self.debug:
            if not sys.stdout is self.out:
                raise OutputTrapError,'You are not trapping stdout.'
            # Restore the stream saved by trap_out().
            sys.stdout = self._out_save
            self.out_save = None
    def summary_out(self):
        """Return as a string the log from stdout."""
        out = self.out.getvalue()
        if out:
            if self.quiet_out:
                return out
            else:
                return self.out_head + 'Log by '+ self.name + ':\n' + out
        else:
            return ''
    def flush_out(self):
        """Flush the stdout log. All data held in the log is lost."""
        # Replacing the StringIO discards the buffered text.
        self.out.close()
        self.out = StringIO()
    def trap_err(self):
        """Trap and log stderr."""
        if sys.stderr is self.err:
            raise OutputTrapError,'You are already trapping stderr.'
        if not self.debug:
            self._err_save = sys.stderr
            sys.stderr = self.err
    def release_err(self):
        """Release stderr."""
        if not self.debug:
            if not sys.stderr is self.err:
                raise OutputTrapError,'You are not trapping stderr.'
            sys.stderr = self._err_save
            self.err_save = None
    def summary_err(self):
        """Return as a string the log from stderr."""
        err = self.err.getvalue()
        if err:
            if self.quiet_err:
                return err
            else:
                return self.err_head + 'Log by '+ self.name + ':\n' + err
        else:
            return ''
    def flush_err(self):
        """Flush the stderr log. All data held in the log is lost."""
        self.err.close()
        self.err = StringIO()
    def trap_all(self):
        """Trap and log both stdout and stderr.
        Cacthes and discards OutputTrapError exceptions raised."""
        # Swallowing the error makes trap_all() idempotent.
        try:
            self.trap_out()
        except OutputTrapError:
            pass
        try:
            self.trap_err()
        except OutputTrapError:
            pass
    def release_all(self):
        """Release both stdout and stderr.
        Cacthes and discards OutputTrapError exceptions raised."""
        try:
            self.release_out()
        except OutputTrapError:
            pass
        try:
            self.release_err()
        except OutputTrapError:
            pass
    def summary_all(self):
        """Return as a string the log from stdout and stderr, prepending a separator
        to each (defined in __init__ as sum_sep)."""
        sum = ''
        sout = self.summary_out()
        if sout:
            sum += self.sum_sep + sout
        serr = self.summary_err()
        if serr:
            sum += '\n'+self.sum_sep + serr
        return sum
    def flush_all(self):
        """Flush stdout and stderr"""
        self.flush_out()
        self.flush_err()
    # a few shorthands
    trap = trap_all
    release = release_all
    summary = summary_all
    flush = flush_all
# end OutputTrap
#****************************************************************************
# Module testing. Incomplete, I'm lazy...
def _test_all():
    """Module testing functions, activated when the module is called as a
    script (not imported)."""
    # Put tests for this module in here.
    # Define them as nested functions so they don't clobber the
    # pydoc-generated docs
    def _test_():
        # Template for new tests; not executed.
        name = ''
        print '#'*50+'\nRunning test for ' + name
        # ...
        print 'Finished test for '+ name +'\n'+'#'*50
    def _test_OutputTrap():
        # Manual (eyeball) test: prints what should and should not appear.
        trap = OutputTrap(name = 'Test Trap', sum_sep = '.'*50+'\n',
                          out_head = 'SOut. ', err_head = 'SErr. ')
        name = 'OutputTrap class'
        print '#'*50+'\nRunning test for ' + name
        print 'Trapping out'
        trap.trap_out()
        print >>sys.stdout, '>>stdout. stdout is trapped.'
        print >>sys.stderr, '>>stderr. stdout is trapped.'
        trap.release_out()
        print trap.summary_out()
        print 'Trapping err'
        trap.trap_err()
        print >>sys.stdout, '>>stdout. stderr is trapped.'
        print >>sys.stderr, '>>stderr. stderr is trapped.'
        trap.release_err()
        print trap.summary_err()
        print 'Trapping all (no flushing)'
        trap.trap_all()
        print >>sys.stdout, '>>stdout. stdout/err is trapped.'
        print >>sys.stderr, '>>stderr. stdout/err is trapped.'
        trap.release_all()
        print trap.summary_all()
        print 'Trapping all (flushing first)'
        trap.flush()
        trap.trap_all()
        print >>sys.stdout, '>>stdout. stdout/err is trapped.'
        print >>sys.stderr, '>>stderr. stdout/err is trapped.'
        trap.release_all()
        print trap.summary_all()
        print 'Finished test for '+ name +'\n'+'#'*50
    # call the actual tests here:
    _test_OutputTrap()
if __name__=="__main__":
    # Manual smoke-test entry point; currently disabled.
    # _test_all() # XXX BROKEN.
    pass
#************************ end of file <OutputTrap.py> ************************
| santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/ipython-0.8.2-py2.5.egg/IPython/OutputTrap.py | Python | bsd-3-clause | 8,014 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Rasmus Sorensen, rasmusscholer@gmail.com <scholer.github.io>
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
# pylint: disable=C0103,W0142
import os
import yaml
import argparse
import getpass
#from six import string_types
import logging
logger = logging.getLogger(__name__)
#from urllib.parse import urljoin, urlsplit
import hashlib
LIBDIR = os.path.dirname(os.path.realpath(__file__))
def filehexdigest(filepath, digesttype='md5'):
    """Return the hex digest of the file at *filepath*.

    The file is streamed in chunks so arbitrarily large files can be hashed
    without loading them into memory.  *digesttype* is any algorithm name
    accepted by :func:`hashlib.new` (default ``'md5'``).
    """
    digest = hashlib.new(digesttype)  # generic factory; works for md5, sha256, ...
    # Read a multiple of the digest's block size per iteration -- larger
    # reads are faster than hashing per 128-byte block.
    chunk_size = 128 * digest.block_size
    with open(filepath, 'rb') as stream:
        while True:
            chunk = stream.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def calc_checksum(bytearr, digesttype='md5'):
    """Return the hex digest of an in-memory byte sequence.

    Thin convenience wrapper around :mod:`hashlib`; *digesttype* is any
    algorithm name accepted by :func:`hashlib.new`.
    """
    # hashlib.new accepts the initial data directly, so no separate update().
    return hashlib.new(digesttype, bytearr).hexdigest()
def credentials_prompt(user='', password=''):
    """ Simple method to prompt for user credentials.

    *user* and *password* act as defaults: pressing enter at either prompt
    keeps the suggested value.  The OS login name is suggested when no user
    is given.  Returns a (user, password) tuple.
    """
    if not user:
        user = getpass.getuser()
    # Empty input falls back to the suggested value via `or`.
    user = input("User: [%s]" % user) or user
    password = getpass.getpass() or password
    return user, password
def load_config(filepath=None):
    """
    Load config from file:
        Default path: "~/.config/ezfetcher/ezfetcher.yaml"
        Other paths:
            "~/.ezfetcher.yaml"
            "~/.ezfetcher/ezfetcher.yaml"
            "~/.config/ezfetcher.yaml"
            "~/.config/ezfetcher/config.yaml"

    Returns the parsed config dict, or an empty dict when the file is
    missing or empty.
    """
    if filepath is None:
        filepath = os.path.expanduser("~/.config/ezfetcher/ezfetcher.yaml")
    filepath = os.path.normpath(filepath)
    try:
        # safe_load avoids arbitrary object construction from the YAML file
        # (plain yaml.load without an explicit Loader is unsafe/deprecated),
        # and the with-block closes the handle the original left dangling.
        with open(filepath) as fh:
            config = yaml.safe_load(fh) or {}  # empty file parses to None
        logger.debug("Config with %s keys loaded from file: %s", len(config), filepath)
        return config
    except FileNotFoundError:
        logger.debug("Config file not found: %s, returning empty dict...", filepath)
        return {}
def save_config(config, filepath=None):
    """ Save config to file (default: ~/.ezfetcher.yaml). """
    if filepath is None:
        filepath = os.path.expanduser("~/.ezfetcher.yaml")
    # with-block ensures the file is flushed and closed; the original
    # passed an unmanaged open() handle to yaml.dump and leaked it.
    with open(filepath, 'w') as fh:
        yaml.dump(config, fh)
    logger.debug("Config with %s keys dumped to file: %s", len(config), filepath)
def get_config(args=None, config_fpath=None):
    """ Get config, merging args over the persistent config.

    *args* may be an argparse.Namespace, a dict, or None (no overrides).
    Values that are not None override the corresponding on-disk keys.
    """
    # Load config:
    config = load_config(config_fpath)
    # Merge with args:
    if isinstance(args, argparse.Namespace):
        args = vars(args)
    # BUG FIX: the default args=None previously crashed on args.items();
    # treat None as "no overrides".
    for key, value in (args or {}).items():
        if value is not None:
            config[key] = value
    logger.debug("Returning merged config with args, has %s keys", len(config))
    return config
def init_logging(args=None):#, prefix="EzFetcher"):
    """
    Set up standard logging system based on values provided by args, namely:
    - loglevel: a numeric level or a level name understood by the logging module
    - testing: truthy selects DEBUG as the default level (otherwise INFO)
    - logtofile is accepted in args but not currently acted upon
    """
    if args is None:
        args = {}
    loguserfmt = "%(asctime)s %(levelname)-5s %(name)20s:%(lineno)-4s%(funcName)20s() %(message)s"
    logtimefmt = "%H:%M:%S"  # Nicer for output to user in console and testing.
    if args.get('loglevel'):
        try:
            # Numeric levels (e.g. "10") are used directly...
            loglevel = int(args['loglevel'])
        except (TypeError, ValueError):
            # ...otherwise resolve a level name such as "debug"/"INFO".
            loglevel = getattr(logging, args['loglevel'].upper())
    else:
        loglevel = logging.DEBUG if args.get('testing') else logging.INFO
    logging.basicConfig(level=loglevel,
                        format=loguserfmt,
                        datefmt=logtimefmt)
    logger.info("Logging system initialized with loglevel %s", loglevel)
    # (Removed a leftover debug `print("args:", args)` that wrote the raw
    # argument dict to stdout on every call.)
| scholer/ezfetcher | ezfetcher/utils.py | Python | gpl-3.0 | 4,644 |
"""The tests for the MQTT binary sensor platform."""
import unittest
from homeassistant.bootstrap import setup_component
import homeassistant.components.binary_sensor as binary_sensor
from tests.common import mock_mqtt_component, fire_mqtt_message
from homeassistant.const import (STATE_OFF, STATE_ON)
from tests.common import get_test_home_assistant
class TestSensorMQTT(unittest.TestCase):
    """Test the MQTT sensor."""
    # NOTE(review): despite the class name, these tests exercise the MQTT
    # *binary_sensor* platform (see the imports above).
    def setUp(self):  # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        # Replace the real MQTT component so no broker is needed.
        mock_mqtt_component(self.hass)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    def test_setting_sensor_value_via_mqtt_message(self):
        """Test the setting of the value via MQTT."""
        self.hass.config.components = ['mqtt']
        assert setup_component(self.hass, binary_sensor.DOMAIN, {
            binary_sensor.DOMAIN: {
                'platform': 'mqtt',
                'name': 'test',
                'state_topic': 'test-topic',
                'payload_on': 'ON',
                'payload_off': 'OFF',
            }
        })
        # Initial state before any message is OFF.
        state = self.hass.states.get('binary_sensor.test')
        self.assertEqual(STATE_OFF, state.state)
        fire_mqtt_message(self.hass, 'test-topic', 'ON')
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test')
        self.assertEqual(STATE_ON, state.state)
        fire_mqtt_message(self.hass, 'test-topic', 'OFF')
        self.hass.block_till_done()
        state = self.hass.states.get('binary_sensor.test')
        self.assertEqual(STATE_OFF, state.state)
    def test_valid_sensor_class(self):
        """Test the setting of a valid sensor class."""
        self.hass.config.components = ['mqtt']
        assert setup_component(self.hass, binary_sensor.DOMAIN, {
            binary_sensor.DOMAIN: {
                'platform': 'mqtt',
                'name': 'test',
                'sensor_class': 'motion',
                'state_topic': 'test-topic',
            }
        })
        state = self.hass.states.get('binary_sensor.test')
        self.assertEqual('motion', state.attributes.get('sensor_class'))
    def test_invalid_sensor_class(self):
        """Test the setting of an invalid sensor class."""
        self.hass.config.components = ['mqtt']
        assert setup_component(self.hass, binary_sensor.DOMAIN, {
            binary_sensor.DOMAIN: {
                'platform': 'mqtt',
                'name': 'test',
                'sensor_class': 'abc123',
                'state_topic': 'test-topic',
            }
        })
        # An unknown sensor_class is dropped rather than rejected outright.
        state = self.hass.states.get('binary_sensor.test')
        self.assertIsNone(state.attributes.get('sensor_class'))
| xifle/home-assistant | tests/components/binary_sensor/test_mqtt.py | Python | mit | 2,858 |
# PROPKA 3.1
#
#
# setuptools installation of PROPKA 3.1
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
VERSION = "3.1"
setup(name="PROPKA",
version=VERSION,
description="Heuristic pKa calculations with ligands",
long_description="""
PROPKA predicts the pKa values of ionizable groups in proteins (version 3.0) and
protein-ligand complexes (version 3.1) based on the 3D structure.
For proteins without ligands both version should produce the same result.
The method is described in the following papers, which you should cite
in publications:
* Sondergaard, Chresten R., Mats HM Olsson, Michal Rostkowski, and Jan
H. Jensen. "Improved Treatment of Ligands and Coupling Effects in
Empirical Calculation and Rationalization of pKa Values." Journal of
Chemical Theory and Computation 7, no. 7 (2011): 2284-2295.
* Olsson, Mats HM, Chresten R. Sondergaard, Michal Rostkowski, and Jan
H. Jensen. "PROPKA3: consistent treatment of internal and surface
residues in empirical pKa predictions." Journal of Chemical Theory
and Computation 7, no. 2 (2011): 525-537.
See http://propka.ki.ku.dk/ for the PROPKA web server,
using the tutorial http://propka.ki.ku.dk/~luca/wiki/index.php/PROPKA_3.1_Tutorial .
""",
author="Jan H. Jensen",
author_email="jhjensen@chem.ku.dk",
license="",
url="http://propka.ki.ku.dk/",
keywords="science",
packages=find_packages(exclude=['scripts']),
package_data = {'propka': ['*.dat', '*.cfg']},
#scripts = ["scripts/propka31.py"], # use entry point below
entry_points = {
'console_scripts': [
'propka31 = propka.run:main',
],
},
zip_safe=True,
test_suite="Tests",
)
| vanarbulax/propka-3.1 | setup.py | Python | lgpl-2.1 | 1,771 |
from dstack import type_manager
class ResponseHolder:
    """Collects responses produced while processing an event.

    Registered with the type manager at pre-processing priority so it runs
    before other handlers.
    """
    def __init__(self):
        self._responses = []
        self.priority = type_manager.PRIORITY_PRE

    def clear(self):
        """Drop any held responses."""
        self._responses = []

    def publish(self, resp):
        """Replace the held responses with *resp*."""
        self._responses = resp

    def list(self):
        """Return the currently held responses."""
        return self._responses

    def get(self):
        """Return the first held response; raise if none were published.

        BUG FIX: the original guard was inverted (it raised when responses
        *were* present) and it then read the nonexistent attribute
        ``self._response``.
        """
        if len(self._responses) == 0:
            raise Exception("Expected a response")
        return self._responses[0]
| ibuildthecloud/dstack | code/agent/src/agents/pyagent/tests/response_holder.py | Python | apache-2.0 | 465 |
__source__ = 'https://leetcode.com/problems/asteroid-collision/'
# Time: O(N)
# Space: O(N)
#
# Description: Leetcode # 735. Asteroid Collision
#
# We are given an array asteroids of integers representing asteroids in a row.
#
# For each asteroid, the absolute value represents its size,
# and the sign represents its direction (positive meaning right,
# negative meaning left). Each asteroid moves at the same speed.
#
# Find out the state of the asteroids after all collisions.
# If two asteroids meet, the smaller one will explode.
# If both are the same size, both will explode.
# Two asteroids moving in the same direction will never meet.
#
# Example 1:
# Input:
# asteroids = [5, 10, -5]
# Output: [5, 10]
# Explanation:
# The 10 and -5 collide resulting in 10. The 5 and 10 never collide.
# Example 2:
# Input:
# asteroids = [8, -8]
# Output: []
# Explanation:
# The 8 and -8 collide exploding each other.
# Example 3:
# Input:
# asteroids = [10, 2, -5]
# Output: [10]
# Explanation:
# The 2 and -5 collide resulting in -5. The 10 and -5 collide resulting in 10.
# Example 4:
# Input:
# asteroids = [-2, -1, 1, 2]
# Output: [-2, -1, 1, 2]
# Explanation:
# The -2 and -1 are moving left, while the 1 and 2 are moving right.
# Asteroids moving the same direction never meet, so no asteroids will meet each other.
# Note:
#
# The length of asteroids will be at most 10000.
# Each asteroid will be a non-zero integer in the range [-1000, 1000]..
#
import unittest
#40ms 80.99%
class Solution(object):
    def asteroidCollision(self, asteroids):
        """
        Simulate the collisions with a stack of surviving asteroids.

        A collision only happens when an incoming left-mover (< 0) meets a
        right-mover (> 0) on top of the stack; smaller ones explode, equal
        sizes annihilate each other.

        :type asteroids: List[int]
        :rtype: List[int]
        """
        survivors = []
        for rock in asteroids:
            alive = True
            # Resolve collisions against right-moving tops of the stack.
            while alive and survivors and rock < 0 < survivors[-1]:
                top = survivors[-1]
                if top < -rock:
                    # Top is smaller: it explodes, keep checking downward.
                    survivors.pop()
                elif top == -rock:
                    # Equal sizes: both explode.
                    survivors.pop()
                    alive = False
                else:
                    # Top is bigger: incoming asteroid explodes.
                    alive = False
            if alive:
                survivors.append(rock)
        return survivors
class TestMethods(unittest.TestCase):
    """Placeholder test suite; only checks the harness runs."""
    def test_Local(self):
        # Trivial sanity assertion.
        self.assertEqual(1, 1)
if __name__ == '__main__':
    # Run the placeholder unittest suite when executed directly.
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/asteroid-collision/solution/
# Approach #1: Stack [Accepted]
# Complexity Analysis
# Time Complexity: O(N), where N is the number of asteroids. Our stack pushes and pops each asteroid at most once.
# Space Complexity: O(N), the size of ans.
# 24ms 44.23%
class Solution {
public int[] asteroidCollision(int[] asteroids) {
Stack<Integer> s = new Stack<>();
boolean doPush;
for (int n : asteroids) {
doPush = true;
while (!s.isEmpty() && s.peek() > 0 && n < 0) {
int cur = s.pop();
if (cur > Math.abs(n)) n = cur;
else if (cur == Math.abs(n)) {
doPush = false;
break;
}
}
if (doPush) s.push(n);
}
int[] ans = new int[s.size()];
for (int i = s.size() - 1; i >= 0; i--) {
ans[i] = s.pop();
}
return ans;
}
}
# 10ms 99.72%
class Solution {
public int[] asteroidCollision(int[] asteroids) {
int l = -1, r = 0, n = asteroids.length;
while (r < n) {
//System.out.println("Entering while loop with l: " + l + ", r: " + r);
if (l >= 0 && asteroids[l] > 0 && asteroids[r] < 0) {
if (asteroids[l] > -asteroids[r]) r++;
else if (asteroids[l] < -asteroids[r]) l--;
else {
r++;
l--;
}
continue; //start from while loop
}
//System.out.println("l: " + l + ", r: " + r);
asteroids[++l] = asteroids[r++];
}
return Arrays.copyOf(asteroids, l + 1);
}
}
'''
| JulyKikuAkita/PythonPrac | cs15211/AsteroidCollision.py | Python | apache-2.0 | 3,884 |
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy.plotting.matplot_dep.plot_definitions nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from ..abstract_plotting_library import AbstractPlottingLibrary
from .. import Tango
from . import defaults
from plotly import tools
from plotly import plotly as py
from plotly.graph_objs import Scatter, Scatter3d, Line,\
Marker, ErrorX, ErrorY, Bar, Heatmap, Trace,\
Annotations, Annotation, Contour, Font, Surface
from plotly.exceptions import PlotlyDictKeyError
# Translation table from matplotlib single-character marker codes to the
# plotly symbol names used in Marker(symbol=...).
SYMBOL_MAP = {
    'o': 'dot',
    'v': 'triangle-down',
    '^': 'triangle-up',
    '<': 'triangle-left',
    '>': 'triangle-right',
    's': 'square',
    '+': 'cross',
    'x': 'x',
    '*': 'x', # no star yet in plotly!!
    'D': 'diamond',
    'd': 'diamond',
}
class PlotlyPlots(AbstractPlottingLibrary):
    def __init__(self):
        """Initialise the plotly backend with the module-level defaults."""
        super(PlotlyPlots, self).__init__()
        # Expose the defaults module's attributes as the kwarg-defaults dict.
        self._defaults = defaults.__dict__
        # Per-figure bookkeeping (currently unused elsewhere in this view).
        self.current_states = dict()
    def figure(self, rows=1, cols=1, specs=None, is_3d=False, **kwargs):
        """Create a plotly subplot figure of rows x cols panels.

        When *specs* is not given, every panel is marked 2d/3d according
        to *is_3d*; extra kwargs go straight to tools.make_subplots.
        """
        if specs is None:
            specs = [[{'is_3d': is_3d}]*cols]*rows
        figure = tools.make_subplots(rows, cols, specs=specs, **kwargs)
        return figure
    def new_canvas(self, figure=None, row=1, col=1, projection='2d',
                   xlabel=None, ylabel=None, zlabel=None,
                   title=None, xlim=None,
                   ylim=None, zlim=None, **kwargs):
        """Return a ((figure, row, col), kwargs) canvas tuple for later plotting.

        A new figure is created when none is given.  NOTE(review): the
        xlabel/ylabel/zlabel/title/xlim/ylim/zlim parameters are accepted
        but never applied in this implementation.
        """
        #if 'filename' not in kwargs:
        #    print('PlotlyWarning: filename was not given, this may clutter your plotly workspace')
        #    filename = None
        #else:
        #    filename = kwargs.pop('filename')
        if figure is None:
            figure = self.figure(is_3d=projection=='3d')
        figure.layout.font = Font(family="Raleway, sans-serif")
        # 3D layouts get a centred legend on a grey background.
        if projection == '3d':
            figure.layout.legend.x=.5
            figure.layout.legend.bgcolor='#DCDCDC'
        return (figure, row, col), kwargs
    def add_to_canvas(self, canvas, traces, legend=False, **kwargs):
        """Attach *traces* (a Trace/Annotations, or nested dicts/lists of
        them) to the canvas' subplot and toggle the figure legend.

        Returns the canvas unchanged so calls can be chained.
        """
        figure, row, col = canvas
        def append_annotation(a, xref, yref):
            # Annotations need explicit axis references for the subplot.
            if 'xref' not in a:
                a['xref'] = xref
            if 'yref' not in a:
                a['yref'] = yref
            figure.layout.annotations.append(a)
        def append_trace(t, row, col):
            figure.append_trace(t, row, col)
        def recursive_append(traces):
            # Walk arbitrarily nested containers of traces/annotations.
            if isinstance(traces, Annotations):
                # Resolve this subplot's axis ids for the annotations.
                xref, yref = figure._grid_ref[row-1][col-1]
                for a in traces:
                    append_annotation(a, xref, yref)
            elif isinstance(traces, (Trace)):
                try:
                    append_trace(traces, row, col)
                except PlotlyDictKeyError:
                    # Its a dictionary of plots:
                    for t in traces:
                        recursive_append(traces[t])
            elif isinstance(traces, (dict)):
                for t in traces:
                    recursive_append(traces[t])
            elif isinstance(traces, (tuple, list)):
                for t in traces:
                    recursive_append(t)
        recursive_append(traces)
        figure.layout['showlegend'] = legend
        return canvas
    def show_canvas(self, canvas, filename=None, **kwargs):
        """Send the figure to plotly: inline in a notebook, browser otherwise.

        Returns whatever py.iplot/py.plot returns (e.g. the plot URL).
        """
        figure, _, _ = canvas
        if len(figure.data) == 0:
            # add mock data -- plotly rejects figures with no traces at all.
            figure.append_trace(Scatter(x=[], y=[], name='', showlegend=False), 1, 1)
        # Imported lazily to avoid a hard dependency outside plotting calls.
        from ..gpy_plot.plot_util import in_ipynb
        if in_ipynb():
            return py.iplot(figure, filename=filename)#self.current_states[hex(id(figure))]['filename'])
        else:
            return py.plot(figure, filename=filename)#self.current_states[hex(id(figure))]['filename'])
def scatter(self, ax, X, Y, Z=None, color=Tango.colorsHex['mediumBlue'], cmap=None, label=None, marker='o', marker_kwargs=None, **kwargs):
try:
marker = SYMBOL_MAP[marker]
except:
#not matplotlib marker
pass
marker_kwargs = marker_kwargs or {}
if 'symbol' not in marker_kwargs:
marker_kwargs['symbol'] = marker
if Z is not None:
return Scatter3d(x=X, y=Y, z=Z, mode='markers',
showlegend=label is not None,
marker=Marker(color=color, colorscale=cmap, **marker_kwargs),
name=label, **kwargs)
return Scatter(x=X, y=Y, mode='markers', showlegend=label is not None,
marker=Marker(color=color, colorscale=cmap, **marker_kwargs),
name=label, **kwargs)
    def plot(self, ax, X, Y, Z=None, color=None, label=None, line_kwargs=None, **kwargs):
        """Return a plotly line trace (Scatter3d when *Z* is given).

        The legend entry is only shown when *label* is not None; extra
        *line_kwargs* are forwarded to the Line style object.
        """
        if 'mode' not in kwargs:
            kwargs['mode'] = 'lines'
        # Note: ``**line_kwargs or {}`` parses as ``**(line_kwargs or {})``.
        if Z is not None:
            return Scatter3d(x=X, y=Y, z=Z, showlegend=label is not None, line=Line(color=color, **line_kwargs or {}), name=label, **kwargs)
        return Scatter(x=X, y=Y, showlegend=label is not None, line=Line(color=color, **line_kwargs or {}), name=label, **kwargs)
def plot_axis_lines(self, ax, X, color=Tango.colorsHex['mediumBlue'], label=None, marker_kwargs=None, **kwargs):
if X.shape[1] == 1:
annotations = Annotations()
for i, row in enumerate(X):
annotations.append(
Annotation(
text='',
x=row[0], y=0,
yref='paper',
ax=0, ay=20,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor=color,
showarrow=True,
#showlegend=i==0,
#label=label,
))
return annotations
elif X.shape[1] == 2:
marker_kwargs.setdefault('symbol', 'diamond')
opacity = kwargs.pop('opacity', .8)
return Scatter3d(x=X[:, 0], y=X[:, 1], z=np.zeros(X.shape[0]),
mode='markers',
projection=dict(z=dict(show=True, opacity=opacity)),
marker=Marker(color=color, **marker_kwargs or {}),
opacity=0,
name=label,
showlegend=label is not None, **kwargs)
    def barplot(self, canvas, x, height, width=0.8, bottom=0, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
        """Return a plotly Bar trace for the given canvas.

        NOTE(review): *width* and *bottom* are accepted for interface
        compatibility but are not forwarded to plotly here.  A 'barmode'
        kwarg, if supplied, is applied to the whole figure layout.
        """
        figure, _, _ = canvas
        if 'barmode' in kwargs:
            figure.layout['barmode'] = kwargs.pop('barmode')
        return Bar(x=x, y=height, marker=Marker(color=color), name=label)
    def xerrorbar(self, ax, X, Y, error, Z=None, color=Tango.colorsHex['mediumBlue'], label=None, error_kwargs=None, **kwargs):
        """Return a marker trace with horizontal (x) error bars.

        *error* is either a 1-D array of symmetric errors or a 2-row array
        [lower, upper] of asymmetric errors.  Marker size '0' hides the
        marker itself so only the bars are visible.
        """
        error_kwargs = error_kwargs or {}
        if (error.shape[0] == 2) and (error.ndim == 2):
            # Two rows: asymmetric bars, row 0 below / row 1 above.
            error_kwargs.update(dict(array=error[1], arrayminus=error[0], symmetric=False))
        else:
            error_kwargs.update(dict(array=error, symmetric=True))
        if Z is not None:
            return Scatter3d(x=X, y=Y, z=Z, mode='markers',
                             error_x=ErrorX(color=color, **error_kwargs or {}),
                             marker=Marker(size='0'), name=label,
                             showlegend=label is not None, **kwargs)
        return Scatter(x=X, y=Y, mode='markers',
                       error_x=ErrorX(color=color, **error_kwargs or {}),
                       marker=Marker(size='0'), name=label,
                       showlegend=label is not None,
                       **kwargs)
    def yerrorbar(self, ax, X, Y, error, Z=None, color=Tango.colorsHex['mediumBlue'], label=None, error_kwargs=None, **kwargs):
        """Plot vertical error bars at the given points.

        Mirror of :meth:`xerrorbar` using plotly's ``error_y`` channel:
        ``error`` is either a 1D array of symmetric errors or a (2, n) array
        of (minus, plus) offsets.
        """
        error_kwargs = error_kwargs or {}
        if (error.shape[0] == 2) and (error.ndim == 2):
            error_kwargs.update(dict(array=error[1], arrayminus=error[0], symmetric=False))
        else:
            error_kwargs.update(dict(array=error, symmetric=True))
        if Z is not None:
            return Scatter3d(x=X, y=Y, z=Z, mode='markers',
                             error_y=ErrorY(color=color, **error_kwargs or {}),
                             marker=Marker(size='0'), name=label,
                             showlegend=label is not None, **kwargs)
        return Scatter(x=X, y=Y, mode='markers',
                       error_y=ErrorY(color=color, **error_kwargs or {}),
                       marker=Marker(size='0'), name=label,
                       showlegend=label is not None,
                       **kwargs)
    def imshow(self, ax, X, extent=None, label=None, vmin=None, vmax=None, **imshow_kwargs):
        """Show the 2D array ``X`` as a ``Heatmap`` trace.

        ``extent`` (x0, x1, y0, y1) is required despite the None default --
        it is indexed below to derive the pixel spacing (TODO confirm all
        callers pass it).  ``vmin``/``vmax`` bound the color scale; the color
        bar is hidden unless the caller asks for it.
        """
        if not 'showscale' in imshow_kwargs:
            imshow_kwargs['showscale'] = False
        return Heatmap(z=X, name=label,
                       x0=extent[0], dx=float(extent[1]-extent[0])/(X.shape[0]-1),
                       y0=extent[2], dy=float(extent[3]-extent[2])/(X.shape[1]-1),
                       zmin=vmin, zmax=vmax,
                       showlegend=label is not None,
                       hoverinfo='z',
                       **imshow_kwargs)
    def imshow_interact(self, ax, plot_function, extent=None, label=None, resolution=None, vmin=None, vmax=None, **imshow_kwargs):
        # TODO stream interaction?
        # Not implemented for the plotly backend; defer to the base class.
        super(PlotlyPlots, self).imshow_interact(ax, plot_function)
def annotation_heatmap(self, ax, X, annotation, extent=None, label='Gradient', imshow_kwargs=None, **annotation_kwargs):
imshow_kwargs.setdefault('label', label)
imshow_kwargs.setdefault('showscale', True)
imshow = self.imshow(ax, X, extent, **imshow_kwargs)
X = X-X.min()
X /= X.max()/2.
X -= 1
x = np.linspace(extent[0], extent[1], X.shape[0])
y = np.linspace(extent[2], extent[3], X.shape[1])
annotations = Annotations()
for n, row in enumerate(annotation):
for m, val in enumerate(row):
var = X[n][m]
annotations.append(
Annotation(
text=str(val),
x=x[m], y=y[n],
xref='x1', yref='y1',
font=dict(color='white' if np.abs(var) > 0.8 else 'black', size=10),
opacity=.5,
showarrow=False,
))
return imshow, annotations
    def annotation_heatmap_interact(self, ax, plot_function, extent, label=None, resolution=15, imshow_kwargs=None, **annotation_kwargs):
        # Not implemented for the plotly backend; defer to the base class.
        super(PlotlyPlots, self).annotation_heatmap_interact(ax, plot_function, extent)
    def contour(self, ax, X, Y, C, levels=20, label=None, **kwargs):
        """Contour plot of ``C`` over the ``X``/``Y`` grid.

        ``levels`` is currently unused: the explicit contour spacing is
        commented out below, so plotly's automatic contouring applies.
        """
        return Contour(x=X, y=Y, z=C,
                       #ncontours=levels, contours=Contours(start=C.min(), end=C.max(), size=(C.max()-C.min())/levels),
                       name=label, **kwargs)
    def surface(self, ax, X, Y, Z, color=None, label=None, **kwargs):
        """3D surface of ``Z`` over ``X``/``Y``.  ``color`` is accepted for
        API compatibility but not forwarded to the ``Surface`` trace."""
        return Surface(x=X, y=Y, z=Z, name=label, showlegend=label is not None, **kwargs)
    def fill_between(self, ax, X, lower, upper, color=Tango.colorsHex['mediumBlue'], label=None, line_kwargs=None, **kwargs):
        """Fill the area between the curves ``lower`` and ``upper``.

        Returns ``(lower_trace, upper_trace)``.  The upper trace uses
        fill='tonextx' so plotly fills towards the neighbouring trace, and
        both traces share a legendgroup so they toggle together in the
        legend.  Hex colors are converted to an rgba fill color using the
        trace opacity; other color strings are passed through unchanged.
        """
        if not 'line' in kwargs:
            kwargs['line'] = Line(**line_kwargs or {})
        else:
            kwargs['line'].update(line_kwargs or {})
        if color.startswith('#'):
            fcolor = 'rgba({c[0]}, {c[1]}, {c[2]}, {alpha})'.format(c=Tango.hex2rgb(color), alpha=kwargs.get('opacity', 1.0))
        else: fcolor = color
        u = Scatter(x=X, y=upper, fillcolor=fcolor, showlegend=label is not None, name=label, fill='tonextx', legendgroup='{}_fill_({},{})'.format(label, ax[1], ax[2]), **kwargs)
        #fcolor = '{}, {alpha})'.format(','.join(fcolor.split(',')[:-1]), alpha=0.0)
        l = Scatter(x=X, y=lower, fillcolor=fcolor, showlegend=False, name=label, legendgroup='{}_fill_({},{})'.format(label, ax[1], ax[2]), **kwargs)
        return l, u
def fill_gradient(self, canvas, X, percentiles, color=Tango.colorsHex['mediumBlue'], label=None, **kwargs):
if color.startswith('#'):
colarray = Tango.hex2rgb(color)
opacity = .9
else:
colarray = map(float(color.strip(')').split('(')[1]))
if len(colarray) == 4:
colarray, opacity = colarray[:3] ,colarray[3]
alpha = opacity*(1.-np.abs(np.linspace(-1,1,len(percentiles)-1)))
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
from itertools import tee
a, b = tee(iterable)
next(b, None)
return zip(a, b)
polycol = []
for i, y1, a in zip(range(len(percentiles)), percentiles, alpha):
fcolor = 'rgba({}, {}, {}, {alpha})'.format(*colarray, alpha=a)
if i == len(percentiles)/2:
polycol.append(Scatter(x=X, y=y1, fillcolor=fcolor, showlegend=True,
name=label, line=Line(width=0, smoothing=0), mode='none', fill='tonextx',
legendgroup='density', hoverinfo='none', **kwargs))
else:
polycol.append(Scatter(x=X, y=y1, fillcolor=fcolor, showlegend=False,
name=None, line=Line(width=1, smoothing=0, color=fcolor), mode='none', fill='tonextx',
legendgroup='density', hoverinfo='none', **kwargs))
return polycol
| avehtari/GPy | GPy/plotting/plotly_dep/plot_definitions.py | Python | bsd-3-clause | 15,302 |
import fluiddb
import unittest
class FluiddbTest(unittest.TestCase):
    # NOTE(review): this is a live integration test -- it performs a real
    # network call against FluidDB and asserts a hard-coded object id, so it
    # only passes online and for this specific account.
    def test_fluiddb_call(self):
        """Issue a GET /objects query and verify the status and returned ids."""
        status, result = fluiddb.call('GET', '/objects', query='fluiddb/users/username = "igorgue"')
        assert status == 200
        assert result == {u'ids': [u'71d5aa6f-d5fa-4578-9301-411fd92b1727']}
if __name__ == '__main__':
unittest.main()
| igorgue/fluiddb | fluiddb/test/test_fluiddb.py | Python | apache-2.0 | 405 |
#!/usr/bin/env python3
from math import ceil
from collections import Counter
def minTime(machines, goal):
    """Return the minimum number of days the machines need to produce
    ``goal`` items.

    Each entry of ``machines`` is the number of days one machine takes per
    item.  The answer is binary-searched: the total output after ``d`` days,
    sum((d // m) for each machine m), is monotone in ``d``.
    """
    c = Counter(machines)
    fastest = min(c)
    min_days = 1
    # Upper bound: by this day the fastest machines alone reach the goal.
    # The previous bound ceil(fastest*goal/count) could undershoot, e.g.
    # machines=[3, 3], goal=5 returned 8 days although day 8 yields only 4.
    max_days = fastest * ceil(goal / c[fastest])
    while max_days - min_days > 1:
        mid = (min_days + max_days) // 2
        output = sum((mid//x)*y for x, y in c.items())
        if output < goal:
            min_days = mid
        else:
            max_days = mid
    return max_days
print(minTime([1, 3, 5], 30))
print(minTime([2, 3], 5))
print(minTime([1, 3, 4], 10))
print(minTime([4, 5, 6], 12))
| CajetanP/coding-exercises | HackerRank/Interview/Searching/min_time.py | Python | mit | 582 |
# -*- coding: UTF-8 -*-
# Copyright 2009-2015 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""Generates 20 fictive sales invoices, distributed over more than
one month.
"""
from __future__ import unicode_literals
from django.conf import settings
from lino.utils import Cycler
from lino.api import dd, rt
vat = dd.resolve_app('vat')
sales = dd.resolve_app('sales')
ledger = dd.resolve_app('ledger')
partner_model = settings.SITE.partners_app_label + '.Partner'
Partner = dd.resolve_model(partner_model)
REQUEST = settings.SITE.login()
def objects():
    """Demo-fixture generator: yield 20 fictive sales invoices (each with
    1-3 items), dated over the three weeks before the site's demo date."""
    if False:
        yield sales.InvoicingMode(
            **dd.babel_values(
                'name',
                en='Default', de="Standard", fr="Standard"))
    if ledger:
        Invoice = dd.resolve_model('sales.VatProductInvoice')
        InvoiceItem = dd.resolve_model('sales.InvoiceItem')
        vt = ledger.VoucherTypes.get_for_model(Invoice)
        JOURNALS = Cycler(vt.get_journals())
        if len(JOURNALS.items) == 0:
            raise Exception("20140127 no journals for %s" % vt)
        # Cyclers hand out the existing demo rows round-robin so the
        # generated invoices are spread over partners, users and products.
        PARTNERS = Cycler(Partner.objects.all())
        USERS = Cycler(settings.SITE.user_model.objects.all())
        PRODUCTS = Cycler(rt.models.products.Product.objects.all())
        ITEMCOUNT = Cycler(1, 2, 3)
        for i in range(20):
            jnl = JOURNALS.pop()
            invoice = Invoice(
                journal=jnl,
                partner=PARTNERS.pop(),
                user=USERS.pop(),
                date=settings.SITE.demo_date(i-21))
            yield invoice
            for j in range(ITEMCOUNT.pop()):
                item = InvoiceItem(voucher=invoice, product=PRODUCTS.pop())
                # Trigger the same field updates the web UI would apply.
                item.product_changed(REQUEST)
                item.before_ui_save(REQUEST, None)
                yield item
            invoice.register(REQUEST)
            invoice.save()
| lino-framework/xl | lino_xl/lib/sales/fixtures/unused_demo.py | Python | bsd-2-clause | 1,912 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import mock
from pulp.client.commands.unit import UnitRemoveCommand
from pulp_puppet.common.constants import DISPLAY_MODULES_THRESHOLD, TYPE_PUPPET_MODULE
from pulp_puppet.devel.base_cli import ExtensionTests
from pulp_puppet.extensions.admin.repo.remove import RemoveCommand, DESC_REMOVE
class RemovePuppetModulesCommand(ExtensionTests):
    """Unit tests for the admin repo ``remove`` extension command."""
    def setUp(self):
        super(RemovePuppetModulesCommand, self).setUp()
        self.command = RemoveCommand(self.context)
    def test_defaults(self):
        # The command must be a standard UnitRemoveCommand wired with the
        # puppet-specific name, description and display threshold.
        self.assertTrue(isinstance(self.command, UnitRemoveCommand))
        self.assertEqual('remove', self.command.name)
        self.assertEqual(DESC_REMOVE, self.command.description)
        self.assertEqual(DISPLAY_MODULES_THRESHOLD, self.command.max_units_displayed)
        # uses default remove method
        self.assertEqual(self.command.method, self.command.run)
    @mock.patch('pulp_puppet.extensions.admin.repo.units_display.get_formatter_for_type')
    def test_get_formatter_for_type(self, mock_formatter):
        # Formatter lookup is delegated to the shared units_display helper
        # (patched here) with the unit type passed through unchanged.
        context = mock.MagicMock()
        command = RemoveCommand(context)
        command.get_formatter_for_type(TYPE_PUPPET_MODULE)
        mock_formatter.assert_called_once_with(TYPE_PUPPET_MODULE)
| ipanova/pulp_puppet | pulp_puppet_extensions_admin/test/unit/extensions/admin/repo/test_remove.py | Python | gpl-2.0 | 1,803 |
# referenciacatastral.py - functions for handling Spanish real state ids
# coding: utf-8
#
# Copyright (C) 2016 David García Garzón
# Copyright (C) 2016-2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Referencia Catastral (Spanish real estate property id)
The cadastral reference code is an identifier for real estate in Spain. It is
issued by Dirección General del Catastro (General Directorate of Land
Registry) of the Ministerio de Hacienda (Tresury Ministry).
It has 20 digits and contains numbers and letters including the Spanish Ñ.
The number consists of 14 digits for the parcel, 4 for identifying properties
within the parcel and 2 check digits. The parcel digits are structured
differently for urban, non-urban or special (infrastructure) cases.
More information:
* http://www.catastro.meh.es/esp/referencia_catastral_1.asp (Spanish)
* http://www.catastro.meh.es/documentos/05042010_P.pdf (Spanish)
* https://es.wikipedia.org/wiki/Catastro#Referencia_catastral
>>> validate('7837301-VG8173B-0001 TT') # Lanteira town hall
'7837301VG8173B0001TT'
>>> validate('783301 VG8173B 0001 TT') # missing digit
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('7837301/VG8173B 0001 TT') # not alphanumeric
Traceback (most recent call last):
...
InvalidFormat: ...
>>> validate('7837301 VG8173B 0001 NN') # bad check digits
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('4A08169P03PRAT0001LR') # BCN Airport
'4A08169 P03PRAT 0001 LR'
"""
from stdnum.exceptions import *
from stdnum.util import clean
alphabet = u'ABCDEFGHIJKLMNÑOPQRSTUVWXYZ0123456789'
def compact(number):
    """Convert the number to the minimal representation. This strips the
    number of any valid separators and removes surrounding whitespace."""
    # Spaces and dashes are the separators used in the official format.
    return clean(number, ' -').strip().upper()
def format(number):
    """Reformat the passed number to the standard format."""
    n = compact(number)
    # 7-char parcel half, 7-char parcel half, 4-char property, 2 check digits.
    groups = (n[:7], n[7:14], n[14:18], n[18:])
    return ' '.join(groups)
# The check digit implementation is based on the Javascript
# implementation by Vicente Sancho that can be found at
# http://trellat.es/validar-la-referencia-catastral-en-javascript/
def _check_digit(number):
    """Calculate a single check digit on the provided part of the number."""
    weights = (13, 15, 12, 5, 4, 17, 9, 21, 3, 7, 1)
    total = 0
    for weight, char in zip(weights, number):
        # Digits contribute their value, letters their 1-based position in
        # the (Ñ-aware) alphabet.
        if char.isdigit():
            value = int(char)
        else:
            value = alphabet.find(char) + 1
        total += weight * value
    return 'MQWERTYUIOPASDFGHJKLBZX'[total % 23]
def _force_unicode(number):
"""Convert the number to unicode."""
if not hasattr(number, 'isnumeric'): # pragma: no cover (Python 2 code)
number = number.decode('utf-8')
return number
def calc_check_digits(number):
    """Calculate the check digits for the number."""
    number = _force_unicode(compact(number))
    # Each check digit mixes one half of the 14-character parcel part with
    # the 4-character property part (positions 14-18).
    return (
        _check_digit(number[0:7] + number[14:18]) +
        _check_digit(number[7:14] + number[14:18]))
def validate(number):
    """Checks to see if the number provided is a valid Cadastral Reference.
    This checks the length, formatting and check digits."""
    number = compact(number)
    # Validate on a unicode copy so Ñ is a single character on Python 2;
    # the original (possibly byte) string is what gets returned.
    n = _force_unicode(number)
    if not all(c in alphabet for c in n):
        raise InvalidFormat()
    if len(n) != 20:
        raise InvalidLength()
    if calc_check_digits(n) != n[18:]:
        raise InvalidChecksum()
    return number
def is_valid(number):
    """Checks to see if the number provided is a valid Cadastral Reference."""
    try:
        validate(number)
    except ValidationError:
        return False
    return True
| holvi/python-stdnum | stdnum/es/referenciacatastral.py | Python | lgpl-2.1 | 4,360 |
"""Jabber related classes"""
__revision__ = ""
import jcl.model.account as account
from jcl.model.account import Account
class Handler(object):
    """Base class for stanza handlers.

    A handler pairs a ``filter`` (selecting the accounts a stanza applies
    to) with a ``handle`` (applying actions to them).  The ``*_filter``
    helpers implement the common addressing schemes: the component root
    JID, an account-type resource, or an account node.
    """
    def __init__(self, component):
        """Default Handler constructor"""
        self.component = component
    def filter(self, stanza, lang_class):
        """
        Filter account to be processed by the handler
        return all accounts. DB connection might already be opened.
        """
        accounts = account.get_all_accounts()
        return accounts
    def handle(self, stanza, lang_class, data):
        """
        Apply actions to do on given accounts
        Do nothing by default.
        """
        return []
    def root_filter(self, stanza, lang_class, node=None):
        """Filter stanza sent to root node (bare component JID);
        returns True on a match, None otherwise."""
        to_jid = stanza.get_to()
        if to_jid.resource is None and to_jid.node is None and node is None:
            return True
        else:
            return None
    def account_type_filter(self, stanza, lang_class, node=None):
        """Filter stanzas sent to account type node; returns the account
        type (the JID resource) on a match, None otherwise."""
        to_jid = stanza.get_to()
        account_type = to_jid.resource
        if account_type is not None and to_jid.node is None:
            return account_type
        else:
            return None
    def account_filter(self, stanza, lang_class, node=None):
        """Filter stanzas sent to account jid; returns the account name
        (the JID node part, possibly None)."""
        name = stanza.get_to().node
        return name
    def get_account_filter(self, stanza, lang_class, node=None):
        """Filter stanzas sent to account jid, only if account exists:
        returns the account looked up for the sender, None otherwise."""
        name = stanza.get_to().node
        if name is not None:
            return account.get_account(unicode(stanza.get_from().bare()),
                                       name)
        else:
            return None
    def get_accounts_root_filter(self, stanza, lang_class, node=None):
        """Filter stanza sent to root node: returns all of the sender's
        accounts on a match, None otherwise."""
        to_jid = stanza.get_to()
        if to_jid.resource is None and to_jid.node is None and node is None:
            return account.get_accounts(unicode(stanza.get_from().bare()))
        else:
            return None
def replace_handlers(handlers, old_handler_type, new_handler):
    """
    Replace handlers of type `old_handler_type` in `handlers` by `new_handler`

    ``handlers`` is a list of handler groups (lists); matching entries are
    replaced in place.  Matching compares class *names* only, so a
    same-named class from another module also matches.
    """
    for handler_group in handlers:
        # enumerate instead of xrange(len(...)): same in-place replacement,
        # but xrange does not exist on Python 3.
        for i, handler in enumerate(handler_group):
            if handler.__class__.__name__ == old_handler_type.__name__:
                handler_group[i] = new_handler
| dax/jcl | src/jcl/jabber/__init__.py | Python | lgpl-2.1 | 2,413 |
from handlers.auth_handler import LoginHandler, LogoutHandler, SignupHandler
from handlers.index_handler import IndexHandler
from handlers.user_handler import UsersHandler, UserHandler
from handlers.group_handler import GroupsHandler, GroupHandler, GroupUserHandler, GroupEditHandler, GroupMemberAdditionHandler, GroupDeleteHandler
from handlers.room_handler import RoomsHandler, RoomHandler, RoomEditHandler, RoomDeleteHandler, RoomSocketHandler, BrainstormingHandler
from tornado.web import url
# Route table mapping URL regexes to handler classes.  Each route is named
# so templates and handlers can build links via reverse_url().
url_patterns = (
    url(r'/', IndexHandler, name='index'),
    url(r'/auth/login/', LoginHandler, name='login'),
    url(r'/auth/logout/', LogoutHandler, name='logout'),
    url(r'/auth/signup/', SignupHandler, name='signup'),
    url(r'/users/', UsersHandler, name='users'),
    url(r'/users/([0-9]+)/', UserHandler, name='user'),
    url(r'/groups/', GroupsHandler, name='groups'),
    url(r'/groups/([0-9]+)/', GroupHandler, name='group'),
    url(r'/groups/([0-9]+)/edit', GroupEditHandler, name='group_edit'),
    url(r'/groups/([0-9]+)/delete', GroupDeleteHandler, name='group_delete'),
    url(r'/groups/([0-9]+)/members/', GroupUserHandler, name='group_user'),
    url(r'/groups/([0-9]+)/search_new_members/', GroupMemberAdditionHandler, name='member_addition'),
    url(r'/groups/([0-9]+)/rooms/', RoomsHandler, name='rooms'),
    url(r'/groups/([0-9]+)/rooms/([0-9]+)/', RoomHandler, name='room'),
    url(r'/groups/([0-9]+)/rooms/([0-9]+)/edit', RoomEditHandler, name='room_edit'),
    url(r'/groups/([0-9]+)/rooms/([0-9]+)/delete', RoomDeleteHandler, name='room_delete'),
    #url(r'/websocket', RoomSocketHandler, name='room_socket'),
    url(r'/websocket', BrainstormingHandler, name='room_socket'),
) | Hironsan/Brain_Hacker | urls.py | Python | mit | 1,719 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
This module provides the support of CSV file for araisan.
"""
import sys
import csv
class CSVWriter:
    """
    CSV Writer

    Writes dict rows to ``datatarget``; the header row (taken from
    ``config['fieldnames']``) is written immediately on construction.
    Call :meth:`close` -- or use the instance as a context manager -- to
    flush and release the file handle, which the original version leaked.
    """
    def __init__(self, datatarget, config):
        """
        Init CSVWriter with datatarget and config
        """
        self.__csvfile__ = open(datatarget, 'w')
        self.__writer__ = csv.DictWriter(self.__csvfile__, config['fieldnames'])
        self.__writer__.writeheader()
    def __write__(self, data):
        """
        This writes data to predefined datatarget
        """
        self.__writer__.writerow(data)
    def close(self):
        """
        Flush pending rows and close the underlying file.
        """
        self.__csvfile__.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Always close, whether or not the with-block raised.
        self.close()
        return False
| moeoverflow/Araisan | araisan/Writer/CSVWriter.py | Python | mit | 616 |
import sys
def main():
import argparse
import redis
import time
import pickle
import traceback
import tldextract
from scutils.redis_queue import RedisPriorityQueue
from scutils.redis_throttled_queue import RedisThrottledQueue
parser = argparse.ArgumentParser(description="Scrapy Cluster Migration "
"script. Use to upgrade any part of "
"Scrapy Cluster. Not recommended for "
"use while your cluster"
" is running.")
parser.add_argument('-r', '--redis-host', action='store', required=True,
help="The Redis host ip")
parser.add_argument('-p', '--redis-port', action='store', default='6379',
help="The Redis port")
parser.add_argument('-sv', '--start-version', action='store',
help="The current cluster version", required=True,
choices=['1.0'])
parser.add_argument('-ev', '--end-version', action='store', default='1.1',
help="The desired cluster version", required=True,
choices=['1.1'])
args = vars(parser.parse_args())
current_version = args['start_version']
start_time = time.time()
redis_conn = redis.Redis(args['redis_host'], args['redis_port'])
try:
# in the future there may be more versions that need upgraded
# Upgrade 1.0 to 1.1
if current_version == '1.0':
print "Upgrading Cluster from 1.0 to 1.1"
extract = tldextract.TLDExtract()
queue_keys = redis_conn.keys("*:queue")
for queue in queue_keys:
elements = queue.split(":")
spider = elements[0]
if len(elements) == 2:
print "Upgrading", spider, "spider"
old_count = redis_conn.zcard(queue)
# loop through all elements
for item in redis_conn.zscan_iter(queue):
item_key = item[0]
item = pickle.loads(item_key)
# format key
ex_res = extract(item['url'])
key = "{sid}:{dom}.{suf}:queue".format(
sid=item['spiderid'],
dom=ex_res.domain,
suf=ex_res.suffix)
val = pickle.dumps(item, protocol=-1)
# shortcut to shove stuff into the priority queue
redis_conn.zadd(key, val, -item['priority'])
# loop through all new keys
new_count = 0
for key in redis_conn.keys('{s}:*:queue'.format(s=spider)):
new_count = new_count + redis_conn.zcard(key)
if new_count == old_count:
print "Successfully migrated", new_count, "requests for",\
spider, "spider"
redis_conn.delete(queue)
else:
print "Unknown error when migrating requessts {o}/{n}"\
.format(o=old_count, n=new_count)
sys.exit(1)
current_version = '1.1'
except Exception as e:
print "Error Upgrading Cluster."
print traceback.print_exc()
sys.exit(1)
completion_time = int(start_time - time.time())
print "Cluster upgrade complete in", "%.2f" % completion_time, "seconds."
print "Upgraded cluster from " + args['start_version'] + " to " \
+ args['end_version']
if __name__ == "__main__":
sys.exit(main()) | quixey/scrapy-cluster | migrate.py | Python | mit | 3,812 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Country`` lookup
    table and the geocodable ``Address`` model (unique per full address,
    with a PostGIS point and raw geocoder results)."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Address',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('address_line1', models.CharField(max_length=100, verbose_name=b'Address line 1')),
                ('address_line2', models.CharField(max_length=100, verbose_name=b'Address line 2', blank=True)),
                ('city', models.CharField(max_length=50)),
                ('state_province', models.CharField(max_length=40, verbose_name=b'State/Province', blank=True)),
                ('postal_code', models.CharField(max_length=10, verbose_name=b'Postal Code')),
                ('geom', django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, blank=True)),
                ('geocode_results', jsonfield.fields.JSONField(null=True, blank=True)),
            ],
            options={
                'verbose_name_plural': 'Addresses',
            },
        ),
        migrations.CreateModel(
            name='Country',
            fields=[
                ('iso_code', models.CharField(max_length=2, serialize=False, primary_key=True)),
                ('name', models.CharField(max_length=45)),
            ],
            options={
                'ordering': ['name', 'iso_code'],
                'verbose_name_plural': 'Countries',
            },
        ),
        migrations.AddField(
            model_name='address',
            name='country',
            field=models.ForeignKey(to='firecares_core.Country'),
        ),
        migrations.AlterUniqueTogether(
            name='address',
            unique_together=set([('address_line1', 'address_line2', 'postal_code', 'city', 'state_province', 'country')]),
        ),
    ]
| garnertb/firecares | firecares/firecares_core/migrations/0001_initial.py | Python | mit | 2,028 |
import sublime
import sublime_plugin
import subprocess
import threading
class PicoRunCartCommand(sublime_plugin.TextCommand):
    """Sublime Text command: launch PICO-8 and run the cart open in the
    active view.

    Reads the PICO-8 executable path from the ``pico-8_path`` setting and
    spawns it on a background thread so the editor stays responsive.
    """
    def run(self, edit):
        def target():
            # NOTE(review): shell=True with hand-quoted paths breaks on
            # names containing quotes; an argument list with shell=False
            # would be safer.
            subprocess.Popen(self.cmd, bufsize=-1, stdin=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        pico8_path = self.view.settings().get("pico-8_path", "undefined")
        # Check the raw setting value: the old code wrapped it in quotes
        # first, so the comparison against "undefined" could never match
        # and the error dialog never appeared.
        if pico8_path == "undefined":
            sublime.error_message("Error: \"pico-8_path\" is not defined !\n\nRun \"PICO-8: Setup Path\" from the Command Palette.")
            return
        pico8 = "\"" + pico8_path + "\""
        cart = "\"" + self.view.file_name() + "\""
        self.cmd = pico8 + " -run " + cart
        threading.Thread(target=target).start()
| Neko250/sublime-PICO-8 | pico_run_cart.py | Python | mit | 630 |
import os

from setuptools import setup

version = '0.2'
description = "A command-line tool for identification and Extraction of Variant Attributes."

cur_dir = os.path.dirname(__file__)
# Close the files we read instead of leaking the handles.
with open(os.path.join(cur_dir, 'REQUIREMENTS')) as f:
    requirements = f.read()
try:
    with open(os.path.join(cur_dir, 'README.rst')) as f:
        long_description = f.read()
except (IOError, OSError):
    # Fall back to the short description when README.rst is not shipped
    # (e.g. in an sdist).  The previous bare ``except:`` also swallowed
    # KeyboardInterrupt and SystemExit.
    long_description = description

setup(
    name = "iEVA",
    version = version,
    url = 'https://github.com/Matteodigg/iEVA',
    license = 'MIT',
    description = description,
    long_description = long_description,
    author = 'Matteo Di Giovannantonio & Mario Urtis',
    author_email = 'matteodeg@gmail.com',
    packages = ['iEVA'],
    install_requires = requirements,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python',
    ],
)
| Matteodigg/iEVA | setup.py | Python | mit | 835 |
# -*- coding: utf-8 -*-
from django.db import models
# Create your models here.
class Category(models.Model):
    """A named content category with a short description."""
    class Meta:
        verbose_name = 'category'
        verbose_name_plural = 'categories'
        ordering = ['created_at']
    # NOTE(review): created_at uses auto_now=True, so it is refreshed on
    # every save -- auto_now_add is the usual choice for a creation stamp.
    name = models.CharField(max_length=50)
    description = models.TextField(max_length=255)
    created_at = models.DateField(auto_now=True)
    def __str__(self):
        return self.name
| Brunux/shityjobs | categories/models.py | Python | mit | 435 |
###########################################################################
#
# Copyright (c) 2015 Adobe Systems Incorporated. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
# This file is build on rdd.py inside $SPARK_HOME/python/pyspark and
# extend it to include basic functions that support GPU operations
# for an RDD. The extentsion include all functions with name <gpu*>
import copy
from collections import defaultdict
from itertools import chain, ifilter, imap
import operator
import sys
import shlex
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
import heapq
import bisect
import random
import socket
from math import sqrt, log, isinf, isnan, pow, ceil
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, \
get_used_memory, ExternalSorter
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
# Numbapro and pycuda modules.
# To use Numbapro, get a license from https://store.continuum.io/cshop/accelerate/
from numbapro import vectorize
import numbapro.cudalib.sorting as sorting
import numpy as np
from pycuda import gpuarray, reduction
import time
__all__ = ["RDD"]
# TODO: for Python 3.3+, PYTHONHASHSEED should be reset to disable randomized
# hash for string
def portable_hash(x):
    """
    This function returns consistant hash code for builtin types, especially
    for None and tuple with None.

    The algrithm is similar to that one used by CPython 2.7

    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """
    if x is None:
        return 0
    if isinstance(x, tuple):
        # Combine element hashes the way CPython's tuple hash does so the
        # result is stable across worker processes (needed for partitioning).
        h = 0x345678
        for i in x:
            h ^= portable_hash(i)
            h *= 1000003
            h &= sys.maxint  # NOTE(review): sys.maxint is Python 2 only
        h ^= len(x)
        # -1 is reserved as an error value in CPython's hash protocol.
        if h == -1:
            h = -2
        return h
    return hash(x)
class BoundedFloat(float):
    """
    Bounded value is generated by approximate job, with confidence and low
    bound and high bound.

    >>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
    100.0
    """
    def __new__(cls, mean, confidence, low, high):
        # float is immutable, so the value must be fixed in __new__;
        # the bound metadata is attached as plain attributes afterwards.
        instance = super(BoundedFloat, cls).__new__(cls, mean)
        instance.confidence = confidence
        instance.low = low
        instance.high = high
        return instance
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1] not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
    # Generator: connect to the local server at `port` that streams job
    # results and yield deserialized items.  The finally clause closes the
    # socket when iteration finishes or the generator is closed.
    sock = socket.socket()
    sock.settimeout(3)
    try:
        sock.connect(("localhost", port))
        # Buffered binary file interface over the socket for the serializer.
        rf = sock.makefile("rb", 65536)
        for item in serializer.load_stream(rf):
            yield item
    finally:
        sock.close()
class Partitioner(object):
    """Pairs a partition function with a partition count and maps a key to
    its partition index when called."""

    def __init__(self, numPartitions, partitionFunc):
        self.numPartitions = numPartitions
        self.partitionFunc = partitionFunc

    def __eq__(self, other):
        # Only another Partitioner with the same count and function compares
        # equal; anything else is simply unequal (no NotImplemented dance).
        if not isinstance(other, Partitioner):
            return False
        return (self.numPartitions == other.numPartitions
                and self.partitionFunc == other.partitionFunc)

    def __call__(self, k):
        return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
    def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
        # jrdd: Py4J proxy for the JVM-side RDD; ctx: the owning SparkContext.
        # NOTE(review): the default serializer instance is evaluated once at
        # class-definition time and shared by every RDD using the default.
        self._jrdd = jrdd
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = ctx
        self._jrdd_deserializer = jrdd_deserializer
        self._id = jrdd.id()
        self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY_SER}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY_SER)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY_SER}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
# def map(self, f, preservesPartitioning=False):
# """
# Return a new RDD by applying a function to each element of this RDD.
#
# >>> rdd = sc.parallelize(["b", "a", "c"])
# >>> sorted(rdd.map(lambda x: (x, 1)).collect())
# [('a', 1), ('b', 1), ('c', 1)]
# """
# def func(_, iterator):
# return imap(f, iterator)
# return self.mapPartitionsWithIndex(func, preservesPartitioning)
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return imap(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(imap(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
    def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        :param f: function of (partition index, iterator) returning an iterable
        :param preservesPartitioning: whether this RDD's partitioner still
            applies to the result

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithIndex(f).sum()
        6
        """
        # PipelinedRDD fuses consecutive narrow transformations into one task.
        return PipelinedRDD(self, f, preservesPartitioning)
    def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
        """
        Deprecated: use mapPartitionsWithIndex instead.

        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithSplit(f).sum()
        6
        """
        # stacklevel=2 attributes the warning to the caller's line.
        warnings.warn("mapPartitionsWithSplit is deprecated; "
                      "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
        return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return ifilter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda (x, _): x)
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
>>> rdd = sc.parallelize(range(100), 4)
>>> rdd.sample(False, 0.1, 81).count()
10
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
    def randomSplit(self, weights, seed=None):
        """
        Randomly splits this RDD with the provided weights.

        :param weights: weights for splits, will be normalized if they don't sum to 1
        :param seed: random seed
        :return: split RDDs in a list

        >>> rdd = sc.parallelize(range(5), 1)
        >>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
        >>> rdd1.collect()
        [1, 3]
        >>> rdd2.collect()
        [0, 2, 4]
        """
        # Turn the weights into cumulative boundaries in [0, 1].
        s = float(sum(weights))
        cweights = [0.0]
        for w in weights:
            cweights.append(cweights[-1] + w / s)
        if seed is None:
            seed = random.randint(0, 2 ** 32 - 1)
        # Every split shares the same seed, so each element gets the same
        # random draw in every split; the [lb, ub) ranges then make the
        # splits disjoint and jointly exhaustive.
        return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
                for lb, ub in zip(cweights, cweights[1:])]
    # this is ported from scala/spark/RDD.scala
    def takeSample(self, withReplacement, num, seed=None):
        """
        Return a fixed-size sampled subset of this RDD.

        >>> rdd = sc.parallelize(range(0, 10))
        >>> len(rdd.takeSample(True, 20, 1))
        20
        >>> len(rdd.takeSample(False, 5, 2))
        5
        >>> len(rdd.takeSample(False, 15, 3))
        10
        """
        # Safety margin (in standard deviations) used for the overflow guard
        # on the sample size below.
        numStDev = 10.0
        if num < 0:
            raise ValueError("Sample size cannot be negative.")
        elif num == 0:
            return []
        initialCount = self.count()
        if initialCount == 0:
            return []
        rand = random.Random(seed)
        if (not withReplacement) and num >= initialCount:
            # Asking for at least the whole RDD without replacement:
            # shuffle current RDD and return
            samples = self.collect()
            rand.shuffle(samples)
            return samples
        maxSampleSize = sys.maxint - int(numStDev * sqrt(sys.maxint))
        if num > maxSampleSize:
            raise ValueError(
                "Sample size cannot be greater than %d." % maxSampleSize)
        # Oversample so that one distributed pass almost always returns
        # at least `num` elements.
        fraction = RDD._computeFractionForSampleSize(
            num, initialCount, withReplacement)
        samples = self.sample(withReplacement, fraction, seed).collect()
        # If the first sample didn't turn out large enough, keep trying to take samples;
        # this shouldn't happen often because we use a big multiplier for their initial size.
        # See: scala/spark/RDD.scala
        while len(samples) < num:
            # TODO: add log warning for when more than one iteration was run
            seed = rand.randint(0, sys.maxint)
            samples = self.sample(withReplacement, fraction, seed).collect()
        # Shuffle so the returned prefix is an unbiased subset of the sample.
        rand.shuffle(samples)
        return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
    def union(self, other):
        """
        Return the union of this RDD and another one.

        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> rdd.union(rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        if self._jrdd_deserializer == other._jrdd_deserializer:
            rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
                      self._jrdd_deserializer)
        else:
            # These RDDs contain data in different serialized formats, so we
            # must normalize them to the default serializer.
            self_copy = self._reserialize()
            other_copy = other._reserialize()
            rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
                      self.ctx.serializer)
        # If both inputs share a partitioner and the union kept the same
        # number of partitions, the partitioner still describes the result.
        if (self.partitioner == other.partitioner and
                self.getNumPartitions() == rdd.getNumPartitions()):
            rdd.partitioner = self.partitioner
        return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda (k, vs): all(vs)) \
.keys()
    def _reserialize(self, serializer=None):
        # Return an RDD whose elements are encoded with `serializer`
        # (defaults to the context's serializer). If re-encoding is needed,
        # an identity map adds a pipeline stage that rewrites the data;
        # rebinding the local name `self` leaves the caller's RDD untouched.
        serializer = serializer or self.ctx.serializer
        if self._jrdd_deserializer != serializer:
            self = self.map(lambda x: x, preservesPartitioning=True)
            self._jrdd_deserializer = serializer
        return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
    def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
                                           ascending=True, keyfunc=lambda x: x):
        """
        Repartition the RDD according to the given partitioner and, within each resulting partition,
        sort records by their keys.

        :param numPartitions: target number of partitions (defaults to the
            default reduce parallelism)
        :param partitionFunc: hash function mapping a key to a partition
        :param ascending: sort direction within each partition
        :param keyfunc: function extracting the sort key from each record key

        >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
        >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
        >>> rdd2.glom().collect()
        [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        # Spill to disk while sorting unless spark.shuffle.spill disables it;
        # the worker memory cap bounds the in-memory sort buffer.
        spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == "true")
        memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
        serializer = self._jrdd_deserializer
        def sortPartition(iterator):
            # 0.9 leaves headroom below the configured worker memory limit.
            sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
            return iter(sort(iterator, key=lambda (k, v): keyfunc(k), reverse=(not ascending)))
        return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
    def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
        """
        Sorts this RDD, which is assumed to consist of (key, value) pairs.
        # noqa

        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortByKey().first()
        ('1', 3)
        >>> sc.parallelize(tmp).sortByKey(True, 1).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
        >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
        >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
        [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        # Spill to disk while sorting unless spark.shuffle.spill disables it.
        spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == 'true')
        memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
        serializer = self._jrdd_deserializer
        def sortPartition(iterator):
            # 0.9 leaves headroom below the configured worker memory limit.
            sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
            return iter(sort(iterator, key=lambda (k, v): keyfunc(k), reverse=(not ascending)))
        if numPartitions == 1:
            # Single output partition: skip the shuffle and sort locally.
            if self.getNumPartitions() > 1:
                self = self.coalesce(1)
            return self.mapPartitions(sortPartition, True)
        # first compute the boundary of each part via sampling: we want to partition
        # the key-space into bins such that the bins have roughly the same
        # number of (key, value) pairs falling into them
        rddSize = self.count()
        if not rddSize:
            return self  # empty RDD
        maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
        fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
        samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
        samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts but one of the them has
        # an implicit boundary
        bounds = [samples[len(samples) * (i + 1) / numPartitions]
                  for i in range(0, numPartitions - 1)]
        def rangePartitioner(k):
            # bisect finds the bin whose [lower, upper) range contains k.
            p = bisect.bisect_left(bounds, keyfunc(k))
            if ascending:
                return p
            else:
                # Descending order: mirror the bin index.
                return numPartitions - 1 - p
        return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
out.write(str(obj).rstrip('\n') + '\n')
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip('\n') for x in iter(pipe.stdout.readline, ''))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
    def foreachPartition(self, f):
        """
        Applies a function to each partition of this RDD.

        >>> def f(iterator):
        ...      for x in iterator:
        ...           print x
        >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
        """
        def func(it):
            r = f(it)
            # f is called for its side effects; if it happens to return an
            # iterable, drain it so any lazy work inside actually runs.
            try:
                return iter(r)
            except TypeError:
                # Non-iterable return (e.g. None): nothing to drain.
                return iter([])
        self.mapPartitions(func).count()  # Force evaluation
    def collect(self):
        """
        Return a list that contains all of the elements in this RDD.
        """
        # Record the Python call site for the Spark UI, run the job, and
        # stream the results back over a local socket served by the JVM.
        with SCCallSiteSync(self.context) as css:
            port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
        return list(_load_from_socket(port, self._jrdd_deserializer))
    def reduce(self, f):
        """
        Reduces the elements of this RDD using the specified commutative and
        associative binary operator. Currently reduces partitions locally.

        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
        15
        >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
        10
        >>> sc.parallelize([]).reduce(add)
        Traceback (most recent call last):
            ...
        ValueError: Can not reduce() empty RDD
        """
        def func(iterator):
            iterator = iter(iterator)
            try:
                initial = next(iterator)
            except StopIteration:
                # Empty partition: contribute nothing to the final reduce.
                return
            yield reduce(f, iterator, initial)
        # One partial result per non-empty partition, merged on the driver.
        vals = self.mapPartitions(func).collect()
        if vals:
            return reduce(f, vals)
        raise ValueError("Can not reduce() empty RDD")
    def treeReduce(self, f, depth=2):
        """
        Reduces the elements of this RDD in a multi-level tree pattern.

        :param depth: suggested depth of the tree (default: 2)

        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeReduce(add)
        -5
        >>> rdd.treeReduce(add, 1)
        -5
        >>> rdd.treeReduce(add, 2)
        -5
        >>> rdd.treeReduce(add, 5)
        -5
        >>> rdd.treeReduce(add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
        # reduce has no zero value, but treeAggregate needs one; tag each
        # element with a flag so the dummy zero can be told apart from data.
        zeroValue = None, True  # Use the second entry to indicate whether this is a dummy value.
        def op(x, y):
            if x[1]:
                # x is the dummy: the merge result is just y.
                return y
            elif y[1]:
                return x
            else:
                return f(x[0], y[0]), False
        reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
        if reduced[1]:
            # Only the dummy survived: there were no elements at all.
            raise ValueError("Cannot reduce empty RDD.")
        return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
    def aggregate(self, zeroValue, seqOp, combOp):
        """
        Aggregate the elements of each partition, and then the results for all
        the partitions, using a given combine functions and a neutral "zero
        value."

        The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
        as its result value to avoid object allocation; however, it should not
        modify C{t2}.

        The first function (seqOp) can return a different result type, U, than
        the type of this RDD. Thus, we need one operation for merging a T into
        an U and one operation for merging two U

        >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
        >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
        >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
        (10, 4)
        >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
        (0, 0)
        """
        def func(iterator):
            # Fold this partition's elements (type T) into an accumulator of
            # type U with seqOp; yield the single per-partition result.
            acc = zeroValue
            for obj in iterator:
                acc = seqOp(acc, obj)
            yield acc
        # Merge the per-partition U values with combOp, seeded by zeroValue.
        return self.mapPartitions(func).fold(zeroValue, combOp)
    def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
        """
        Aggregates the elements of this RDD in a multi-level tree
        pattern.

        :param depth: suggested depth of the tree (default: 2)

        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeAggregate(0, add, add)
        -5
        >>> rdd.treeAggregate(0, add, add, 1)
        -5
        >>> rdd.treeAggregate(0, add, add, 2)
        -5
        >>> rdd.treeAggregate(0, add, add, 5)
        -5
        >>> rdd.treeAggregate(0, add, add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
        if self.getNumPartitions() == 0:
            return zeroValue
        def aggregatePartition(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = seqOp(acc, obj)
            yield acc
        partiallyAggregated = self.mapPartitions(aggregatePartition)
        numPartitions = partiallyAggregated.getNumPartitions()
        # Branching factor of the merge tree, derived from the requested depth.
        scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
        # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
        # aggregation.
        while numPartitions > scale + numPartitions / scale:
            # NOTE: integer division here (Python 2 semantics) keeps
            # numPartitions an int at each level of the tree.
            numPartitions /= scale
            curNumPartitions = numPartitions
            def mapPartition(i, iterator):
                # Spread partial results over the smaller partition count.
                for obj in iterator:
                    yield (i % curNumPartitions, obj)
            partiallyAggregated = partiallyAggregated \
                .mapPartitionsWithIndex(mapPartition) \
                .reduceByKey(combOp, curNumPartitions) \
                .values()
        # Final merge of the remaining partials on the driver.
        return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
    def histogram(self, buckets):
        """
        Compute a histogram using the provided buckets. The buckets
        are all open to the right except for the last which is closed.
        e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
        and 50 we would have a histogram of 1,0,1.

        If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) inseration to O(1) per
        element(where n = # buckets).

        Buckets must be sorted and not contain any duplicates, must be
        at least two elements.

        If `buckets` is a number, it will generates buckets which are
        evenly spaced between the minimum and maximum of the RDD. For
        example, if the min value is 0 and the max is 100, given buckets
        as 2, the resulting buckets will be [0,50) [50,100]. buckets must
        be at least 1 If the RDD contains infinity, NaN throws an exception
        If the elements in RDD do not vary (max == min) always returns
        a single bucket.

        It will return an tuple of buckets and histogram.

        >>> rdd = sc.parallelize(range(51))
        >>> rdd.histogram(2)
        ([0, 25, 50], [25, 26])
        >>> rdd.histogram([0, 5, 25, 50])
        ([0, 5, 25, 50], [5, 20, 26])
        >>> rdd.histogram([0, 15, 30, 45, 60])  # evenly spaced buckets
        ([0, 15, 30, 45, 60], [15, 15, 15, 6])
        >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
        >>> rdd.histogram(("a", "b", "c"))
        (('a', 'b', 'c'), [2, 2])
        """
        if isinstance(buckets, (int, long)):
            if buckets < 1:
                raise ValueError("number of buckets must be >= 1")
            # filter out non-comparable elements
            def comparable(x):
                if x is None:
                    return False
                if type(x) is float and isnan(x):
                    return False
                return True
            filtered = self.filter(comparable)
            # faster than stats()
            def minmax(a, b):
                return min(a[0], b[0]), max(a[1], b[1])
            try:
                minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
            except TypeError as e:
                # reduce() raises with " empty " in its message on empty RDDs;
                # re-raise anything else unchanged.
                if " empty " in str(e):
                    raise ValueError("can not generate buckets from empty RDD")
                raise
            if minv == maxv or buckets == 1:
                # Degenerate range: a single bucket holds everything.
                return [minv, maxv], [filtered.count()]
            try:
                inc = (maxv - minv) / buckets
            except TypeError:
                raise TypeError("Can not generate buckets with non-number in RDD")
            if isinf(inc):
                raise ValueError("Can not generate buckets with infinite value")
            # keep them as integer if possible
            if inc * buckets != maxv - minv:
                inc = (maxv - minv) * 1.0 / buckets
            buckets = [i * inc + minv for i in range(buckets)]
            buckets.append(maxv)  # fix accumulated error
            even = True
        elif isinstance(buckets, (list, tuple)):
            if len(buckets) < 2:
                raise ValueError("buckets should have more than one value")
            if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
                raise ValueError("can not have None or NaN in buckets")
            if sorted(buckets) != list(buckets):
                raise ValueError("buckets should be sorted")
            if len(set(buckets)) != len(buckets):
                raise ValueError("buckets should not contain duplicated values")
            minv = buckets[0]
            maxv = buckets[-1]
            even = False
            inc = None
            try:
                steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
            except TypeError:
                pass  # objects in buckets do not support '-'
            else:
                # User-supplied buckets that happen to be evenly spaced still
                # get the O(1)-per-element fast path below.
                if max(steps) - min(steps) < 1e-10:  # handle precision errors
                    even = True
                    inc = (maxv - minv) / (len(buckets) - 1)
        else:
            raise TypeError("buckets should be a list or tuple or number(int or long)")
        def histogram(iterator):
            counters = [0] * len(buckets)
            for i in iterator:
                # Skip None/NaN and anything outside [minv, maxv].
                if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                    continue
                # Even spacing: direct index; otherwise binary search.
                t = (int((i - minv) / inc) if even
                     else bisect.bisect_right(buckets, i) - 1)
                counters[t] += 1
            # add last two together
            last = counters.pop()
            counters[-1] += last
            return [counters]
        def mergeCounters(a, b):
            return [i + j for i, j in zip(a, b)]
        return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.iteritems():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from a RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from a RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
    def take(self, num):
        """
        Take the first num elements of the RDD.

        It works by first scanning one partition, and use the results from
        that partition to estimate the number of additional partitions needed
        to satisfy the limit.

        Translated from the Scala implementation in RDD#take().

        >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
        [2, 3]
        >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
        [2, 3, 4, 5, 6]
        >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
        [91, 92, 93]
        """
        items = []
        totalParts = self._jrdd.partitions().size()
        partsScanned = 0
        while len(items) < num and partsScanned < totalParts:
            # The number of partitions to try in this iteration.
            # It is ok for this number to be greater than totalParts because
            # we actually cap it at totalParts in runJob.
            numPartsToTry = 1
            if partsScanned > 0:
                # If we didn't find any rows after the previous iteration,
                # quadruple and retry.  Otherwise, interpolate the number of
                # partitions we need to try, but overestimate it by 50%.
                # We also cap the estimation in the end.
                if len(items) == 0:
                    numPartsToTry = partsScanned * 4
                else:
                    # the first paramter of max is >=1 whenever partsScanned >= 2
                    numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
                    numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
            left = num - len(items)
            def takeUpToNumLeft(iterator):
                # Stop pulling from the partition once `left` items are taken;
                # StopIteration from next() simply ends this generator.
                iterator = iter(iterator)
                taken = 0
                while taken < left:
                    yield next(iterator)
                    taken += 1
            # Run the job only on the next batch of partition indices.
            p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
            res = self.context.runJob(self, takeUpToNumLeft, p, True)
            items += res
            partsScanned += numPartsToTry
        return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all. Note that an RDD
may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self._jrdd.partitions().size() == 0 or len(self.take(1)) == 0
    def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
        converted for output using either user specified converters or, by default,
        L{org.apache.spark.api.python.JavaToWritableConverter}.

        :param conf: Hadoop job configuration, passed in as a dict
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        # Ensure the stream is pickle-serialized before handing it to the JVM.
        pickledRDD = self._pickled()
        # Trailing True selects the new (mapreduce) OutputFormat API.
        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                    keyConverter, valueConverter, True)
    def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                               keyConverter=None, valueConverter=None, conf=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
        will be inferred if not specified. Keys and values are converted for output using either
        user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
        C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
        of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

        :param path: path to Hadoop file
        :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.IntWritable", None by default)
        :param valueClass: fully qualified classname of value Writable class
               (e.g. "org.apache.hadoop.io.Text", None by default)
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        :param conf: Hadoop job configuration, passed in as a dict (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        # Ensure the stream is pickle-serialized before handing it to the JVM.
        pickledRDD = self._pickled()
        self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
                                                       outputFormatClass,
                                                       keyClass, valueClass,
                                                       keyConverter, valueConverter, jconf)
    def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
        converted for output using either user specified converters or, by default,
        L{org.apache.spark.api.python.JavaToWritableConverter}.

        :param conf: Hadoop job configuration, passed in as a dict
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        # Ensure the stream is pickle-serialized before handing it to the JVM.
        pickledRDD = self._pickled()
        # Trailing False selects the old (mapred) OutputFormat API.
        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                    keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).collect())
[1, 2, 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> ''.join(sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed)))
'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda (k, v): k)
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda (k, v): v)
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.iteritems():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
    def partitionBy(self, numPartitions, partitionFunc=portable_hash):
        """
        Return a copy of the RDD partitioned using the specified partitioner.

        :param numPartitions: number of partitions (None uses
            self._defaultReducePartitions())
        :param partitionFunc: hash function mapping a key to an int;
            portable_hash by default so None hashes identically across machines

        >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
        >>> sets = pairs.partitionBy(2).glom().collect()
        >>> set(sets[0]).intersection(set(sets[1]))
        set([])
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        partitioner = Partitioner(numPartitions, partitionFunc)
        # Already partitioned the same way: nothing to do.
        if self.partitioner == partitioner:
            return self
        # Transferring O(n) objects to Java is too expensive.
        # Instead, we'll form the hash buckets in Python,
        # transferring O(numPartitions) objects to Java.
        # Each object is a (splitNumber, [objects]) pair.
        # In order to avoid too huge objects, the objects are
        # grouped into chunks.
        outputSerializer = self.ctx._unbatched_serializer
        # Flush buckets when Python worker memory use exceeds half the budget.
        limit = (_parse_memory(self.ctx._conf.get(
            "spark.python.worker.memory", "512m")) / 2)

        def add_shuffle_key(split, iterator):
            # Emit an alternating stream of pack_long(bucket-id) /
            # serialized-chunk pairs for the JVM side to reassemble.
            buckets = defaultdict(list)
            c, batch = 0, min(10 * numPartitions, 1000)
            for k, v in iterator:
                buckets[partitionFunc(k) % numPartitions].append((k, v))
                c += 1
                # check used memory and avg size of chunk of objects
                if (c % 1000 == 0 and get_used_memory() > limit
                        or c > batch):
                    n, size = len(buckets), 0
                    for split in buckets.keys():
                        yield pack_long(split)
                        d = outputSerializer.dumps(buckets[split])
                        del buckets[split]
                        yield d
                        size += len(d)
                    # Adapt the batch size so serialized chunks stay 1M-10M.
                    avg = (size / n) >> 20
                    # let 1M < avg < 10M
                    if avg < 1:
                        batch *= 1.5
                    elif avg > 10:
                        batch = max(batch / 1.5, 1)
                    c = 0
            # Flush whatever is left at the end of the partition.
            for split, items in buckets.iteritems():
                yield pack_long(split)
                yield outputSerializer.dumps(items)

        keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
        keyed._bypass_serializer = True
        with SCCallSiteSync(self.context) as css:
            pairRDD = self.ctx._jvm.PairwiseRDD(
                keyed._jrdd.rdd()).asJavaPairRDD()
            jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                           id(partitionFunc))
        jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
        rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
        # Remember the partitioner so repeated partitionBy calls can no-op.
        rdd.partitioner = partitioner
        return rdd
# TODO: add control over map-side aggregation
    def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                     numPartitions=None):
        """
        Generic function to combine the elements for each key using a custom
        set of aggregation functions.

        Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
        type" C.  Note that V and C can be different -- for example, one might
        group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).

        Users provide three functions:

            - C{createCombiner}, which turns a V into a C (e.g., creates
              a one-element list)
            - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
              a list)
            - C{mergeCombiners}, to combine two C's into a single one.

        In addition, users can control the partitioning of the output RDD.

        >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> def f(x): return x
        >>> def add(a, b): return a + str(b)
        >>> sorted(x.combineByKey(str, add, add).collect())
        [('a', '11'), ('b', '1')]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        serializer = self.ctx.serializer
        # Spilling to disk is enabled unless explicitly turned off in the conf.
        spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower()
                 == 'true')
        memory = _parse_memory(self.ctx._conf.get(
            "spark.python.worker.memory", "512m"))
        agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

        def combineLocally(iterator):
            # Map-side combine: merge raw values into combiners per partition.
            # The 0.9 factor leaves headroom below the worker memory budget.
            merger = ExternalMerger(agg, memory * 0.9, serializer) \
                if spill else InMemoryMerger(agg)
            merger.mergeValues(iterator)
            return merger.iteritems()

        locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
        shuffled = locally_combined.partitionBy(numPartitions)

        def _mergeCombiners(iterator):
            # Reduce side: merge pre-combined values shuffled from map tasks.
            merger = ExternalMerger(agg, memory, serializer) \
                if spill else InMemoryMerger(agg)
            merger.mergeCombiners(iterator)
            return merger.iteritems()

        return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's, The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions)
def foldByKey(self, zeroValue, func, numPartitions=None):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication.).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> rdd.foldByKey(0, add).collect()
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with into numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> map((lambda (x,y): (x, list(y))), sorted(x.groupByKey().collect()))
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
numPartitions).mapValues(lambda x: ResultIterable(x))
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda (k, v): (k, f(v))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1]), list(y[2]), list(y[3])))), \
sorted(list(w.groupWith(x, y, z).collect())))
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1])))), sorted(list(x.cogroup(y).collect())))
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func((key, vals)):
return vals[0] and not vals[1]
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> map((lambda (x,y): (x, (list(y[0]), (list(y[1]))))), sorted(x.cogroup(y).collect()))
[(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
    def zip(self, other):
        """
        Zips this RDD with another one, returning key-value pairs that match
        up the first element of each RDD, the second element of each RDD, and
        so on. Assumes that the two RDDs have the same number of partitions
        and the same number of elements in each partition (e.g. one was made
        through a map on the other).

        :param other: the RDD to pair with; must have the same number of
            partitions and the same number of elements per partition
        :raises ValueError: if the partition counts differ

        >>> x = sc.parallelize(range(0,5))
        >>> y = sc.parallelize(range(1000, 1005))
        >>> x.zip(y).collect()
        [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
        """
        def get_batch_size(ser):
            # Non-positive/absent batch size means auto-batched or unlimited.
            if isinstance(ser, BatchedSerializer):
                return ser.batchSize
            return 1  # not batched

        def batch_as(rdd, batchSize):
            return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))

        my_batch = get_batch_size(self._jrdd_deserializer)
        other_batch = get_batch_size(other._jrdd_deserializer)
        # The JVM zips serialized batches, so both sides must use the same
        # fixed batch size for elements to line up.
        if my_batch != other_batch or not my_batch:
            # use the smallest batchSize for both of them
            batchSize = min(my_batch, other_batch)
            if batchSize <= 0:
                # auto batched or unlimited
                batchSize = 100
            other = batch_as(other, batchSize)
            self = batch_as(self, batchSize)

        if self.getNumPartitions() != other.getNumPartitions():
            raise ValueError("Can only zip with RDD which has the same number of partitions")

        # There will be an Exception in JVM if there are different number
        # of items in each partitions.
        pairRDD = self._jrdd.zip(other._jrdd)
        deserializer = PairDeserializer(self._jrdd_deserializer,
                                        other._jrdd_deserializer)
        return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partitions.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
name_ = self._jrdd.name()
if name_:
return name_.encode('utf-8')
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.setName('RDD1').name()
'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
"""
values = self.filter(lambda (k, v): k == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)], False)
return values.collect()
def _to_java_object_rdd(self):
""" Return an JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(xrange(1000))
>>> (rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(xrange(1000)) / 1000.0
>>> (rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available
<a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 950 < n < 1050
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 18 < n < 22
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
if relativeSD > 0.37:
raise ValueError("relativeSD should be smaller than 0.37")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
partitions = xrange(self.getNumPartitions())
for partition in partitions:
rows = self.context.runJob(self, lambda x: x, [partition])
for row in rows:
yield row
# RDD Additional functions for GPU MapReduce, Word-Count, and sorting
#
def gpuReduce(self, outtype, mapper,reduceop, arglist):
"""Performs a map or reduce task on GPU by taking C code embedded in strings
>>> rdd = sc.parallelize(range(1,10000))
>>> rdd.gpuReduce(long, "x[i] * y[i]", "+" ,"long *x, long *y")
"""
cpudataset = np.asarray(self.collect())
a = gpuarray.to_gpu(cpudataset)
b = a
if reduceop == "*": initval = "1"
elif reduceop == "+": initval = "0"
else: print("Currently only \"+\" and \"*\" operations are supported \
by GPU reduction")
reduceexpr = "a" + reduceop + "b"
print reduceexpr
krnl = reduction.ReductionKernel(outtype, neutral=initval, map_expr=mapper, reduce_expr=reduceexpr, arguments=arglist)
results = krnl(a, b).get()
print results
    def gpuMap(self, mapper):
        """Performs a map task on GPU by taking C code embedded in strings
        limitations: currently only accepts x as the map expression variable

        >>> rdd = sc.parallelize(range(100000))
        >>> rdd.gpuMap("x * 4")
        """
        # Collect the whole RDD to the driver and move it into GPU memory.
        cpudataset = np.asarray(self.collect())
        x = gpuarray.to_gpu(cpudataset)
        # WARNING: eval() on a caller-supplied string executes arbitrary
        # Python -- never pass untrusted input as `mapper`.  The expression
        # must reference the device array through the local name `x`;
        # .get() copies the mapped result back to host memory.
        results = (eval(mapper)).get()
        return results
def gpuWordCount(self):
"""Performs word count by first rearranging and superposing the input
data to itself and tracking assiging each word a value of 1 by tracking
space key (ASCII code = 32) occurence.
>>> rdd = sc.textFile("README.md")
>>> rdd.gpuWordCount()
"""
import pycuda.driver as cuda
start = time.time()
cpudataset = " ".join(self.collect())
asciidata = np.asarray([ord(x) for x in cpudataset], dtype=np.uint8)
gpudataset = gpuarray.to_gpu(asciidata)
countkrnl = reduction.ReductionKernel(long, neutral = "0",
map_expr = "(a[i] == 32)*(b[i] != 32)",
reduce_expr = "a + b", arguments = "char *a, char *b")
results = 1 + countkrnl(gpudataset[:-1],gpudataset[1:]).get()
return results
    def gpuWCProfiling(self):
        """This function tests where time
        goes during GPU WordCount using
        the collect scheme. It is used in the same way
        gpuWordCount() is used.
        """
        # Each stage below is deliberately timed and printed separately so the
        # cost of collect / conversion / transfer / kernel can be compared.
        print "collecting..."
        start = time.time()
        cpudataset = " ".join(self.collect())
        print (time.time() - start), " seconds"
        print "converting to ascii..."
        start = time.time()
        asciidata = np.asarray([ord(x) for x in cpudataset], dtype=np.uint8)
        print (time.time() - start), " seconds"
        print "copying to gpu..."
        start = time.time()
        gpudataset = gpuarray.to_gpu(asciidata)
        print (time.time() - start), " seconds"
        print "calling kernel:"
        start = time.time()
        # Same shifted self-comparison kernel as gpuWordCount: counts
        # space -> non-space transitions, plus one for the final word.
        countkrnl = reduction.ReductionKernel(long, neutral = "0",
                       map_expr = "(a[i] == 32)*(b[i] != 32)",
                       reduce_expr = "a + b", arguments = "char *a, char *b")
        results = 1 + countkrnl(gpudataset[:-1],gpudataset[1:]).get()
        print (time.time() - start), " seconds"
        return results
    def gpuSort(self):
        """Sorts numerical values:

        >>> import numpy as np
        >> rdd = sc.parallelize(np.random.uniform(-10,10,size=1000))
        >> rdd.gpuSort()
        """
        # Collect the whole RDD to the driver and cast to float64 before
        # radix-sorting on the GPU.
        data = self.collect()
        data = np.asarray(data).astype(np.float64)
        length = len(data)
        # NOTE(review): assumes RadixSort.sort() reorders `data` in place,
        # since the sorted array itself is returned below -- confirm against
        # the GPU sorting library's API.
        sorting.RadixSort(length, np.float64, descending=False, stream=0).sort(data)
        return data
    def gpuSortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
        """
        Sort a key-value RDD by key, performing the per-partition sort on the
        GPU with a radix sort.

        Keys and values are cast to float32 before sorting, so precision may
        be lost for data that does not fit a 32-bit float -- NOTE(review):
        confirm this is acceptable for callers.

        :param ascending: sort order used when range-partitioning the keys
        :param numPartitions: number of output partitions (defaults to
            self._defaultReducePartitions())
        :param keyfunc: function applied to keys when sampling the
            partition boundaries
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        #spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == 'true')
        #memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
        #serializer = self._jrdd_deserializer
        def gpuSortPartition(iterator):
            # Materialize the partition, split into parallel key/value arrays,
            # radix-sort keys on the GPU (values permuted alongside), then
            # re-zip into pairs.
            # NOTE(review): the GPU sort here is always ascending; for
            # ascending=False only the partition order is reversed below, so
            # within-partition order looks inconsistent -- confirm intended.
            iterator = iter(iterator)
            data = list(iterator)
            keys, values = zip(*data)
            length = len(keys)
            keys = np.asarray(keys).astype(np.float32)
            values = np.asarray(values).astype(np.float32)
            gpuobject = sorting.RadixSort(length, np.float32, descending = False, stream=0)
            gpuobject.sort(keys, values, begin_bit=0, end_bit=None)
            data = zip(keys, values)
            #del keys
            #del values
            return data
        if numPartitions == 1:
            # Single output partition: collapse first, then one GPU sort.
            if self.getNumPartitions() > 1:
                self = self.coalesce(1)
            return self.mapPartitions(gpuSortPartition, True)
        # first compute the boundary of each part via sampling: we want to partition
        # the key-space into bins such that the bins have roughly the same
        # number of (key, value) pairs falling into them
        rddSize = self.count()
        if not rddSize:
            return self  # empty RDD
        maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
        fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
        samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
        samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts but one of the them has
        # an implicit boundary
        bounds = [samples[len(samples) * (i + 1) / numPartitions]
                  for i in range(0, numPartitions - 1)]
        def rangePartitioner(k):
            # Binary-search the sampled boundaries; reverse the partition
            # index for descending order.
            p = bisect.bisect_left(bounds, keyfunc(k))
            if ascending:
                return p
            else:
                return numPartitions - 1 - p
        return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(gpuSortPartition, True)
def gpuIntSortByKey(self, descend = False):
    """Sorts <key,val> type by key. This function is limited
    to integer types. For floats, use gpuFloatSortByKey
    >>> import random
    >>> keyvals = zip(random.sample(range(1000),100), random.sample(range(1000),100))
    >>> rdd = sc.parallelize(keyvals)
    >>> rdd.gpuIntSortByKey()
    """
    # Collect the pairs on the driver, split them into parallel key/value
    # arrays, and radix-sort both by key on the GPU.
    pairs = self.collect()
    first, second = zip(*pairs)
    key_arr = np.asarray(first).astype(np.int32)
    val_arr = np.asarray(second).astype(np.int32)
    sorter = sorting.RadixSort(len(first), np.int32, descending = descend, stream=0)
    sorter.sort(key_arr, val_arr, begin_bit=0, end_bit=None)
    return zip(key_arr, val_arr)
def gpuFloatSortByKey(self, descend = False):
    """Sort <key, val> containers by key.
    The function is used in the same way as gpuIntSortByKey but
    this one is applicable to both floats and integers.
    """
    data = self.collect()
    print "collecting finished"
    # Data preparation
    start_prep = time.time()
    keys, values = zip(*data)
    length = len(keys)
    # float32 accommodates both integer and float keys (at float32 precision).
    keys = np.asarray(keys).astype(np.float32)
    values = np.asarray(values).astype(np.float32)
    stop_prep = time.time()
    print "prep time: ", stop_prep - start_prep
    # GPU computation
    start_gpu = time.time()
    gpuobject = sorting.RadixSort(length, np.float32, descending = descend, stream=0)
    gpuobject.sort(keys, values, begin_bit=0, end_bit=None)
    data = zip(keys, values)
    stop_gpu = time.time()
    print "gpu time: ", stop_gpu - start_gpu
    return data
#temp functions to test anaconda accelerate on spark
def getPartId(self):
    """Experimental helper: returns the OS process id of the worker that
    handled each partition.

    NOTE(review): ``part_data`` is captured by the closure; workers mutate
    their own copies, so the driver-side dict printed at the end is not
    expected to reflect worker updates -- presumably this demonstrates
    exactly that. Confirm before relying on it.
    """
    import os
    part_data={}
    def func(iterator):
        iterator = iter(iterator)
        cpu_data = list(iterator)
        cpu_dataset = " ".join(cpu_data)
        ascii_data = np.asarray([ord(x) for x in cpu_dataset], dtype=np.uint8)
        # os.getpid() identifies the worker *process*, not a partition index.
        part_id = os.getpid()
        part_data[part_id] = ascii_data
        print "inside: ", part_data
        yield part_id
    part_ids = self.mapPartitions(func).collect()
    print "outside: ", part_data
    return part_ids
def _prepare_for_python_RDD(sc, command, obj=None):
    """Serialize *command* and gather the broadcast/env/include state needed
    to construct a JVM-side PythonRDD.

    Returns a (pickled_command, broadcast_vars, env, includes) tuple.
    """
    # the serialized command will be compressed by broadcast
    ser = CloudPickleSerializer()
    pickled_command = ser.dumps(command)
    if len(pickled_command) > (1 << 20):  # 1M
        # Large commands are shipped as a broadcast variable rather than
        # being embedded directly in the task description.
        broadcast = sc.broadcast(pickled_command)
        pickled_command = ser.dumps(broadcast)
        # tracking the life cycle by obj
        if obj is not None:
            obj._broadcast = broadcast
    broadcast_vars = ListConverter().convert(
        [x._jbroadcast for x in sc._pickled_broadcast_vars],
        sc._gateway._gateway_client)
    sc._pickled_broadcast_vars.clear()
    env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
    includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
    return pickled_command, broadcast_vars, env, includes
class PipelinedRDD(RDD):
    """
    Pipelined maps:
    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """

    def __init__(self, prev, func, preservesPartitioning=False):
        # BUG FIX: the "first in its stage" branch previously appeared twice
        # in a row; the duplicate re-evaluated prev._is_pipelinable() and the
        # prev._jrdd property a second time for no effect. A single if/else
        # is the correct form.
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            # Fuse this transformation with the previous pipelined one so a
            # single Python worker pass applies both functions.
            prev_func = prev.func

            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        self._jrdd_val = None
        self._id = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
        self.partitioner = prev.partitioner if self.preservesPartitioning else None
        self._broadcast = None

    def __del__(self):
        # Release the command broadcast (if one was created) when this RDD
        # is garbage collected.
        if self._broadcast:
            self._broadcast.unpersist()
            self._broadcast = None

    @property
    def _jrdd(self):
        """Lazily build and cache the JVM-side PythonRDD for this pipeline."""
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            self._jrdd_deserializer = NoOpSerializer()
        if self.ctx.profiler_collector:
            profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
        else:
            profiler = None
        command = (self.func, profiler, self._prev_jrdd_deserializer,
                   self._jrdd_deserializer)
        pickled_cmd, bvars, env, includes = _prepare_for_python_RDD(self.ctx, command, self)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
                                             bytearray(pickled_cmd),
                                             env, includes, self.preservesPartitioning,
                                             self.ctx.pythonExec,
                                             bvars, self.ctx._javaAccumulator)
        self._jrdd_val = python_rdd.asJavaRDD()

        if profiler:
            self._id = self._jrdd_val.id()
            self.ctx.profiler_collector.add_profiler(self._id, profiler)
        return self._jrdd_val

    def id(self):
        if self._id is None:
            self._id = self._jrdd.id()
        return self._id

    def _is_pipelinable(self):
        # Caching/checkpointing forces a stage boundary, so the RDD can no
        # longer be fused with downstream transformations.
        return not (self.is_cached or self.is_checkpointed)
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    test_globals = globals().copy()
    # The doctest examples refer to a SparkContext named 'sc'.
    test_globals['sc'] = SparkContext('local[4]', 'PythonTest')
    results = doctest.testmod(globs=test_globals, optionflags=doctest.ELLIPSIS)
    test_globals['sc'].stop()
    if results.failed:
        exit(-1)
# Run the doctest suite when the module is executed directly.
if __name__ == "__main__":
    _test()
| adobe-research/spark-gpu | src/rdd.py | Python | apache-2.0 | 97,723 |
# linker.py 12/05/2015 D.J.Whale
#
# A placeholder for an object linker
#
# Planned features
# process import tables
# process export tables
# list import tables
# list export tables
# fixup external addresses
# create an absolute object file for loading
# link from object files
# link from library files
# create a link memory map (program space)
# create a link memory map (data space)
# create a dump of call/return stack frames
# perhaps use choice of register parameters and stack parameters
# stack depth analysis, call tree
# END
| whaleygeek/MyLittleComputer | src/python/linker.py | Python | mit | 567 |
from gi.repository import Gtk
from gmusic.windows import WithMenu, LoginWindow
from gmusic.user import User
class MainWindow(Gtk.Window, WithMenu):
    """Top-level GMusic application window."""

    def __init__(self):
        Gtk.Window.__init__(self, title="GMusic")
        self.set_default_size(800, 400)
        self.box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
        # Menu plumbing provided by the WithMenu mixin.
        self.setup_uimanager()
        self.setup_menu()
        self.add(self.box)
        User.check_login()

    def ready(self):
        """Called once the window has been initialised and is ready."""
        if not User.loggedin:
            # No session yet: show the login dialog first.
            LoginWindow(self).show_all()
        else:
            self.activate()

    def activate(self):
        """Activates the window for use."""
        print("User logged in, main window ready")
| nirix-old/gmusic | gmusic/windows/main.py | Python | gpl-3.0 | 771 |
# Copyright (C) 2015 JWCrypto Project Contributors - see LICENSE file
from jwcrypto.common import JWException
from jwcrypto.common import JWSEHeaderParameter, JWSEHeaderRegistry
from jwcrypto.common import base64url_decode, base64url_encode
from jwcrypto.common import json_decode, json_encode
from jwcrypto.jwa import JWA
from jwcrypto.jwk import JWK
# JOSE header parameters defined for JWS (RFC 7515 section 4.1, plus the
# 'b64' parameter from RFC 7797).
# NOTE(review): the tuple fields appear to be
# (description, mustprotect, supported, check_fn), matching the
# .mustprotect/.supported attribute checks below -- confirm against
# jwcrypto.common.JWSEHeaderParameter.
JWSHeaderRegistry = {
    'alg': JWSEHeaderParameter('Algorithm', False, True, None),
    'jku': JWSEHeaderParameter('JWK Set URL', False, False, None),
    'jwk': JWSEHeaderParameter('JSON Web Key', False, False, None),
    'kid': JWSEHeaderParameter('Key ID', False, True, None),
    'x5u': JWSEHeaderParameter('X.509 URL', False, False, None),
    'x5c': JWSEHeaderParameter('X.509 Certificate Chain', False, False, None),
    'x5t': JWSEHeaderParameter(
        'X.509 Certificate SHA-1 Thumbprint', False, False, None),
    'x5t#S256': JWSEHeaderParameter(
        'X.509 Certificate SHA-256 Thumbprint', False, False, None),
    'typ': JWSEHeaderParameter('Type', False, True, None),
    'cty': JWSEHeaderParameter('Content Type', False, True, None),
    'crit': JWSEHeaderParameter('Critical', True, True, None),
    'b64': JWSEHeaderParameter('Base64url-Encode Payload', True, True, None)
}
"""Registry of valid header parameters"""

# Algorithms accepted when no explicit allow-list is set on a JWS object
# (see the JWS.allowed_algs property).
default_allowed_algs = [
    'HS256', 'HS384', 'HS512',
    'RS256', 'RS384', 'RS512',
    'ES256', 'ES384', 'ES512',
    'PS256', 'PS384', 'PS512',
    'EdDSA', 'ES256K']
"""Default allowed algorithms"""
class InvalidJWSSignature(JWException):
    """Invalid JWS Signature.

    This exception is raised when a signature cannot be validated.
    """

    def __init__(self, message=None, exception=None):
        # Assemble "<message> {<exception>}", falling back to a generic text.
        parts = [str(message) if message
                 else 'Unknown Signature Verification Failure']
        if exception:
            parts.append('{%s}' % str(exception))
        super(InvalidJWSSignature, self).__init__(' '.join(parts))
class InvalidJWSObject(JWException):
    """Invalid JWS Object.

    This exception is raised when the JWS Object is invalid and/or
    improperly formatted.
    """

    def __init__(self, message=None, exception=None):
        # Assemble "Invalid JWS Object [<message>] {<exception>}".
        parts = ['Invalid JWS Object']
        if message:
            parts.append('[%s]' % message)
        if exception:
            parts.append('{%s}' % str(exception))
        super(InvalidJWSObject, self).__init__(' '.join(parts))
class InvalidJWSOperation(JWException):
    """Invalid JWS Operation.

    This exception is raised when a requested operation cannot
    be execute due to unsatisfied conditions.
    """

    def __init__(self, message=None, exception=None):
        # The caller-supplied message is used verbatim (not str()-wrapped).
        text = message if message else 'Unknown Operation Failure'
        if exception:
            text += ' {%s}' % str(exception)
        super(InvalidJWSOperation, self).__init__(text)
class JWSCore(object):
    """The inner JWS Core object.

    This object SHOULD NOT be used directly, the JWS object should be
    used instead as JWS perform necessary checks on the validity of
    the object and requested operations.
    """

    def __init__(self, alg, key, header, payload, algs=None):
        """Core JWS token handling.

        :param alg: The algorithm used to produce the signature.
            See RFC 7518
        :param key: A (:class:`jwcrypto.jwk.JWK`) key of appropriate
            type for the "alg" provided in the 'protected' json string.
        :param header: A JSON string representing the protected header.
        :param payload(bytes): An arbitrary value
        :param algs: An optional list of allowed algorithms

        :raises ValueError: if the key is not a :class:`JWK` object
        :raises InvalidJWAAlgorithm: if the algorithm is not valid, is
            unknown or otherwise not yet implemented.
        """
        self.alg = alg
        # Resolve the signing backend; enforces the algorithm allow-list.
        self.engine = self._jwa(alg, algs)
        if not isinstance(key, JWK):
            raise ValueError('key is not a JWK object')
        self.key = key
        if header is not None:
            if isinstance(header, dict):
                self.header = header
                header = json_encode(header)
            else:
                self.header = json_decode(header)
            # Keep the exact serialized header form: the signature is
            # computed over these exact bytes.
            self.protected = base64url_encode(header.encode('utf-8'))
        else:
            self.header = dict()
            self.protected = ''
        self.payload = payload

    def _jwa(self, name, allowed):
        # Map an algorithm name to its implementation, rejecting any name
        # outside the allow-list (default_allowed_algs when None).
        if allowed is None:
            allowed = default_allowed_algs
        if name not in allowed:
            raise InvalidJWSOperation('Algorithm not allowed')
        return JWA.signing_alg(name)

    def _payload(self):
        # With 'b64' true (the default) the payload is base64url-encoded;
        # with 'b64' false (RFC 7797) the raw payload bytes are used.
        if self.header.get('b64', True):
            return base64url_encode(self.payload).encode('utf-8')
        else:
            if isinstance(self.payload, bytes):
                return self.payload
            else:
                return self.payload.encode('utf-8')

    def sign(self):
        """Generates a signature"""
        payload = self._payload()
        # Signing input is BASE64URL(protected) || '.' || payload.
        sigin = b'.'.join([self.protected.encode('utf-8'), payload])
        signature = self.engine.sign(self.key, sigin)
        return {'protected': self.protected,
                'payload': payload,
                'signature': base64url_encode(signature)}

    def verify(self, signature):
        """Verifies a signature

        :raises InvalidJWSSignature: if the verification fails.
        """
        try:
            payload = self._payload()
            sigin = b'.'.join([self.protected.encode('utf-8'), payload])
            self.engine.verify(self.key, sigin, signature)
        except Exception as e:  # pylint: disable=broad-except
            # Collapse every backend failure into InvalidJWSSignature.
            raise InvalidJWSSignature('Verification failed', repr(e))
        return True
class JWS(object):
    """JSON Web Signature object

    This object represent a JWS token.
    """

    def __init__(self, payload=None, header_registry=None):
        """Creates a JWS object.

        :param payload(bytes): An arbitrary value (optional).
        :param header_registry: Optional additions to the header registry
        """
        # 'objects' accumulates the token state: 'payload', 'signature' or
        # 'signatures', 'protected'/'header', 'b64' and the 'valid' flag.
        self.objects = dict()
        if payload:
            self.objects['payload'] = payload
        # Populated by verify() with one entry per failed signature check.
        self.verifylog = None
        # None means "use default_allowed_algs" (see allowed_algs property).
        self._allowed_algs = None
        self.header_registry = JWSEHeaderRegistry(JWSHeaderRegistry)
        if header_registry:
            self.header_registry.update(header_registry)
@property
def allowed_algs(self):
"""Allowed algorithms.
The list of allowed algorithms.
Can be changed by setting a list of algorithm names.
"""
if self._allowed_algs:
return self._allowed_algs
else:
return default_allowed_algs
@allowed_algs.setter
def allowed_algs(self, algs):
if not isinstance(algs, list):
raise TypeError('Allowed Algs must be a list')
self._allowed_algs = algs
@property
def is_valid(self):
return self.objects.get('valid', False)
# TODO: allow caller to specify list of headers it understands
# FIXME: Merge and check to be changed to two separate functions

def _merge_check_headers(self, protected, *headers):
    """Merge the protected header with unprotected headers and validate
    critical-header constraints.

    :raises InvalidJWSObject: on unknown/unsupported critical headers,
        duplicated parameters, parameters that must be protected but
        appear unprotected, a non-boolean 'b64', or a critical header
        that is missing after the merge.
    """
    header = None
    crit = []
    if protected is not None:
        if 'crit' in protected:
            crit = protected['crit']
            # Check immediately if we support these critical headers
            for k in crit:
                if k not in self.header_registry:
                    raise InvalidJWSObject(
                        'Unknown critical header: "%s"' % k)
                else:
                    if not self.header_registry[k].supported:
                        raise InvalidJWSObject(
                            'Unsupported critical header: "%s"' % k)
        header = protected
        if 'b64' in header:
            if not isinstance(header['b64'], bool):
                raise InvalidJWSObject('b64 header must be a boolean')
    for hn in headers:
        if hn is None:
            continue
        if header is None:
            header = dict()
        for h in list(hn.keys()):
            if h in self.header_registry:
                # Registered parameters flagged mustprotect may only live
                # in the protected header.
                if self.header_registry[h].mustprotect:
                    raise InvalidJWSObject('"%s" must be protected' % h)
            if h in header:
                raise InvalidJWSObject('Duplicate header: "%s"' % h)
        header.update(hn)
    # Every critical header must actually be present after the merge.
    for k in crit:
        if k not in header:
            raise InvalidJWSObject('Missing critical header "%s"' % k)
    return header
# TODO: support selecting key with 'kid' and passing in multiple keys

def _verify(self, alg, key, payload, signature, protected, header=None):
    """Check a single signature entry against *key*.

    :param alg: Expected algorithm name, or None to trust the header.
    :raises InvalidJWSSignature: on any header or signature problem.
    """
    p = dict()
    # verify it is a valid JSON object and decode
    if protected is not None:
        p = json_decode(protected)
        if not isinstance(p, dict):
            raise InvalidJWSSignature('Invalid Protected header')
    # merge headers, and verify there are no duplicates
    if header:
        if not isinstance(header, dict):
            raise InvalidJWSSignature('Invalid Unprotected header')

    # Merge and check (critical) headers
    chk_hdrs = self._merge_check_headers(p, header)
    for hdr in chk_hdrs:
        if hdr in self.header_registry:
            if not self.header_registry.check_header(hdr, self):
                raise InvalidJWSSignature('Failed header check')

    # check 'alg' is present
    if alg is None and 'alg' not in p:
        raise InvalidJWSSignature('No "alg" in headers')
    if alg:
        # An explicitly requested algorithm must match the header value.
        if 'alg' in p and alg != p['alg']:
            raise InvalidJWSSignature('"alg" mismatch, requested '
                                      '"%s", found "%s"' % (alg,
                                                            p['alg']))
        a = alg
    else:
        a = p['alg']

    # the following will verify the "alg" is supported and the signature
    # verifies
    c = JWSCore(a, key, protected, payload, self._allowed_algs)
    c.verify(signature)
def verify(self, key, alg=None):
    """Verifies a JWS token.

    :param key: The (:class:`jwcrypto.jwk.JWK`) verification key.
    :param alg: The signing algorithm (optional). usually the algorithm
        is known as it is provided with the JOSE Headers of the token.

    :raises InvalidJWSSignature: if the verification fails.
    """
    self.verifylog = list()
    self.objects['valid'] = False
    obj = self.objects
    if 'signature' in obj:
        # Compact / flattened form: exactly one signature to check.
        try:
            self._verify(alg, key,
                         obj['payload'],
                         obj['signature'],
                         obj.get('protected', None),
                         obj.get('header', None))
            obj['valid'] = True
        except Exception as e:  # pylint: disable=broad-except
            self.verifylog.append('Failed: [%s]' % repr(e))
    elif 'signatures' in obj:
        # General JSON form: the token is valid if any one signature
        # verifies; every failure is recorded in verifylog.
        for o in obj['signatures']:
            try:
                self._verify(alg, key,
                             obj['payload'],
                             o['signature'],
                             o.get('protected', None),
                             o.get('header', None))
                # Ok if at least one verifies
                obj['valid'] = True
            except Exception as e:  # pylint: disable=broad-except
                self.verifylog.append('Failed: [%s]' % repr(e))
    else:
        raise InvalidJWSSignature('No signatures available')

    if not self.is_valid:
        raise InvalidJWSSignature('Verification failed for all '
                                  'signatures' + repr(self.verifylog))
def _deserialize_signature(self, sig):
    """Decode one signature entry from its JSON serialization form."""
    decoded = {'signature': base64url_decode(str(sig['signature']))}
    if 'protected' in sig:
        raw = base64url_decode(str(sig['protected']))
        decoded['protected'] = raw.decode('utf-8')
    if 'header' in sig:
        decoded['header'] = sig['header']
    return decoded
def _deserialize_b64(self, o, protected):
if protected is None:
b64n = None
else:
p = json_decode(protected)
b64n = p.get('b64')
if b64n is not None:
if not isinstance(b64n, bool):
raise InvalidJWSObject('b64 header must be boolean')
b64 = o.get('b64')
if b64 == b64n:
return
elif b64 is None:
o['b64'] = b64n
else:
raise InvalidJWSObject('conflicting b64 values')
def deserialize(self, raw_jws, key=None, alg=None):
    """Deserialize a JWS token.

    NOTE: Destroys any current status and tries to import the raw
    JWS provided.

    :param raw_jws: a 'raw' JWS token (JSON Encoded or Compact
        notation) string.
    :param key: A (:class:`jwcrypto.jwk.JWK`) verification key (optional).
        If a key is provided a verification step will be attempted after
        the object is successfully deserialized.
    :param alg: The signing algorithm (optional). usually the algorithm
        is known as it is provided with the JOSE Headers of the token.

    :raises InvalidJWSObject: if the raw object is an invalid JWS token.
    :raises InvalidJWSSignature: if the verification fails.
    """
    self.objects = dict()
    o = dict()
    try:
        try:
            # First try the JSON serialization (general or flattened).
            djws = json_decode(raw_jws)
            if 'signatures' in djws:
                o['signatures'] = list()
                for s in djws['signatures']:
                    os = self._deserialize_signature(s)
                    o['signatures'].append(os)
                    self._deserialize_b64(o, os.get('protected'))
            else:
                o = self._deserialize_signature(djws)
                self._deserialize_b64(o, o.get('protected'))

            if 'payload' in djws:
                # With b64=false (RFC 7797) the payload is carried raw.
                if o.get('b64', True):
                    o['payload'] = base64url_decode(str(djws['payload']))
                else:
                    o['payload'] = djws['payload']

        except ValueError:
            # Not JSON: fall back to the compact notation
            # BASE64URL(protected).BASE64URL(payload).BASE64URL(signature)
            c = raw_jws.split('.')
            if len(c) != 3:
                raise InvalidJWSObject('Unrecognized representation')
            p = base64url_decode(str(c[0]))
            if len(p) > 0:
                o['protected'] = p.decode('utf-8')
                self._deserialize_b64(o, o['protected'])
            o['payload'] = base64url_decode(str(c[1]))
            o['signature'] = base64url_decode(str(c[2]))
        self.objects = o

    except Exception as e:  # pylint: disable=broad-except
        raise InvalidJWSObject('Invalid format', repr(e))

    if key:
        self.verify(key, alg)
def add_signature(self, key, alg=None, protected=None, header=None):
    """Adds a new signature to the object.

    :param key: A (:class:`jwcrypto.jwk.JWK`) key of appropriate for
        the "alg" provided.
    :param alg: An optional algorithm name. If already provided as an
        element of the protected or unprotected header it can be safely
        omitted.
    :param protected: The Protected Header (optional)
    :param header: The Unprotected Header (optional)

    :raises InvalidJWSObject: if no payload has been set on the object,
        or invalid headers are provided.
    :raises ValueError: if the key is not a :class:`JWK` object.
    :raises ValueError: if the algorithm is missing or is not provided
        by one of the headers.
    :raises InvalidJWAAlgorithm: if the algorithm is not valid, is
        unknown or otherwise not yet implemented.
    """
    if not self.objects.get('payload', None):
        raise InvalidJWSObject('Missing Payload')

    b64 = True

    p = dict()
    if protected:
        if isinstance(protected, dict):
            p = protected
            protected = json_encode(p)
        else:
            p = json_decode(protected)

        # If b64 is present we must enforce criticality
        if 'b64' in list(p.keys()):
            crit = p.get('crit', [])
            if 'b64' not in crit:
                raise InvalidJWSObject('b64 header must always be critical')
            b64 = p['b64']

    # All signatures on one token must agree on the payload encoding.
    if 'b64' in self.objects:
        if b64 != self.objects['b64']:
            raise InvalidJWSObject('Mixed b64 headers on signatures')

    h = None
    if header:
        if isinstance(header, dict):
            h = header
            header = json_encode(header)
        else:
            h = json_decode(header)

    p = self._merge_check_headers(p, h)

    # The algorithm may come from the argument or from the headers, but a
    # conflict between the two is an error.
    if 'alg' in p:
        if alg is None:
            alg = p['alg']
        elif alg != p['alg']:
            raise ValueError('"alg" value mismatch, specified "alg" '
                             'does not match JOSE header value')
    if alg is None:
        raise ValueError('"alg" not specified')

    c = JWSCore(
        alg, key, protected, self.objects['payload'], self.allowed_algs
    )
    sig = c.sign()

    o = dict()
    o['signature'] = base64url_decode(sig['signature'])
    if protected:
        o['protected'] = protected
    if header:
        o['header'] = h
    o['valid'] = True

    if 'signatures' in self.objects:
        # Already in the general JSON form: just append.
        self.objects['signatures'].append(o)
    elif 'signature' in self.objects:
        # Second signature: convert the flattened form to the general form
        # by moving the existing signature into a list first.
        self.objects['signatures'] = list()
        n = dict()
        n['signature'] = self.objects.pop('signature')
        if 'protected' in self.objects:
            n['protected'] = self.objects.pop('protected')
        if 'header' in self.objects:
            n['header'] = self.objects.pop('header')
        if 'valid' in self.objects:
            n['valid'] = self.objects.pop('valid')
        self.objects['signatures'].append(n)
        self.objects['signatures'].append(o)
    else:
        self.objects.update(o)
    self.objects['b64'] = b64
def serialize(self, compact=False):
    """Serializes the object into a JWS token.

    :param compact(boolean): if True generates the compact
        representation, otherwise generates a standard JSON format.

    :raises InvalidJWSOperation: if the object cannot serialized
        with the compact representation and `compact` is True.
    :raises InvalidJWSSignature: if no signature has been added
        to the object, or no valid signature can be found.
    """
    if compact:
        # Compact form carries exactly one signature and requires a
        # protected header that includes 'alg'.
        if 'signatures' in self.objects:
            raise InvalidJWSOperation("Can't use compact encoding with "
                                      "multiple signatures")
        if 'signature' not in self.objects:
            raise InvalidJWSSignature("No available signature")
        if not self.objects.get('valid', False):
            raise InvalidJWSSignature("No valid signature found")
        if 'protected' in self.objects:
            p = json_decode(self.objects['protected'])
            if 'alg' not in p:
                raise InvalidJWSOperation("Compact encoding must carry "
                                          "'alg' in protected header")
            protected = base64url_encode(self.objects['protected'])
        else:
            raise InvalidJWSOperation("Can't use compact encoding "
                                      "without protected header")
        if self.objects.get('payload', False):
            if self.objects.get('b64', True):
                payload = base64url_encode(self.objects['payload'])
            else:
                # b64=false: the payload is embedded raw, so it must not
                # contain the '.' field separator.
                if isinstance(self.objects['payload'], bytes):
                    payload = self.objects['payload'].decode('utf-8')
                else:
                    payload = self.objects['payload']
                if '.' in payload:
                    raise InvalidJWSOperation(
                        "Can't use compact encoding with unencoded "
                        "payload that uses the . character")
        else:
            payload = ''
        return '.'.join([protected, payload,
                         base64url_encode(self.objects['signature'])])
    else:
        obj = self.objects
        sig = dict()
        if self.objects.get('payload', False):
            if self.objects.get('b64', True):
                sig['payload'] = base64url_encode(self.objects['payload'])
            else:
                sig['payload'] = self.objects['payload']
        if 'signature' in obj:
            # Flattened JSON form (single signature).
            if not obj.get('valid', False):
                raise InvalidJWSSignature("No valid signature found")
            sig['signature'] = base64url_encode(obj['signature'])
            if 'protected' in obj:
                sig['protected'] = base64url_encode(obj['protected'])
            if 'header' in obj:
                sig['header'] = obj['header']
        elif 'signatures' in obj:
            # General JSON form: only signatures flagged valid are emitted.
            sig['signatures'] = list()
            for o in obj['signatures']:
                if not o.get('valid', False):
                    continue
                s = {'signature': base64url_encode(o['signature'])}
                if 'protected' in o:
                    s['protected'] = base64url_encode(o['protected'])
                if 'header' in o:
                    s['header'] = o['header']
                sig['signatures'].append(s)
            if len(sig['signatures']) == 0:
                raise InvalidJWSSignature("No valid signature found")
        else:
            raise InvalidJWSSignature("No available signature")
        return json_encode(sig)
@property
def payload(self):
if 'payload' not in self.objects:
raise InvalidJWSOperation("Payload not available")
if not self.is_valid:
raise InvalidJWSOperation("Payload not verified")
return self.objects['payload']
def detach_payload(self):
self.objects.pop('payload', None)
@property
def jose_header(self):
    """The merged JOSE header(s) of the token.

    Returns a single dict for a one-signature token, or a list of dicts
    (one per signature) for the general JSON form.

    :raises InvalidJWSOperation: if no signature has been set.
    """
    obj = self.objects
    if 'signature' in obj:
        if 'protected' in obj:
            p = json_decode(obj['protected'])
        else:
            p = None
        return self._merge_check_headers(p, obj.get('header', dict()))
    elif 'signatures' in self.objects:
        jhl = list()
        for o in obj['signatures']:
            jh = dict()
            if 'protected' in o:
                p = json_decode(o['protected'])
            else:
                p = None
            jh = self._merge_check_headers(p, o.get('header', dict()))
            jhl.append(jh)
        return jhl
    else:
        raise InvalidJWSOperation("JOSE Header(s) not available")
| simo5/jwcrypto | jwcrypto/jws.py | Python | lgpl-3.0 | 23,635 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.it.forms import (
ITZipCodeField, ITRegionSelect, ITRegionProvinceSelect,
ITSocialSecurityNumberField, ITVatNumberField, ITPhoneNumberField)
# import calculated data structures
from localflavor.it.it_province import *
from localflavor.it.it_region import *
class ITLocalFlavorTests(SimpleTestCase):
def test_ITRegionSelect(self):
    # The widget must render all 20 Italian regions, with the passed
    # value ('PMN') marked as selected.
    f = ITRegionSelect()
    out = '''<select name="regions">
<option value="ABR">Abruzzo</option>
<option value="BAS">Basilicata</option>
<option value="CAL">Calabria</option>
<option value="CAM">Campania</option>
<option value="EMR">Emilia-Romagna</option>
<option value="FVG">Friuli-Venezia Giulia</option>
<option value="LAZ">Lazio</option>
<option value="LIG">Liguria</option>
<option value="LOM">Lombardia</option>
<option value="MAR">Marche</option>
<option value="MOL">Molise</option>
<option value="PMN" selected="selected">Piemonte</option>
<option value="PUG">Puglia</option>
<option value="SAR">Sardegna</option>
<option value="SIC">Sicilia</option>
<option value="TOS">Toscana</option>
<option value="TAA">Trentino-Alto Adige</option>
<option value="UMB">Umbria</option>
<option value="VAO">Valle d\u2019Aosta</option>
<option value="VEN">Veneto</option>
</select>'''
    self.assertHTMLEqual(f.render('regions', 'PMN'), out)
def test_ITRegionProvinceSelect(self):
    # Provinces must be grouped by region via <optgroup>, with the passed
    # value ('PE', Pescara) marked as selected.
    f = ITRegionProvinceSelect()
    out = '''<select name="region_provinces">
<optgroup label="Abruzzo">
<option value="CH">Chieti</option>
<option value="AQ">L\u2019Aquila</option>
<option value="PE" selected="selected">Pescara</option>
<option value="TE">Teramo</option>
</optgroup>
<optgroup label="Basilicata">
<option value="MT">Matera</option>
<option value="PZ">Potenza</option>
</optgroup>
<optgroup label="Calabria">
<option value="CZ">Catanzaro</option>
<option value="CS">Cosenza</option>
<option value="KR">Crotone</option>
<option value="RC">Reggio Calabria</option>
<option value="VV">Vibo Valentia</option>
</optgroup>
<optgroup label="Campania">
<option value="AV">Avellino</option>
<option value="BN">Benevento</option>
<option value="CE">Caserta</option>
<option value="NA">Napoli</option>
<option value="SA">Salerno</option>
</optgroup>
<optgroup label="Emilia-Romagna">
<option value="BO">Bologna</option>
<option value="FE">Ferrara</option>
<option value="FC">Forl\xec-Cesena</option>
<option value="MO">Modena</option>
<option value="PR">Parma</option>
<option value="PC">Piacenza</option>
<option value="RA">Ravenna</option>
<option value="RE">Reggio Emilia</option>
<option value="RN">Rimini</option>
</optgroup>
<optgroup label="Friuli-Venezia Giulia">
<option value="GO">Gorizia</option>
<option value="PN">Pordenone</option>
<option value="TS">Trieste</option>
<option value="UD">Udine</option>
</optgroup>
<optgroup label="Lazio">
<option value="FR">Frosinone</option>
<option value="LT">Latina</option>
<option value="RI">Rieti</option>
<option value="RM">Roma</option>
<option value="VT">Viterbo</option>
</optgroup>
<optgroup label="Liguria">
<option value="GE">Genova</option>
<option value="IM">Imperia</option>
<option value="SP">La Spezia</option>
<option value="SV">Savona</option>
</optgroup>
<optgroup label="Lombardia">
<option value="BG">Bergamo</option>
<option value="BS">Brescia</option>
<option value="CO">Como</option>
<option value="CR">Cremona</option>
<option value="LC">Lecco</option>
<option value="LO">Lodi</option>
<option value="MN">Mantova</option>
<option value="MI">Milano</option>
<option value="MB">Monza e Brianza</option>
<option value="PV">Pavia</option>
<option value="SO">Sondrio</option>
<option value="VA">Varese</option>
</optgroup>
<optgroup label="Marche">
<option value="AN">Ancona</option>
<option value="AP">Ascoli Piceno</option>
<option value="FM">Fermo</option>
<option value="MC">Macerata</option>
<option value="PU">Pesaro e Urbino</option>
</optgroup>
<optgroup label="Molise">
<option value="CB">Campobasso</option>
<option value="IS">Isernia</option>
</optgroup>
<optgroup label="Piemonte">
<option value="AL">Alessandria</option>
<option value="AT">Asti</option>
<option value="BI">Biella</option>
<option value="CN">Cuneo</option>
<option value="NO">Novara</option>
<option value="TO">Torino</option>
<option value="VB">Verbano Cusio Ossola</option>
<option value="VC">Vercelli</option>
</optgroup>
<optgroup label="Puglia">
<option value="BA">Bari</option>
<option value="BT">Barletta-Andria-Trani</option>
<option value="BR">Brindisi</option>
<option value="FG">Foggia</option>
<option value="LE">Lecce</option>
<option value="TA">Taranto</option>
</optgroup>
<optgroup label="Sardegna">
<option value="CA">Cagliari</option>
<option value="CI">Carbonia-Iglesias</option>
<option value="VS">Medio Campidano</option>
<option value="NU">Nuoro</option>
<option value="OG">Ogliastra</option>
<option value="OT">Olbia-Tempio</option>
<option value="OR">Oristano</option>
<option value="SS">Sassari</option>
</optgroup>
<optgroup label="Sicilia">
<option value="AG">Agrigento</option>
<option value="CL">Caltanissetta</option>
<option value="CT">Catania</option>
<option value="EN">Enna</option>
<option value="ME">Messina</option>
<option value="PA">Palermo</option>
<option value="RG">Ragusa</option>
<option value="SR">Siracusa</option>
<option value="TP">Trapani</option>
</optgroup>
<optgroup label="Toscana">
<option value="AR">Arezzo</option>
<option value="FI">Firenze</option>
<option value="GR">Grosseto</option>
<option value="LI">Livorno</option>
<option value="LU">Lucca</option>
<option value="MS">Massa-Carrara</option>
<option value="PI">Pisa</option>
<option value="PT">Pistoia</option>
<option value="PO">Prato</option>
<option value="SI">Siena</option>
</optgroup>
<optgroup label="Trentino-Alto Adige">
<option value="BZ">Bolzano/Bozen</option>
<option value="TN">Trento</option>
</optgroup>
<optgroup label="Umbria">
<option value="PG">Perugia</option>
<option value="TR">Terni</option>
</optgroup>
<optgroup label="Valle d\u2019Aosta">
<option value="AO">Aosta</option>
</optgroup>
<optgroup label="Veneto">
<option value="BL">Belluno</option>
<option value="PD">Padova</option>
<option value="RO">Rovigo</option>
<option value="TV">Treviso</option>
<option value="VE">Venezia</option>
<option value="VR">Verona</option>
<option value="VI">Vicenza</option>
</optgroup>
</select>'''
    self.assertHTMLEqual(f.render('region_provinces', 'PE'), out)
def test_ITZipCodeField(self):
error_invalid = ['Enter a valid zip code.']
valid = {
'00100': '00100',
}
invalid = {
' 00100': error_invalid,
}
self.assertFieldOutput(ITZipCodeField, valid, invalid)
def test_ITSocialSecurityNumberField(self):
error_invalid = ['Enter a valid Social Security number.']
valid = {
'LVSGDU99T71H501L': 'LVSGDU99T71H501L',
'LBRRME11A01L736W': 'LBRRME11A01L736W',
'lbrrme11a01l736w': 'LBRRME11A01L736W',
'LBR RME 11A01 L736W': 'LBRRME11A01L736W',
}
invalid = {
'LBRRME11A01L736A': error_invalid,
'%BRRME11A01L736W': error_invalid,
}
self.assertFieldOutput(ITSocialSecurityNumberField, valid, invalid)
def test_ITSocialSecurityNumberField_for_entities(self):
error_invalid = ['Enter a valid Social Security number.']
valid = {
'07973780013': '07973780013',
'7973780013': '07973780013',
7973780013: '07973780013',
}
invalid = {
'07973780014': error_invalid,
'A7973780013': error_invalid,
}
self.assertFieldOutput(ITSocialSecurityNumberField, valid, invalid)
def test_ITVatNumberField(self):
error_invalid = ['Enter a valid VAT number.']
valid = {
'07973780013': '07973780013',
'7973780013': '07973780013',
7973780013: '07973780013',
}
invalid = {
'07973780014': error_invalid,
'A7973780013': error_invalid,
}
self.assertFieldOutput(ITVatNumberField, valid, invalid)
def test_ITPhoneNumberField(self):
error_format = ['Enter a valid Italian phone number.']
valid = {
'+39 347 1234567': '347 1234567',
'39 347 123 4567': '347 1234567',
'347-1234567': '347 1234567',
'3471234567': '347 1234567',
'+39 347 12345678': '347 12345678',
'39 347 123 45678': '347 12345678',
'347-12345678': '347 12345678',
'34712345678': '347 12345678',
'+39 347 123456': '347 123456',
'39 347 123 456': '347 123456',
'347-123456': '347 123456',
'347123456': '347 123456',
'+39 0861 12345678': '0861 12345678',
'39 0861 1234 5678': '0861 12345678',
'0861-12345678': '0861 12345678',
'0861 12345': '0861 12345',
}
invalid = {
'+44 347 1234567': error_format,
'14471234567': error_format,
'0861 123456789': error_format,
'08661234567890': error_format,
}
self.assertFieldOutput(ITPhoneNumberField, valid, invalid)
| zarelit/django-localflavor | tests/test_it.py | Python | bsd-3-clause | 9,354 |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Note: The deserialization code originally comes from ABE.
import bitcoin
from bitcoin import *
from util import print_error
import time
import sys
import struct
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
import struct
import StringIO
import mmap
import random
# Placeholder byte written in place of a missing signature when serializing a
# partially-signed transaction (see Transaction.input_script / parse_sig).
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
    """Thrown when there's a problem deserializing or serializing a transaction."""
class BCDataStream(object):
    """Workalike of Bitcoin's CDataStream: a byte buffer plus a read cursor,
    with helpers for Bitcoin's little-endian and compact-size wire encodings.

    NOTE: Python 2 code — 'bytes' parameters are plain str buffers.
    """
    def __init__(self):
        self.input = None        # underlying byte buffer (str or mmap)
        self.read_cursor = 0     # current read offset into self.input
    def clear(self):
        self.input = None
        self.read_cursor = 0
    def write(self, bytes):  # Initialize with string of bytes
        if self.input is None:
            self.input = bytes
        else:
            self.input += bytes
    def map_file(self, file, start):  # Initialize with bytes from file
        self.input = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ)
        self.read_cursor = start
    def seek_file(self, position):
        self.read_cursor = position
    def close_file(self):
        self.input.close()
    def read_string(self):
        # Strings are encoded depending on length:
        # 0 to 252 : 1-byte-length followed by bytes (if any)
        # 253 to 65,535 : byte'253' 2-byte-length followed by bytes
        # 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
        # ... and the Bitcoin client is coded to understand:
        # greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
        # ... but I don't think it actually handles any strings that big.
        if self.input is None:
            raise SerializationError("call write(bytes) before trying to deserialize")
        try:
            length = self.read_compact_size()
        except IndexError:
            raise SerializationError("attempt to read past end of buffer")
        return self.read_bytes(length)
    def write_string(self, string):
        # Length-encoded as with read-string
        self.write_compact_size(len(string))
        self.write(string)
    def read_bytes(self, length):
        # Read `length` raw bytes and advance the cursor.
        try:
            result = self.input[self.read_cursor:self.read_cursor+length]
            self.read_cursor += length
            return result
        except IndexError:
            raise SerializationError("attempt to read past end of buffer")
        return ''
    # Fixed-width little-endian integer readers/writers.
    def read_boolean(self): return self.read_bytes(1)[0] != chr(0)
    def read_int16(self): return self._read_num('<h')
    def read_uint16(self): return self._read_num('<H')
    def read_int32(self): return self._read_num('<i')
    def read_uint32(self): return self._read_num('<I')
    def read_int64(self): return self._read_num('<q')
    def read_uint64(self): return self._read_num('<Q')
    def write_boolean(self, val): return self.write(chr(1) if val else chr(0))
    def write_int16(self, val): return self._write_num('<h', val)
    def write_uint16(self, val): return self._write_num('<H', val)
    def write_int32(self, val): return self._write_num('<i', val)
    def write_uint32(self, val): return self._write_num('<I', val)
    def write_int64(self, val): return self._write_num('<q', val)
    def write_uint64(self, val): return self._write_num('<Q', val)
    def read_compact_size(self):
        # First byte selects the width of the length field (Bitcoin varint).
        size = ord(self.input[self.read_cursor])
        self.read_cursor += 1
        if size == 253:
            size = self._read_num('<H')
        elif size == 254:
            size = self._read_num('<I')
        elif size == 255:
            size = self._read_num('<Q')
        return size
    def write_compact_size(self, size):
        # Inverse of read_compact_size: choose the smallest encoding.
        if size < 0:
            raise SerializationError("attempt to write size < 0")
        elif size < 253:
            self.write(chr(size))
        elif size < 2**16:
            self.write('\xfd')
            self._write_num('<H', size)
        elif size < 2**32:
            self.write('\xfe')
            self._write_num('<I', size)
        elif size < 2**64:
            self.write('\xff')
            self._write_num('<Q', size)
    def _read_num(self, format):
        # Unpack one struct-format value at the cursor and advance past it.
        (i,) = struct.unpack_from(format, self.input, self.read_cursor)
        self.read_cursor += struct.calcsize(format)
        return i
    def _write_num(self, format, num):
        s = struct.pack(format, num)
        self.write(s)
#
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
#
import types, string, exceptions
class EnumException(exceptions.Exception):
    """Raised by Enumeration for malformed or duplicate name/value entries."""
    pass
class Enumeration:
    """C-style enumeration mapping names to integer values and back.

    enumList entries are either plain names (numbered sequentially from the
    previous value) or (name, value) tuples that reset the counter.
    NOTE: Python 2 only (`raise Exc, msg` syntax, `dict.has_key`, `types.*Type`).
    """
    def __init__(self, name, enumList):
        self.__doc__ = name
        lookup = { }
        reverseLookup = { }
        i = 0
        uniqueNames = [ ]
        uniqueValues = [ ]
        for x in enumList:
            if type(x) == types.TupleType:
                # (name, value) tuple: restart numbering at `value`
                x, i = x
            if type(x) != types.StringType:
                raise EnumException, "enum name is not a string: " + x
            if type(i) != types.IntType:
                raise EnumException, "enum value is not an integer: " + i
            if x in uniqueNames:
                raise EnumException, "enum name is not unique: " + x
            if i in uniqueValues:
                raise EnumException, "enum value is not unique for " + x
            uniqueNames.append(x)
            uniqueValues.append(i)
            lookup[x] = i
            reverseLookup[i] = x
            i = i + 1
        self.lookup = lookup
        self.reverseLookup = reverseLookup
    def __getattr__(self, attr):
        # Attribute access (e.g. opcodes.OP_DUP) resolves through the name table.
        if not self.lookup.has_key(attr):
            raise AttributeError
        return self.lookup[attr]
    def whatis(self, value):
        # Reverse lookup: integer value -> name.
        return self.reverseLookup[value]
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
    """Return the full hex encoding of a byte string (Python 2 str only)."""
    return bytes.encode('hex_codec')
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
    """Return an abbreviated hex encoding: full if short, else 'head...tail'."""
    t = bytes.encode('hex_codec')
    if len(t) < 11:
        return t
    return t[0:4]+"..."+t[-4:]
# Script opcode table. Entries without an explicit value continue numbering
# from the previous entry (see Enumeration); values at or below OP_PUSHDATA4
# are data pushes, and opcodes >= OP_SINGLEBYTE_END are two bytes wide.
opcodes = Enumeration("Opcodes", [
    ("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
    "OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
    "OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
    "OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
    "OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
    "OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
    "OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
    "OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
    "OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
    "OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
    "OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
    "OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
    "OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
    "OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
    "OP_CHECKMULTISIGVERIFY",
    ("OP_SINGLEBYTE_END", 0xF0),
    ("OP_DOUBLEBYTE_BEGIN", 0xF000),
    "OP_PUBKEY", "OP_PUBKEYHASH",
    ("OP_INVALIDOPCODE", 0xFFFF),
])
def script_GetOp(bytes):
    """Yield (opcode, data, next_index) for each operation in a serialized
    script (Python 2 str input). `data` is the pushed payload for push
    opcodes, otherwise None.
    """
    i = 0
    while i < len(bytes):
        vch = None
        opcode = ord(bytes[i])
        i += 1
        if opcode >= opcodes.OP_SINGLEBYTE_END:
            # two-byte opcode: fold the following byte into the value
            opcode <<= 8
            opcode |= ord(bytes[i])
            i += 1
        if opcode <= opcodes.OP_PUSHDATA4:
            # data push: the opcode itself (or a trailing length field for
            # the OP_PUSHDATA variants) gives the payload size
            nSize = opcode
            if opcode == opcodes.OP_PUSHDATA1:
                nSize = ord(bytes[i])
                i += 1
            elif opcode == opcodes.OP_PUSHDATA2:
                (nSize,) = struct.unpack_from('<H', bytes, i)
                i += 2
            elif opcode == opcodes.OP_PUSHDATA4:
                (nSize,) = struct.unpack_from('<I', bytes, i)
                i += 4
            vch = bytes[i:i+nSize]
            i += nSize
        yield (opcode, vch, i)
def script_GetOpName(opcode):
    """Return the opcode's mnemonic without its 'OP_' prefix."""
    return (opcodes.whatis(opcode)).replace("OP_", "")
def decode_script(bytes):
    """Render a serialized script as human-readable text (debugging aid)."""
    result = ''
    for (opcode, vch, i) in script_GetOp(bytes):
        if len(result) > 0: result += " "
        if opcode <= opcodes.OP_PUSHDATA4:
            # data push: show "<size>:" followed by abbreviated hex payload
            result += "%d:"%(opcode,)
            result += short_hex(vch)
        else:
            result += script_GetOpName(opcode)
    return result
def match_decoded(decoded, to_match):
    """Return True if the decoded script's opcode sequence matches the
    template `to_match`. In the template, OP_PUSHDATA4 stands for "any
    non-empty data push".
    """
    if len(decoded) != len(to_match):
        return False;
    for i in range(len(decoded)):
        if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
            continue  # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
        if to_match[i] != decoded[i][0]:
            return False
    return True
def parse_sig(x_sig):
    """Strip the trailing sighash byte ('01') from each serialized signature.

    Entries equal to NO_SIGNATURE (the missing-signature placeholder) are
    mapped to None; any other entry must end in '01' (SIGHASH_ALL).
    """
    parsed = []
    for entry in x_sig:
        if entry.endswith('01'):
            parsed.append(entry[:-2])
        else:
            assert entry == NO_SIGNATURE
            parsed.append(None)
    return parsed
def is_extended_pubkey(x_pubkey):
    """True if x_pubkey is an extended key: 'ff' (BIP32) or 'fe' (old master key)."""
    return x_pubkey.startswith(('fe', 'ff'))
def x_to_xpub(x_pubkey):
    """For a 'ff'-prefixed (BIP32) extended pubkey, return its parent xpub.

    Any other prefix yields None, matching the original implicit fall-through.
    """
    if not x_pubkey.startswith('ff'):
        return None
    from account import BIP32_Account
    xpub, derivation = BIP32_Account.parse_xpubkey(x_pubkey)
    return xpub
def parse_xpub(x_pubkey):
    """Resolve a (possibly extended) serialized pubkey to (pubkey_hex, address).

    Prefixes: '02'/'03'/'04' plain pubkey; 'ff' BIP32 xpub plus derivation;
    'fe' old-style master public key plus derivation; 'fd' raw address
    payload, for which pubkey is returned as None.
    """
    if x_pubkey[0:2] in ['02','03','04']:
        pubkey = x_pubkey
    elif x_pubkey[0:2] == 'ff':
        from account import BIP32_Account
        xpub, s = BIP32_Account.parse_xpubkey(x_pubkey)
        pubkey = BIP32_Account.derive_pubkey_from_xpub(xpub, s[0], s[1])
    elif x_pubkey[0:2] == 'fe':
        from account import OldAccount
        mpk, s = OldAccount.parse_xpubkey(x_pubkey)
        pubkey = OldAccount.get_pubkey_from_mpk(mpk.decode('hex'), s[0], s[1])
    elif x_pubkey[0:2] == 'fd':
        # raw address: one byte of address type followed by the hash160
        addrtype = ord(x_pubkey[2:4].decode('hex'))
        hash160 = x_pubkey[4:].decode('hex')
        pubkey = None
        address = hash_160_to_bc_address(hash160, addrtype)
    else:
        raise BaseException("Cannnot parse pubkey")
    if pubkey:
        address = public_key_to_bc_address(pubkey.decode('hex'))
    return pubkey, address
def parse_scriptSig(d, bytes):
    """Decode an input's scriptSig into dict `d` (signatures, pubkeys,
    address, num_sig, and redeemScript for p2sh).

    Recognizes pay-to-pubkey, standard pubkey-hash spends, and m-of-n p2sh
    multisig; anything else is logged via print_error and left unparsed.
    """
    try:
        decoded = [ x for x in script_GetOp(bytes) ]
    except Exception:
        # coinbase transactions raise an exception
        print_error("cannot find address in input script", bytes.encode('hex'))
        return

    # payto_pubkey: a single data push holding the signature
    match = [ opcodes.OP_PUSHDATA4 ]
    if match_decoded(decoded, match):
        sig = decoded[0][1].encode('hex')
        d['address'] = "(pubkey)"
        d['signatures'] = [sig]
        d['num_sig'] = 1
        d['x_pubkeys'] = ["(pubkey)"]
        d['pubkeys'] = ["(pubkey)"]
        return

    # non-generated TxIn transactions push a signature
    # (seventy-something bytes) and then their public key
    # (65 bytes) onto the stack:
    match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
    if match_decoded(decoded, match):
        sig = decoded[0][1].encode('hex')
        x_pubkey = decoded[1][1].encode('hex')
        try:
            signatures = parse_sig([sig])
            pubkey, address = parse_xpub(x_pubkey)
        except:
            import traceback
            traceback.print_exc(file=sys.stdout)
            print_error("cannot find address in input script", bytes.encode('hex'))
            return
        d['signatures'] = signatures
        d['x_pubkeys'] = [x_pubkey]
        d['num_sig'] = 1
        d['pubkeys'] = [pubkey]
        d['address'] = address
        return

    # p2sh transaction, m of n: OP_0 then signature pushes then redeem script
    match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
    if not match_decoded(decoded, match):
        print_error("cannot find address in input script", bytes.encode('hex'))
        return
    x_sig = [x[1].encode('hex') for x in decoded[1:-1]]
    dec2 = [ x for x in script_GetOp(decoded[-1][1]) ]
    # recover m and n from the OP_m ... OP_n bounds of the redeem script
    m = dec2[0][0] - opcodes.OP_1 + 1
    n = dec2[-2][0] - opcodes.OP_1 + 1
    op_m = opcodes.OP_1 + m - 1
    op_n = opcodes.OP_1 + n - 1
    match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
    if not match_decoded(dec2, match_multisig):
        print_error("cannot find address in input script", bytes.encode('hex'))
        return
    x_pubkeys = map(lambda x: x[1].encode('hex'), dec2[1:-2])
    pubkeys = [parse_xpub(x)[0] for x in x_pubkeys]  # xpub, addr = parse_xpub()
    redeemScript = Transaction.multisig_script(pubkeys, m)
    # write result in d
    d['num_sig'] = m
    d['signatures'] = parse_sig(x_sig)
    d['x_pubkeys'] = x_pubkeys
    d['pubkeys'] = pubkeys
    d['redeemScript'] = redeemScript
    d['address'] = hash_160_to_bc_address(hash_160(redeemScript.decode('hex')), 5)
def get_address_from_output_script(bytes):
    """Classify a scriptPubKey and return (type, payload), where type is
    'pubkey' (hex pubkey), 'address' (encoded address) or 'script' (raw)."""
    decoded = [ x for x in script_GetOp(bytes) ]

    # The Genesis Block, self-payments, and pay-by-IP-address payments look like:
    # 65 BYTES:... CHECKSIG
    match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
    if match_decoded(decoded, match):
        return 'pubkey', decoded[0][1].encode('hex')

    # Pay-by-Bitcoin-address TxOuts look like:
    # DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
    match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
    if match_decoded(decoded, match):
        return 'address', hash_160_to_bc_address(decoded[2][1])

    # p2sh: HASH160 20 BYTES:... EQUAL (address version byte 5)
    match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
    if match_decoded(decoded, match):
        return 'address', hash_160_to_bc_address(decoded[1][1],5)

    # fall back to the raw script
    return 'script', bytes
def parse_input(vds):
    """Read one transaction input from the stream and return it as a dict.

    Coinbase inputs (all-zero prevout hash) are tagged and not decoded
    further; for regular inputs the scriptSig is parsed for signatures.
    """
    d = {}
    prevout_hash = hash_encode(vds.read_bytes(32))
    prevout_n = vds.read_uint32()
    scriptSig = vds.read_bytes(vds.read_compact_size())
    d['scriptSig'] = scriptSig.encode('hex')
    sequence = vds.read_uint32()
    if prevout_hash == '00'*32:
        d['is_coinbase'] = True
    else:
        d['is_coinbase'] = False
        d['prevout_hash'] = prevout_hash
        d['prevout_n'] = prevout_n
        d['sequence'] = sequence
        d['pubkeys'] = []
        d['signatures'] = {}
        d['address'] = None
        if scriptSig:
            parse_scriptSig(d, scriptSig)
    return d
def parse_output(vds, i):
    """Read one transaction output; `i` is its index within the transaction."""
    d = {}
    d['value'] = vds.read_int64()
    scriptPubKey = vds.read_bytes(vds.read_compact_size())
    d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
    d['scriptPubKey'] = scriptPubKey.encode('hex')
    d['prevout_n'] = i
    return d
def deserialize(raw):
    """Parse a raw hex transaction into a dict with keys: version, nTime,
    inputs, outputs, lockTime.

    nTime is an extra 32-bit field this coin serializes right after the
    version (not present in Bitcoin's transaction format).
    """
    vds = BCDataStream()
    vds.write(raw.decode('hex'))
    d = {}
    start = vds.read_cursor
    d['version'] = vds.read_int32()
    d['nTime'] = vds.read_uint32()
    n_vin = vds.read_compact_size()
    d['inputs'] = list(parse_input(vds) for i in xrange(n_vin))
    n_vout = vds.read_compact_size()
    d['outputs'] = list(parse_output(vds,i) for i in xrange(n_vout))
    d['lockTime'] = vds.read_uint32()
    return d
def push_script(x):
    """Prefix a hex script element with its push opcode.
    (Python 2: len(x)/2 is integer division — byte count of the hex string.)"""
    return op_push(len(x)/2) + x
class Transaction:
    """A (possibly partially-signed) transaction.

    Holds either the raw hex string (self.raw), the parsed form
    (self.inputs / self.outputs / self.locktime), or both; each is lazily
    derived from the other via serialize() / deserialize().
    NOTE: Python 2 code throughout (str-based hex codecs, integer `/`).
    """
    def __str__(self):
        # Lazily serialize so a tx built via from_io can be printed directly.
        if self.raw is None:
            self.raw = self.serialize()
        return self.raw
    def __init__(self, raw):
        self.raw = raw
        self.inputs = None   # parsed lazily by deserialize()
    def update(self, raw):
        """Replace the raw tx and re-parse it."""
        self.raw = raw
        self.inputs = None
        self.deserialize()
    def update_signatures(self, raw):
        """Add new signatures to a transaction"""
        d = deserialize(raw)
        for i, txin in enumerate(self.inputs):
            sigs1 = txin.get('signatures')
            sigs2 = d['inputs'][i].get('signatures')
            for sig in sigs2:
                if sig in sigs1:
                    continue
                # recover which of our pubkeys produced this signature
                for_sig = Hash(self.tx_for_sig(i).decode('hex'))
                # der to string
                order = ecdsa.ecdsa.generator_secp256k1.order()
                r, s = ecdsa.util.sigdecode_der(sig.decode('hex'), order)
                sig_string = ecdsa.util.sigencode_string(r, s, order)
                pubkeys = txin.get('pubkeys')
                compressed = True
                for recid in range(4):
                    public_key = MyVerifyingKey.from_signature(sig_string, recid, for_sig, curve = SECP256k1)
                    pubkey = point_to_ser(public_key.pubkey.point, compressed).encode('hex')
                    if pubkey in pubkeys:
                        public_key.verify_digest(sig_string, for_sig, sigdecode = ecdsa.util.sigdecode_string)
                        j = pubkeys.index(pubkey)
                        print_error("adding sig", i, j, pubkey, sig)
                        self.inputs[i]['signatures'][j] = sig
                        self.inputs[i]['x_pubkeys'][j] = pubkey
                        break
        # redo raw
        self.raw = self.serialize()
    def deserialize(self):
        """Populate inputs/outputs/locktime from self.raw (idempotent)."""
        if self.raw is None:
            self.raw = self.serialize()
        if self.inputs is not None:
            return
        d = deserialize(self.raw)
        self.inputs = d['inputs']
        self.outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
        self.locktime = d['lockTime']
        return d
    @classmethod
    def from_io(klass, inputs, outputs, locktime=0, nTime=0):
        """Build an unserialized Transaction from parsed inputs/outputs.
        nTime == 0 means "use the current wall-clock time"."""
        self = klass(None)
        self.inputs = inputs
        self.outputs = outputs
        self.locktime = locktime
        print("from_io")
        if nTime == 0:
            self.time = int(time.time())  # bitspill
        else:
            self.time = nTime
        #self.raw = self.serialize()
        return self
    @classmethod
    def sweep(klass, privkeys, network, to_address, fee):
        """Spend every unspent output of the given private keys to one
        address, signing immediately. Returns None if nothing to sweep."""
        inputs = []
        for privkey in privkeys:
            pubkey = public_key_from_private_key(privkey)
            address = address_from_private_key(privkey)
            u = network.synchronous_get([ ('blockchain.address.listunspent',[address])])[0]
            pay_script = klass.pay_script('address', address)
            for item in u:
                item['scriptPubKey'] = pay_script
                item['redeemPubkey'] = pubkey
                item['address'] = address
                item['prevout_hash'] = item['tx_hash']
                item['prevout_n'] = item['tx_pos']
                item['pubkeys'] = [pubkey]
                item['x_pubkeys'] = [pubkey]
                item['signatures'] = [None]
                item['num_sig'] = 1
            inputs += u
        if not inputs:
            return
        total = sum(i.get('value') for i in inputs) - fee
        outputs = [('address', to_address, total)]
        self = klass.from_io(inputs, outputs)
        self.sign({ pubkey:privkey })
        return self
    @classmethod
    def multisig_script(klass, public_keys, m):
        """Build the hex redeem script for an m-of-n multisig (n <= 15)."""
        n = len(public_keys)
        assert n <= 15
        assert m <= n
        op_m = format(opcodes.OP_1 + m - 1, 'x')
        op_n = format(opcodes.OP_1 + n - 1, 'x')
        keylist = [op_push(len(k)/2) + k for k in public_keys]
        # 'ae' is OP_CHECKMULTISIG
        return op_m + ''.join(keylist) + op_n + 'ae'
    @classmethod
    def pay_script(self, output_type, addr):
        """Return the hex scriptPubKey paying to `addr` ('script' outputs
        pass the raw script through)."""
        if output_type == 'script':
            return addr.encode('hex')
        elif output_type == 'address':
            addrtype, hash_160 = bc_address_to_hash_160(addr)
            if addrtype == 30:
                script = '76a9'  # op_dup, op_hash_160
                script += push_script(hash_160.encode('hex'))
                script += '88ac'  # op_equalverify, op_checksig
            elif addrtype == 33:
                script = 'a9'  # op_hash_160
                script += push_script(hash_160.encode('hex'))
                script += '87'  # op_equal
            else:
                raise
        else:
            raise
        return script
    def input_script(self, txin, i, for_sig):
        # for_sig:
        #   -1   : do not sign, estimate length
        #   i>=0 : serialized tx for signing input i
        #   None : add all known signatures
        p2sh = txin.get('redeemScript') is not None
        num_sig = txin['num_sig'] if p2sh else 1
        address = txin['address']
        x_signatures = txin['signatures']
        signatures = filter(None, x_signatures)
        is_complete = len(signatures) == num_sig
        if for_sig in [-1, None]:
            # if we have enough signatures, we use the actual pubkeys
            # otherwise, use extended pubkeys (with bip32 derivation)
            if for_sig == -1:
                # we assume that signature will be 0x48 bytes long
                pubkeys = txin['pubkeys']
                sig_list = [ "00" * 0x48 ] * num_sig
            elif is_complete:
                pubkeys = txin['pubkeys']
                sig_list = ((sig + '01') for sig in signatures)
            else:
                pubkeys = txin['x_pubkeys']
                sig_list = ((sig + '01') if sig else NO_SIGNATURE for sig in x_signatures)
            script = ''.join(push_script(x) for x in sig_list)
            if not p2sh:
                x_pubkey = pubkeys[0]
                if x_pubkey is None:
                    # fall back to a 'fd'-encoded address payload
                    addrtype, h160 = bc_address_to_hash_160(txin['address'])
                    x_pubkey = 'fd' + (chr(addrtype) + h160).encode('hex')
                script += push_script(x_pubkey)
            else:
                script = '00' + script  # put op_0 in front of script
                redeem_script = self.multisig_script(pubkeys, num_sig)
                script += push_script(redeem_script)
        elif for_sig==i:
            # signing this input: scriptSig is the previous output's script
            script = txin['redeemScript'] if p2sh else self.pay_script('address', address)
        else:
            # signing some other input: this one's script stays empty
            script = ''
        return script
    def BIP_LI01_sort(self):
        # See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
        self.inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
        self.outputs.sort(key = lambda o: (o[2], self.pay_script(o[0], o[1])))
    def serialize(self, for_sig=None):
        """Serialize to hex. `for_sig` as in input_script; when signing,
        the 4-byte hash type (SIGHASH_ALL) is appended."""
        inputs = self.inputs
        outputs = self.outputs
        time = self.time  # bitspill
        print("Serializing transaction: time: %d"%time)
        s = int_to_hex(1,4)  # version
        s += int_to_hex(time,4)  # bitspill # nTime
        s += var_int( len(inputs) )  # number of inputs
        for i, txin in enumerate(inputs):
            s += txin['prevout_hash'].decode('hex')[::-1].encode('hex')  # prev hash
            s += int_to_hex(txin['prevout_n'], 4)  # prev index
            script = self.input_script(txin, i, for_sig)
            s += var_int( len(script)/2 )  # script length
            s += script
            s += "ffffffff"  # sequence
        s += var_int( len(outputs) )  # number of outputs
        for output in outputs:
            output_type, addr, amount = output
            s += int_to_hex( amount, 8)  # amount
            script = self.pay_script(output_type, addr)
            s += var_int( len(script)/2 )  # script length
            s += script  # script
        s += int_to_hex(0,4)  # lock time
        if for_sig is not None and for_sig != -1:
            s += int_to_hex(1, 4)  # hash type
        return s
    def tx_for_sig(self,i):
        """Serialization used as the signing preimage for input i."""
        return self.serialize(for_sig = i)
    def hash(self):
        """Double-SHA256 txid of self.raw, displayed byte-reversed."""
        return Hash(self.raw.decode('hex') )[::-1].encode('hex')
    def add_input(self, input):
        self.inputs.append(input)
        self.raw = None  # invalidate the cached serialization
    def input_value(self):
        return sum(x['value'] for x in self.inputs)
    def output_value(self):
        return sum( val for tp,addr,val in self.outputs)
    def get_fee(self):
        return self.input_value() - self.output_value()
    def signature_count(self):
        """Return (present, required) signature counts over all inputs."""
        r = 0
        s = 0
        for txin in self.inputs:
            if txin.get('is_coinbase'):
                continue
            signatures = filter(None, txin.get('signatures',[]))
            s += len(signatures)
            r += txin.get('num_sig',-1)
        return s, r
    def is_complete(self):
        s, r = self.signature_count()
        return r == s
    def inputs_to_sign(self):
        """Return the set of x_pubkeys that still need to sign."""
        out = set()
        for txin in self.inputs:
            num_sig = txin.get('num_sig')
            if num_sig is None:
                continue
            x_signatures = txin['signatures']
            signatures = filter(None, x_signatures)
            if len(signatures) == num_sig:
                # input is complete
                continue
            for k, x_pubkey in enumerate(txin['x_pubkeys']):
                if x_signatures[k] is not None:
                    # this pubkey already signed
                    continue
                out.add(x_pubkey)
        return out
    def sign(self, keypairs):
        """Sign every input whose x_pubkey has a private key in `keypairs`
        (a {x_pubkey: secret} dict); then re-serialize."""
        for i, txin in enumerate(self.inputs):
            num = txin['num_sig']
            for x_pubkey in txin['x_pubkeys']:
                signatures = filter(None, txin['signatures'])
                if len(signatures) == num:
                    # txin is complete
                    break
                if x_pubkey in keypairs.keys():
                    print_error("adding signature for", x_pubkey)
                    # add pubkey to txin
                    txin = self.inputs[i]
                    x_pubkeys = txin['x_pubkeys']
                    ii = x_pubkeys.index(x_pubkey)
                    sec = keypairs[x_pubkey]
                    pubkey = public_key_from_private_key(sec)
                    txin['x_pubkeys'][ii] = pubkey
                    txin['pubkeys'][ii] = pubkey
                    self.inputs[i] = txin
                    # add signature
                    for_sig = Hash(self.tx_for_sig(i).decode('hex'))
                    pkey = regenerate_key(sec)
                    secexp = pkey.secret
                    private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
                    public_key = private_key.get_verifying_key()
                    sig = private_key.sign_digest_deterministic( for_sig, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der )
                    assert public_key.verify_digest( sig, for_sig, sigdecode = ecdsa.util.sigdecode_der)
                    txin['signatures'][ii] = sig.encode('hex')
                    self.inputs[i] = txin
        print_error("is_complete", self.is_complete())
        self.raw = self.serialize()
    def get_outputs(self):
        """convert pubkeys to addresses"""
        o = []
        for type, x, v in self.outputs:
            if type == 'address':
                addr = x
            elif type == 'pubkey':
                addr = public_key_to_bc_address(x.decode('hex'))
            else:
                addr = 'SCRIPT ' + x.encode('hex')
            o.append((addr,v))  # consider using yield (addr, v)
        return o
    def get_output_addresses(self):
        return [addr for addr, val in self.get_outputs()]
    def has_address(self, addr):
        """True if `addr` appears in any output or any input."""
        return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs))
    def as_dict(self):
        """Return {'hex': raw, 'complete': bool} for export."""
        if self.raw is None:
            self.raw = self.serialize()
        self.deserialize()
        out = {
            'hex': self.raw,
            'complete': self.is_complete()
        }
        return out
    def requires_fee(self, wallet):
        # see https://en.bitcoin.it/wiki/Transaction_fees
        #
        # size must be smaller than 1 kbyte for free tx
        size = len(self.serialize(-1))/2
        if size >= 10000:
            return True
        # all outputs must be 0.01 BTC or larger for free tx
        for addr, value in self.get_outputs():
            if value < 1000000:
                return True
        # priority must be large enough for free tx
        threshold = 57600000
        weight = 0
        for txin in self.inputs:
            age = wallet.get_confirmations(txin["prevout_hash"])[0]
            weight += txin["value"] * age
        priority = weight / size
        print_error(priority, threshold)
        return priority < threshold
| harwee/electrum-xvg-tor | lib/transaction.py | Python | gpl-3.0 | 29,682 |
# Copyright (c) 2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
# based on http://protips.readthedocs.io/link-roles.html
from __future__ import print_function
from __future__ import unicode_literals
import re
from docutils import nodes
from local_util import run_cmd_get_output
def get_github_rev():
    """Return the git revision to link against.

    Uses the exact tag on HEAD when one exists, otherwise 'master'.
    """
    exact_tag = run_cmd_get_output('git describe --exact-match')
    return exact_tag or 'master'
def setup(app):
    """Sphinx entry point: register :zephyr_file: and :zephyr_raw: roles
    that link into the Zephyr GitHub tree at the current revision."""
    rev = get_github_rev()
    baseurl = 'https://github.com/zephyrproject-rtos/zephyr'
    # 'blob' renders the file in the GitHub UI, 'raw' serves its contents.
    for role_name, view in (('zephyr_file', 'blob'), ('zephyr_raw', 'raw')):
        app.add_role(role_name, autolink('{}/{}/{}/%s'.format(baseurl, view, rev)))
    # The role just creates new nodes based on information in the
    # arguments; its behavior doesn't depend on any other documents.
    return {
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
def autolink(pattern):
    """Return a Sphinx role that expands ``pattern % target`` into a link.

    The role text is either a bare ``target`` or ``link text <target>``.
    """
    def role(name, rawtext, text, lineno, inliner, options={}, content=[]):
        explicit = re.search(r'(.*)\s*<(.*)>', text)
        if explicit:
            link_text, target = explicit.group(1), explicit.group(2)
        else:
            link_text = target = text
        node = nodes.reference(rawtext, link_text,
                               refuri=pattern % (target,), **options)
        return [node], []
    return role
| ldts/zephyr | doc/extensions/zephyr/link-roles.py | Python | apache-2.0 | 1,423 |
"""
When squirrels get together for a party, they like to have cigars.
A squirrel party is successful when the number of cigars is between
40 and 60, inclusive. Unless it is the weekend, in which case there
is no upper bound on the number of cigars.
Return True if the party with the given values is successful,
or False otherwise.
"""
def cigar_party(cigars, weekend):
    """Return True when the party is successful.

    A party succeeds with 40 to 60 cigars inclusive; on weekends there is
    no upper bound, so any count of at least 40 succeeds.
    """
    # Resolved an unresolved git merge conflict: both sides were identical
    # except for the parameter name; kept the more descriptive 'cigars'.
    return cigars >= 40 and (cigars <= 60 or weekend)
| UWPCE-PythonCert/IntroPython2016 | students/baumel/session_06/cigar_party.py | Python | unlicense | 579 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
import api_schema_graph
from availability_finder import AvailabilityFinder, AvailabilityInfo
from branch_utility import BranchUtility, ChannelInfo
from compiled_file_system import CompiledFileSystem
from fake_host_file_system_provider import FakeHostFileSystemProvider
from fake_url_fetcher import FakeUrlFetcher
from host_file_system_iterator import HostFileSystemIterator
from mock_function import MockFunction
from object_store_creator import ObjectStoreCreator
from test_data.canned_data import (CANNED_API_FILE_SYSTEM_DATA, CANNED_BRANCHES)
from test_data.object_level_availability.tabs import TABS_SCHEMA_BRANCHES
from test_util import Server2Path
# Branch versions whose tabs schema test data is treated as unchanged from
# the previous version; setUp leaves their fake stat values untouched.
TABS_UNMODIFIED_VERSIONS = (16, 20, 23, 24)
class AvailabilityFinderTest(unittest.TestCase):
  def setUp(self):
    """Build two AvailabilityFinders over canned fake file systems: one for
    API-level tests (_avail_finder) and one for object-level tests
    (_node_avail_finder), then seed per-version stat data for the latter."""
    self._branch_utility = BranchUtility(
        os.path.join('branch_utility', 'first.json'),
        os.path.join('branch_utility', 'second.json'),
        FakeUrlFetcher(Server2Path('test_data')),
        ObjectStoreCreator.ForTest())
    api_fs_creator = FakeHostFileSystemProvider(CANNED_API_FILE_SYSTEM_DATA)
    self._node_fs_creator = FakeHostFileSystemProvider(TABS_SCHEMA_BRANCHES)

    def create_availability_finder(host_fs_creator):
      test_object_store = ObjectStoreCreator.ForTest()
      return AvailabilityFinder(
          self._branch_utility,
          CompiledFileSystem.Factory(test_object_store),
          HostFileSystemIterator(host_fs_creator,
                                 self._branch_utility),
          host_fs_creator.GetTrunk(),
          test_object_store)

    self._avail_finder = create_availability_finder(api_fs_creator)
    self._node_avail_finder = create_availability_finder(self._node_fs_creator)

    # Imitate the actual SVN file system by incrementing the stats for paths
    # where an API schema has changed.
    last_stat = type('last_stat', (object,), {'val': 0})

    def stat_paths(file_system, channel_info):
      if channel_info.version not in TABS_UNMODIFIED_VERSIONS:
        last_stat.val += 1
      # HACK: |file_system| is a MockFileSystem backed by a TestFileSystem.
      # Increment the TestFileSystem stat count.
      file_system._file_system.IncrementStat(by=last_stat.val)
      # Continue looping. The iterator will stop after 'trunk' automatically.
      return True

    # Use the HostFileSystemIterator created above to change global stat values
    # for the TestFileSystems that it creates.
    self._node_avail_finder._file_system_iterator.Ascending(
        # The earliest version represented with the tabs' test data is 13.
        self._branch_utility.GetStableChannelInfo(13),
        stat_paths)
def testGraphOptimization(self):
# Keep track of how many times the APISchemaGraph constructor is called.
original_constructor = api_schema_graph.APISchemaGraph
mock_constructor = MockFunction(original_constructor)
api_schema_graph.APISchemaGraph = mock_constructor
try:
# The test data includes an extra branch where the API does not exist.
num_versions = len(TABS_SCHEMA_BRANCHES) - 1
# We expect an APISchemaGraph to be created only when an API schema file
# has different stat data from the previous version's schema file.
num_graphs_created = num_versions - len(TABS_UNMODIFIED_VERSIONS)
# Run the logic for object-level availability for an API.
self._node_avail_finder.GetAPINodeAvailability('tabs')
self.assertTrue(*api_schema_graph.APISchemaGraph.CheckAndReset(
num_graphs_created))
finally:
# Ensure that the APISchemaGraph constructor is reset to be the original
# constructor.
api_schema_graph.APISchemaGraph = original_constructor
def testGetAPIAvailability(self):
# Key: Using 'channel' (i.e. 'beta') to represent an availability listing
# for an API in a _features.json file, and using |channel| (i.e. |dev|) to
# represent the development channel, or phase of development, where an API's
# availability is being checked.
# Testing APIs with predetermined availability.
self.assertEqual(
AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
self._avail_finder.GetAPIAvailability('jsonTrunkAPI'))
self.assertEqual(
AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
self._avail_finder.GetAPIAvailability('jsonDevAPI'))
self.assertEqual(
AvailabilityInfo(ChannelInfo('beta', CANNED_BRANCHES[27], 27)),
self._avail_finder.GetAPIAvailability('jsonBetaAPI'))
self.assertEqual(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[20], 20)),
self._avail_finder.GetAPIAvailability('jsonStableAPI'))
# Testing a whitelisted API.
self.assertEquals(
AvailabilityInfo(ChannelInfo('beta', CANNED_BRANCHES[27], 27)),
self._avail_finder.GetAPIAvailability('declarativeWebRequest'))
# Testing APIs found only by checking file system existence.
self.assertEquals(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[23], 23)),
self._avail_finder.GetAPIAvailability('windows'))
self.assertEquals(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[18], 18)),
self._avail_finder.GetAPIAvailability('tabs'))
self.assertEquals(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[18], 18)),
self._avail_finder.GetAPIAvailability('input.ime'))
# Testing API channel existence for _api_features.json.
# Listed as 'dev' on |beta|, 'dev' on |dev|.
self.assertEquals(
AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
self._avail_finder.GetAPIAvailability('systemInfo.stuff'))
# Listed as 'stable' on |beta|.
self.assertEquals(
AvailabilityInfo(
ChannelInfo('beta', CANNED_BRANCHES[27], 27),
scheduled=28),
self._avail_finder.GetAPIAvailability('systemInfo.cpu'))
# Testing API channel existence for _manifest_features.json.
# Listed as 'trunk' on all channels.
self.assertEquals(
AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
self._avail_finder.GetAPIAvailability('sync'))
# No records of API until |trunk|.
self.assertEquals(
AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
self._avail_finder.GetAPIAvailability('history'))
# Listed as 'dev' on |dev|.
self.assertEquals(
AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
self._avail_finder.GetAPIAvailability('storage'))
# Stable in _manifest_features and into pre-18 versions.
self.assertEquals(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[8], 8)),
self._avail_finder.GetAPIAvailability('pageAction'))
# Testing API channel existence for _permission_features.json.
# Listed as 'beta' on |trunk|.
self.assertEquals(
AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
self._avail_finder.GetAPIAvailability('falseBetaAPI'))
# Listed as 'trunk' on |trunk|.
self.assertEquals(
AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
self._avail_finder.GetAPIAvailability('trunkAPI'))
# Listed as 'trunk' on all development channels.
self.assertEquals(
AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
self._avail_finder.GetAPIAvailability('declarativeContent'))
# Listed as 'dev' on all development channels.
self.assertEquals(
AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
self._avail_finder.GetAPIAvailability('bluetooth'))
# Listed as 'dev' on |dev|.
self.assertEquals(
AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
self._avail_finder.GetAPIAvailability('cookies'))
# Treated as 'stable' APIs.
self.assertEquals(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[24], 24)),
self._avail_finder.GetAPIAvailability('alarms'))
self.assertEquals(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[21], 21)),
self._avail_finder.GetAPIAvailability('bookmarks'))
# Testing older API existence using extension_api.json.
self.assertEquals(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[6], 6)),
self._avail_finder.GetAPIAvailability('menus'))
self.assertEquals(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[5], 5)),
self._avail_finder.GetAPIAvailability('idle'))
# Switches between _features.json files across branches.
# Listed as 'trunk' on all channels, in _api, _permission, or _manifest.
self.assertEquals(
AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
self._avail_finder.GetAPIAvailability('contextMenus'))
# Moves between _permission and _manifest as file system is traversed.
self.assertEquals(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[23], 23)),
self._avail_finder.GetAPIAvailability('systemInfo.display'))
self.assertEquals(
AvailabilityInfo(ChannelInfo('stable', CANNED_BRANCHES[17], 17)),
self._avail_finder.GetAPIAvailability('webRequest'))
# Mid-upgrade cases:
# Listed as 'dev' on |beta| and 'beta' on |dev|.
self.assertEquals(
AvailabilityInfo(ChannelInfo('dev', CANNED_BRANCHES[28], 28)),
self._avail_finder.GetAPIAvailability('notifications'))
# Listed as 'beta' on |stable|, 'dev' on |beta| ... until |stable| on trunk.
self.assertEquals(
AvailabilityInfo(ChannelInfo('trunk', 'trunk', 'trunk')),
self._avail_finder.GetAPIAvailability('events'))
def testGetAPINodeAvailability(self):
# Allow the LookupResult constructions below to take just one line.
lookup_result = api_schema_graph.LookupResult
availability_graph = self._node_avail_finder.GetAPINodeAvailability('tabs')
self.assertEquals(
lookup_result(True, self._branch_utility.GetChannelInfo('trunk')),
availability_graph.Lookup('tabs', 'properties',
'fakeTabsProperty3'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetChannelInfo('dev')),
availability_graph.Lookup('tabs', 'events', 'onActivated',
'parameters', 'activeInfo', 'properties',
'windowId'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetChannelInfo('dev')),
availability_graph.Lookup('tabs', 'events', 'onUpdated', 'parameters',
'tab'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetChannelInfo('beta')),
availability_graph.Lookup('tabs', 'events','onActivated'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetChannelInfo('beta')),
availability_graph.Lookup('tabs', 'functions', 'get', 'parameters',
'tabId'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetChannelInfo('stable')),
availability_graph.Lookup('tabs', 'types', 'InjectDetails',
'properties', 'code'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetChannelInfo('stable')),
availability_graph.Lookup('tabs', 'types', 'InjectDetails',
'properties', 'file'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(25)),
availability_graph.Lookup('tabs', 'types', 'InjectDetails'))
# Nothing new in version 24 or 23.
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(22)),
availability_graph.Lookup('tabs', 'types', 'Tab', 'properties',
'windowId'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(21)),
availability_graph.Lookup('tabs', 'types', 'Tab', 'properties',
'selected'))
# Nothing new in version 20.
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(19)),
availability_graph.Lookup('tabs', 'functions', 'getCurrent'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(18)),
availability_graph.Lookup('tabs', 'types', 'Tab', 'properties',
'index'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(17)),
availability_graph.Lookup('tabs', 'events', 'onUpdated', 'parameters',
'changeInfo'))
# Nothing new in version 16.
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(15)),
availability_graph.Lookup('tabs', 'properties',
'fakeTabsProperty2'))
# Everything else is available at the API's release, version 14 here.
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(14)),
availability_graph.Lookup('tabs', 'types', 'Tab'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(14)),
availability_graph.Lookup('tabs', 'types', 'Tab',
'properties', 'url'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(14)),
availability_graph.Lookup('tabs', 'properties',
'fakeTabsProperty1'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(14)),
availability_graph.Lookup('tabs', 'functions', 'get', 'parameters',
'callback'))
self.assertEquals(
lookup_result(True, self._branch_utility.GetStableChannelInfo(14)),
availability_graph.Lookup('tabs', 'events', 'onUpdated'))
# Test things that aren't available.
self.assertEqual(lookup_result(False, None),
availability_graph.Lookup('tabs', 'types',
'UpdateInfo'))
self.assertEqual(lookup_result(False, None),
availability_graph.Lookup('tabs', 'functions', 'get',
'parameters', 'callback',
'parameters', 'tab', 'id'))
self.assertEqual(lookup_result(False, None),
availability_graph.Lookup('functions'))
self.assertEqual(lookup_result(False, None),
availability_graph.Lookup('events', 'onActivated',
'parameters', 'activeInfo',
'tabId'))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| boundarydevices/android_external_chromium_org | chrome/common/extensions/docs/server2/availability_finder_test.py | Python | bsd-3-clause | 15,139 |
from sqlalchemy import *
from sqlalchemy.ext.declarative import *
from sqlalchemy.orm import sessionmaker
class DataAccess():
    """Owns the SQLAlchemy engine and a shared Session for the votaapp DB."""

    def __init__(self):
        # NOTE(review): connection credentials are hard-coded; consider
        # moving them to configuration.
        self.engine = create_engine('mysql+pymysql://root:root@localhost:3306/votaapp?charset=utf8', encoding='utf-8')
        base = declarative_base()
        base.metadata.bind = self.engine
        # BUG FIX: assigning `.bind` on a sessionmaker instance after
        # construction is not the documented configuration path and the
        # sessions it produces were not bound; pass bind= at construction
        # (the SQLAlchemy-documented equivalent).
        session_factory = sessionmaker(bind=self.engine)
        self.session = session_factory()

    def get_session(self):
        """Return the shared Session instance."""
        return self.session

    def get_engine(self):
        """Return the underlying Engine."""
        return self.engine
def get_data_access_instance():
    # Return the module-level singleton (created below at import time).
    return data_access
data_access = DataAccess() | villegabriel/votaapp | clases/data/DataAccess.py | Python | gpl-3.0 | 642 |
# ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'Brian Wickman'
from .server import HttpServer
__all__ = [
'HttpServer',
]
| imsut/commons | src/python/twitter/common/http/__init__.py | Python | apache-2.0 | 993 |
import os
from string import Template
from PyQt4.QtCore import QUrl
def get_template(name):
    """
    Load the named HTML file from this package's directory and wrap it in
    a string.Template.

    :param name: base name of the template file (without the ".html" suffix)
    :return: a string.Template built from the file's contents
    """
    template_dir = os.path.dirname(__file__)
    template_path = os.path.join(template_dir, name + ".html")
    with open(template_path) as template_file:
        return Template(template_file.read())
# Absolute directory of this package, used to resolve template assets.
base = os.path.dirname(os.path.abspath(__file__))
# Qt base URL for this directory; the trailing slash makes relative
# resources resolve inside the templates folder.
baseurl = QUrl.fromLocalFile(base + '/')
| lmotta/Roam | src/roam/templates/__init__.py | Python | gpl-2.0 | 474 |
#!/usr/bin/env python
# coding:utf-8
# by Samuel Chen
import sys
import os
import time
import subprocess
from cStringIO import StringIO
import pycurl
import urllib
import json
from collections import namedtuple
from sqlalchemy import create_engine
from email import Email
import setting_watch_dog as setting
# Run from the configured working directory so relative paths (report
# file, cache checker) resolve consistently, e.g. under cron.
os.chdir(setting._WORKDIR)
# Recipient lists for the two alert severities.
ALL_FAIL_NOTIFICATIONS = setting.ALL_FAIL_NOTIFICATIONS
ONE_FAIL_NOTIFICATIONS = setting.ONE_FAIL_NOTIFICATIONS
# Alert lines accumulated by the check_* functions during a run.
ALL_FAIL_ALERTS = []
ONE_FAIL_ALERTS = []
# check point & parameters
_HTTP = setting._HTTP
_WEB = setting._WEB
_CACHE = setting._CACHE
_SOLR = setting._SOLR
_DB_CHECK_KEY = setting._DB_CHECK_KEY
_DB_CHECK_VALUE = setting._DB_CHECK_VALUE
_DB = setting._DB
_API = setting._API
_JOB = setting._JOB
_DMS = setting._DMS
_UA = setting._UA
_TIMEOUTS = setting._TIMEOUTS
_TIMEOUT_ERRORS = setting._TIMEOUT_ERRORS
_SUCCEED_CODE = setting._SUCCEED_CODE
# Per-service report lines, flushed to watch-dog-report.txt by check().
_REPORTS = {
    'http': [],
    'web': [],
    'cache': [],
    'solr': [],
    'api': [],
    'db': [],
    'job': [],
    'dms': [],
}
def check_web():
    """Probe every configured WEB APP URL and record results.

    Appends per-URL lines to _REPORTS['web'].  Returns True only if every
    URL check succeeded; queues an ALL_FAIL_ALERTS entry when every check
    failed, or a ONE_FAIL_ALERTS entry when only some failed.
    """
    all_succeed = True
    all_fail = True
    reports = _REPORTS['web']
    reports.append('')
    reports.append('========== WEB APP Check ===========')
    urls = _WEB['urls']
    for url in urls:
        reports.append('')
        succeed = check_url(url, reports)
        all_succeed = all_succeed and succeed
        if succeed: all_fail = False
        # alert if required
        print succeed and 'Succeed' or 'Fail'
    reports.append('========== WEB APP END ===========')
    # alert
    if all_fail:
        ALL_FAIL_ALERTS.append('All tests FAILED for WEB APP')
    elif not all_succeed:
        ONE_FAIL_ALERTS.append('At least one test FAILED for WEB APP')
    return all_succeed
def check_http():
    """Probe every configured plain-HTTP URL and record results.

    Appends per-URL lines to _REPORTS['http'].  Returns True only if every
    URL check succeeded; queues an ALL_FAIL_ALERTS entry when every check
    failed, or a ONE_FAIL_ALERTS entry when only some failed.
    """
    all_succeed = True
    all_fail = True
    reports = _REPORTS['http']
    reports.append('')
    reports.append('========== HTTP Check ===========')
    urls = _HTTP['urls']
    for url in urls:
        reports.append('')
        succeed = check_url(url, reports)
        all_succeed = all_succeed and succeed
        if succeed: all_fail = False
        # alert if required
        print succeed and 'Succeed' or 'Fail'
    reports.append('========== HTTP END ===========')
    # alert
    if all_fail:
        ALL_FAIL_ALERTS.append('All tests FAILED for HTTP access')
    elif not all_succeed:
        ONE_FAIL_ALERTS.append('At least one test FAILED for HTTP access')
    return all_succeed
def check_url(url, reports, resp_callback=None, data={}, method='GET'):
    '''
    Request `url`, retrying with progressively longer timeouts, and report.

    url: the URL to request
    reports: report list to append to (falls back to _REPORTS['http'])
    resp_callback: optional validator; called with the response body text,
        must return True/False
    data: payload to send; if non-empty the request carries POST fields
    method: HTTP method name ('GET', 'POST' or 'PUT')

    Returns True when the request completed, its HTTP status is in
    _SUCCEED_CODE and the callback (if any) accepted the body.

    NOTE(review): `data={}` is a mutable default; safe only as long as no
    caller mutates the default instance.
    '''
    succeed = False
    if not reports:
        reports = _REPORTS['http']
    ti = 0 # timeout index
    ti_max = len(_TIMEOUTS)
    retries = 0 # retry count
    print '>>>>> checking %s' % url
    reports.append('check %s' % url)
    rc = None
    while not succeed:
        if ti >= ti_max: break
        timeout = _TIMEOUTS[ti]
        rc = request(url, timeout, data=data, method=method)
        #print rc
        error = rc['error']
        errstr = rc['errstr']
        if error != 0:
            retries += 1
            # On timeout-class errors move to the next (longer) timeout;
            # other errors retry with the same timeout.
            if error in _TIMEOUT_ERRORS:
                ti += 1
            msg = 'Retry %d. %s' % (retries, errstr)
            reports.append(msg)
            print (msg)
        else:
            succeed = True
            break
    reports.append('Finished. http-code:%d time-used:%2f' % (rc['code'], rc['time']))
    if succeed and not rc['code'] in _SUCCEED_CODE:
        succeed = False
    if succeed and None != resp_callback:
        succeed = resp_callback(rc['response'])
    reports.append(succeed and '*** SUCCEED ***' or '*** FAIL ***')
    return succeed
def check_cache():
    """Run the external cache-checker program against the cache cluster.

    Spawns _CACHE['checker'] with the cluster/user/password arguments and
    treats the run as successful when the checker's last stdout line
    starts with 'SUCCEED'.  Appends the checker output to
    _REPORTS['cache'] and queues alerts on failure.
    """
    all_succeed = True
    all_fail = False
    succeed = False
    checker = _CACHE['checker']
    workpath = _CACHE['workpath']
    servers = _CACHE['cluster']
    user = _CACHE['name']
    passwd = _CACHE['passwd']
    reports = _REPORTS['cache']
    reports.append('')
    reports.append('========== CACHE Check ===========')
    p = subprocess.Popen([checker, servers, user, passwd], cwd=workpath, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # The checker's verdict is the last stdout line; stderr is its log.
    rep = p.stdout.readlines()
    log = p.stderr.read()
    print log
    if rep[-1].startswith('SUCCEED'): succeed = True
    all_succeed = all_succeed and succeed
    if not succeed: all_fail = True
    reports.extend(rep)
    reports.append('========== CACHE END ===========')
    # if not all_succeed, alert
    if all_fail:
        ALL_FAIL_ALERTS.append('All tests FAILED for CACHE service')
    elif not all_succeed:
        ONE_FAIL_ALERTS.append('At least one test FAILED for CACHE service')
    return all_succeed
def check_solr():
    """Run the configured Solr queries and validate their JSON responses.

    A query passes when its response parses as JSON and
    response.numFound >= 10.  Appends results to _REPORTS['solr'] and
    queues alerts on failure.  Returns True only if every query passed.
    """
    all_succeed = True
    all_fail = True
    reports = _REPORTS['solr']
    reports.append('')
    reports.append('========== Solr Check ===========')
    queries = _SOLR['queries']
    def validate_solr(resp, core):
        # Validate one Solr response body for the given core.
        succeed = True
        print 'validating result ... '
        try:
            result = json.loads(resp)
            num = result['response']['numFound']
            print 'Found %d results in %s' % (num, core)
            reports.append('Found %d results in %s' % (num, core))
            # Fewer than 10 hits is treated as a degraded index.
            if num < 10:
                succeed = False
        except Exception, ex:
            print 'Expected exception:', ex
            succeed = False
        print succeed and 'Succeed validation' or 'Fail validation'
        return succeed
    for url, core in queries:
        reports.append('')
        reports.append('*** CORE %s ***' % core)
        # The lambda is invoked by check_url within this same iteration,
        # so the late-bound `core` is still the current one.
        succeed = check_url(url, reports, lambda resp: validate_solr(resp, core))
        all_succeed = all_succeed and succeed
        if succeed: all_fail = False
        # alert if required
    reports.append('========== SOLR END ===========')
    # alert
    if all_fail:
        ALL_FAIL_ALERTS.append('All tests FAILED for SOLR service')
    elif not all_succeed:
        ONE_FAIL_ALERTS.append('At least one test FAILED for SOLR service')
    return all_succeed
def check_api():
all_succeed = True
all_fail = True
reports = _REPORTS['api']
reports.append('')
reports.append('========== API Check ===========')
apis = _API['apis']
def validate_api(resp, api):
succeed = True
print 'validating result ... '
try:
result = json.loads(resp)
#print result
status = result['status']
msg = result['msg']
print 'API %s got result status:%s msg:%s' % (api, status, msg)
if api == 'login':
token = result['data']['access_token']
memid = result['data']['memid']
_API['token'] = token
_API['memid'] = memid
print 'Logged-in as %s. token:%s' % (memid, token)
elif api == 'get_followed':
orgid = result['data']['info'][0]['org_id']
orgname = result['data']['info'][0]['org_name']
_API['orgid'] = orgid
_API['orgname'] = orgname
print 'Get 1st followed organization "%s".' % org_name
reports.append('API %s got result status:%s msg:%s' % (api, status, msg))
except Exception, ex:
print 'Expected exception:', ex
succeed = False
print succeed and 'Succeed validation' or 'Fail validation'
return succeed
for api, method, url, data in apis:
print method, api
for k, v in data.items():
if v.startswith('$'):
data[k] = _API[v[1:]]
print k, data[k]
data['access_token'] = _API['token']
print data['access_token']
reports.append('')
reports.append('*** API %s ***' % api)
succeed = check_url(url, reports, lambda resp: validate_api(resp, api), data=data, method=method)
all_succeed = all_succeed and succeed
if succeed: all_fail = False
# alert if required
reports.append('========== API END ===========')
# if not all_succeed, alert
if all_fail:
ALL_FAIL_ALERTS.append('All tests FAILED for API service')
elif not all_succeed:
ONE_FAIL_ALERTS.append('At least one test FAILED for API service')
return all_succeed
def check_db():
all_succeed = True
all_fail = True
reports = _REPORTS['db']
reports.append('')
reports.append('========== DB Check ===========')
connections = _DB['connections']
queries = _DB['queries']
def validate_query(query):
succeed = True
q = query.strip().split()
if len(q) < 2:
print 'Warn: query is too short.'
#return False
msg = ''
cmd = q[0].upper()
if cmd == 'SELECT':
if not query.upper().find(' LIMIT '):
print ('No "LIMIT" clause found in "SELECT".')
reports.append('No "LIMIT" clause found in "SELECT".')
succeed = False
elif cmd == 'INSERT':
if not query.upper().find(' WHERE '):
print ('No "WHERE" clause found in "INSERT".')
reports.append('No "WHERE" clause found in "INSERT".')
succeed = False
elif cmd == 'UPDATE':
if not query.upper().find(' WHERE '):
print ('No "WHERE" clause found in "UPDATE".')
reports.append('No "WHERE" clause found in "UPDATE".')
succeed = False
elif cmd == 'DELETE':
if not query.upper().find(' WHERE '):
print ('No "WHERE" clause found in "DELETE".')
reports.append('No "WHERE" clause found in "DELETE".')
succeed = False
elif cmd == 'SHOW':
msg = 'No error'
pass
else:
msg = 'No error'
pass
print succeed and 'Succeed validating query' or 'Fail validating query'
return succeed
for dbconf, desc, query, expected_rowcount, expected_values in queries:
reports.append('')
reports.append('*** DB %s - %s ***' % (dbconf, desc))
succeed = True
conn = None
try:
engine = create_engine(connections[dbconf])
conn = engine.connect()
print '>> validating %s on "%s"... ' % (desc, dbconf)
succeed = validate_query(query)
rs = conn.execute(query)
if expected_rowcount >= 0 and rs.rowcount != expected_rowcount:
reports.append('Expected effecting %d row(s) but got %d row(s)' % (expected_rowcount, rs.rowcount))
succeed = False
if expected_values:
for k, v in expected_values.items():
if rs[k] != v:
reports.append('col "%s" is expected "%s" but got "%s"' % (k, str(v), str(rs[k])))
succeed = False
print succeed and 'Succeed validating result' or 'Fail validating result'
reports.append(succeed and 'SUCCEED' or 'SUCCEED')
except Exception,ex:
succeed = False
reports.append('Exception: %s' % ex)
print 'Exception:', ex
finally:
if conn: conn.close()
all_succeed = all_succeed and succeed
if succeed: all_fail = False
# alert if required
reports.append('========== DB END ===========')
# if not all_succeed, alert
if all_fail:
ALL_FAIL_ALERTS.append('All tests FAILED for DB service')
elif not all_succeed:
ONE_FAIL_ALERTS.append('At least one test FAILED for DB service')
return all_succeed
def check_job():
    # TODO: job-service check not implemented yet; the _JOB settings are
    # currently unused.
    pass
def check_dms():
    # TODO: DMS check not implemented yet; the _DMS settings are
    # currently unused.
    pass
def check():
    """Run every service check, write the consolidated report file and
    send alert emails if any check queued an alert.

    Note: check_job/check_dms are stubs and are not invoked here.
    """
    t1 = time.time()
    print check_http() and '@@@ HTTP SUCCEED' or '@@@ HTTP FAIL'
    print check_web() and '@@@ WEB SUCCEED' or '@@@ WEB FAIL'
    print check_solr() and '@@@ SOLR SUCCEED' or '@@@ SOLR FAIL'
    print check_api() and '@@@ API SUCCEED' or '@@@ API FAIL'
    print check_db() and '@@@ DB SUCCEED' or '@@@ DB FAIL'
    print check_cache() and '@@@ CACHE SUCCEED' or '@@@ CACHE FAIL'
    t2 = time.time()
    print "Used %d seconds" % (t2-t1)
    # Flush all per-service report lines to the report file (CRLF so the
    # attachment reads cleanly in mail clients).
    f = open('watch-dog-report.txt', 'w')
    for rep in _REPORTS.values():
        for line in rep:
            f.write(line)
            f.write('\r\n')
    f.close()
    attachments = []
    attachments.append(('report.txt','./watch-dog-report.txt'))
    if len(ALL_FAIL_ALERTS) > 0:
        print 'sending ALL_FAIL_ALERT ...'
        sendalert(ALL_FAIL_ALERTS, attachments, is_all_fail=True)
    if len(ONE_FAIL_ALERTS) > 0:
        print 'sending ONE_FAIL_ALERT ...'
        sendalert(ONE_FAIL_ALERTS, attachments, is_all_fail=False)
    return
def sendalert(alerts, attachments=[], is_all_fail=False):
    """Email the collected alert lines (plus attachments) to the
    configured recipients; all-fail alerts go to the escalation list.

    NOTE(review): the mutable default for `attachments` is harmless here
    only because this function never mutates it.
    """
    email = Email()
    emails = ONE_FAIL_NOTIFICATIONS['emails']
    if is_all_fail:
        emails = ALL_FAIL_NOTIFICATIONS['emails']
    # Include the monitoring host's name in the subject line.
    p = subprocess.Popen(['hostname'], stdout=subprocess.PIPE)
    hostname = p.stdout.read()
    report = StringIO()
    for x in alerts:
        print x
        report.writelines([x, '\r\n\r\n']);
    body = report.getvalue()
    report.close()
    subject = '[WARN] At least one tested failed - %s - %s' % (hostname, time.ctime())
    if is_all_fail:
        # NOTE(review): '[SERVE]' looks like a typo for '[SEVERE]' -- confirm
        # before changing, as mail filters may match on the current text.
        subject = '[SERVE] All TEST FAILED for a service - %s - %s' % (hostname, time.ctime())
    email.sender = 'Gagein <noreply@gagein.com>'
    retries = 3
    # Best-effort delivery: retry up to 3 times, then give up silently.
    while retries > 0:
        try:
            email.send(emails, subject, body, '', attachments)
            retries = 0
        except Exception, ex:
            retries = retries - 1
            print '... Retry due to exception: ', ex
def request(url, timeout, data={}, method='GET', **options):
    """Perform a single HTTP request with pycurl.

    Returns a dict with keys: 'error' (0 on success, a curl errno from the
    raised exception, or -1 for an unsupported method), 'errstr', 'code'
    (HTTP status), 'time' (total seconds) and 'response' (body text).
    Extra pycurl options may be passed via **options.
    """
    rc = {'error':0}
    buf = StringIO()
    curl = pycurl.Curl()
    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.USERAGENT, _UA)
    curl.setopt(pycurl.FOLLOWLOCATION, 1)
    curl.setopt(pycurl.MAXREDIRS, 5)
    #curl.setopt(pycurl.CONNECTTIMEOUT, 30)
    curl.setopt(pycurl.TIMEOUT, timeout)
    curl.setopt(pycurl.WRITEFUNCTION, buf.write)
    if data:
        # NOTE(review): POSTFIELDS is set even when method == 'GET'; with
        # libcurl that implies a POST request -- confirm callers only pass
        # data together with POST.
        curl.setopt(curl.POSTFIELDS, urllib.urlencode(data))
    if method == 'GET':
        curl.setopt(curl.HTTPGET, 1)
    elif method == 'POST':
        curl.setopt(curl.POST, 1)
    elif method == 'PUT':
        curl.setopt(curl.PUT, 1)
    else:
        rc['error'] = -1
        rc['errstr'] = 'HTTP method "%s" is not supported yet' % method
        print 'ERROR: %s' % rc['errstr']
        return rc
    try:
        # Apply caller-supplied pycurl options; invalid ones are ignored.
        for k,v in options.items():
            curl.setopt(k, v)
    except:
        pass
    try:
        curl.perform()
    except Exception, ex:
        rc['error'] = ex[0]
        #print ex
    # errstr/code/time are read unconditionally so the dict always has
    # the keys check_url() expects.
    rc['errstr'] = curl.errstr()
    rc['code'] = curl.getinfo(curl.HTTP_CODE)
    rc['time'] = curl.getinfo(curl.TOTAL_TIME)
    rc['response'] = buf.getvalue()
    #print rc['response']
    buf.close()
    curl.close()
    return rc
# Cron-friendly entry point: run the full check suite.
if __name__ == '__main__':
    check()
| samuelchen/server-monitor | watch-dog.py | Python | gpl-2.0 | 15,308 |
"""
Taken from
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
import pathlib
SETUP_PY_DIRECTORY = pathlib.Path(__file__).resolve().parent

# Get the long description from the README file
with open(SETUP_PY_DIRECTORY / 'README.md', encoding='utf-8') as f:
    long_description = f.read()

# Run-time dependencies are maintained in requirements.txt, one per line.
with open(SETUP_PY_DIRECTORY / 'requirements.txt', encoding='utf-8') as f:
    requirements_file_lines = f.read().strip().splitlines()

setup(
    name='iwant_bot',

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1.0',

    # NOTE(review): looks like leftover sample-project text -- confirm the
    # intended short description.
    description='A sample Python project',
    long_description=long_description,
    # BUG FIX: README.md is Markdown; without declaring the content type
    # PyPI renders the long description as plain text.
    long_description_content_type='text/markdown',

    # The project's main homepage.
    url='https://github.com/kiwicom/iwant-bot',

    # Author details
    author='kiwi.com interns',

    # Choose your license
    license='MIT',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Communications :: Chat :: Slack',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
    ],

    # What does your project relate to?
    keywords='slack chatbot',

    package_dir={"": "src"},

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages("src", exclude=['docs', 'tests']),

    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=requirements_file_lines,

    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'test': ['pytest'],
    },

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'sample=sample:main',
        ],
    },
    zip_safe=True,
)
| kiwicom/iwant-bot | setup.py | Python | mit | 3,191 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-17 02:21
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django; avoid hand-editing migrations that have
# already been applied.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        # The user model's table must exist before the 1:1 link is created.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='UserDB',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # One UserDB row per auth user; deleted together with the user.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='tsadmuser', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'User',
                'verbose_name_plural': 'Users',
            },
        ),
    ]
| tsadm/webapp | src/tsadmuser/migrations/0001_initial.py | Python | bsd-3-clause | 896 |
#!/usr/bin/env python
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import print_function
from twisted.spread import pb
from twisted.internet import reactor
from twisted.cred import credentials
class Client(pb.Referenceable):
    """PB chat client: logs in, joins #NeedAFourth and broadcasts one line."""

    def remote_print(self, message):
        # Invoked remotely by the server to display a chat message.
        print(message)

    def connect(self):
        """Log in to the chat server on localhost:8800 and run the reactor."""
        client_factory = pb.PBClientFactory()
        reactor.connectTCP("localhost", 8800, client_factory)
        creds = credentials.UsernamePassword("alice", "1234")
        login_deferred = client_factory.login(creds, client=self)
        login_deferred.addCallback(self.connected)
        reactor.run()

    def connected(self, perspective):
        print("connected, joining group #NeedAFourth")
        # Keep a reference to our User perspective.  If it were garbage
        # collected after this call, the server would consider us logged out.
        self.perspective = perspective
        join_deferred = perspective.callRemote("joinGroup", "#NeedAFourth")
        join_deferred.addCallback(self.gotGroup)

    def gotGroup(self, group):
        print("joined group, now sending a message to all members")
        # 'group' is a ViewPoint reference to the server-side Group object.
        send_deferred = group.callRemote("send", "You can call me Al.")
        send_deferred.addCallback(self.shutdown)

    def shutdown(self, result):
        # Message delivered; stop the reactor so the process exits.
        reactor.stop()
# Start the client; blocks inside reactor.run() until shutdown() stops it.
Client().connect()
| EricMuller/mywebmarks-backend | requirements/twisted/Twisted-17.1.0/docs/core/howto/listings/pb/chatclient.py | Python | mit | 1,425 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
# Copyright (C) 2007 Oliver Charles
# Copyright (C) 2007-2011 Philipp Wolfer
# Copyright (C) 2007, 2010, 2011 Lukáš Lalinský
# Copyright (C) 2011 Michael Wiencek
# Copyright (C) 2011-2012 Wieland Hoffmann
# Copyright (C) 2013-2014 Laurent Monin
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import traceback
from picard.coverart.providers import cover_art_providers, CoverArtProvider
from functools import partial
from picard import config, log
from picard.coverart.image import (CoverArtImageIOError,
CoverArtImageIdentificationError)
from PyQt4.QtCore import QObject
class CoverArt:
    def __init__(self, album, metadata, release):
        """Collect cover art for `album`'s MusicBrainz `release`.

        Downloaded images are appended to `metadata` (and to each new
        track's metadata) as they arrive.
        """
        self._queue_new()
        self.album = album
        self.metadata = metadata
        self.release = release
        # Set to True once a front image has been stored; used to skip
        # further type-less sources and to stop early when only front
        # images are wanted.
        self.front_image_found = False
def __repr__(self):
return "CoverArt for %r" % (self.album)
    def retrieve(self):
        """Retrieve available cover art images for the release.

        Does nothing when the user has disabled both tag and file cover
        art; otherwise builds the provider list and starts the download
        queue via next_in_queue().
        """
        if (not config.setting["save_images_to_tags"] and not
            config.setting["save_images_to_files"]):
            log.debug("Cover art disabled by user options.")
            return
        self.providers = cover_art_providers()
        self.next_in_queue()
    def _set_metadata(self, coverartimage, data):
        """Attach downloaded image bytes to `coverartimage` and store it.

        Images that can be saved to metadata are appended to the album's
        metadata and to every new track's metadata; others only produce a
        thumbnail log entry.  On CoverArtImageIOError the album load is
        marked failed and the error is re-raised so callers stop
        downloading; identification errors are recorded but not fatal.
        """
        try:
            coverartimage.set_data(data)
            if coverartimage.can_be_saved_to_metadata:
                log.debug("Cover art image stored to metadata: %r [%s]" % (
                    coverartimage,
                    coverartimage.imageinfo_as_string())
                )
                self.metadata.append_image(coverartimage)
                for track in self.album._new_tracks:
                    track.metadata.append_image(coverartimage)
                # If the image already was a front image,
                # there might still be some other non-CAA front
                # images in the queue - ignore them.
                if not self.front_image_found:
                    self.front_image_found = coverartimage.is_front_image()
            else:
                log.debug("Thumbnail for cover art image: %r [%s]" % (
                    coverartimage,
                    coverartimage.imageinfo_as_string())
                )
        except CoverArtImageIOError as e:
            self.album.error_append(unicode(e))
            self.album._finalize_loading(error=True)
            # Re-raise: if the temp folder is unusable nothing can be saved.
            raise e
        except CoverArtImageIdentificationError as e:
            self.album.error_append(unicode(e))
def _coverart_downloaded(self, coverartimage, data, http, error):
"""Handle finished download, save it to metadata"""
self.album._requests -= 1
if error:
self.album.error_append(u'Coverart error: %s' % (unicode(http.errorString())))
elif len(data) < 1000:
log.warning("Not enough data, skipping %s" % coverartimage)
else:
self._message(
N_("Cover art of type '%(type)s' downloaded for %(albumid)s from %(host)s"),
{
'type': coverartimage.types_as_string(),
'albumid': self.album.id,
'host': coverartimage.host
},
echo=None
)
try:
self._set_metadata(coverartimage, data)
except CoverArtImageIOError:
# It doesn't make sense to store/download more images if we can't
# save them in the temporary folder, abort.
return
self.next_in_queue()
def next_in_queue(self):
"""Downloads next item in queue.
If there are none left, loading of album will be finalized.
"""
if self.album.id not in self.album.tagger.albums:
# album removed
return
if (self.front_image_found and
config.setting["save_images_to_tags"] and not
config.setting["save_images_to_files"] and
config.setting["save_only_front_images_to_tags"]):
# no need to continue
self.album._finalize_loading(None)
return
if self._queue_empty():
if self.providers:
# requeue from next provider
provider = self.providers.pop(0)
ret = CoverArtProvider._STARTED
try:
p = provider(self)
if p.enabled():
log.debug("Trying cover art provider %s ..." %
provider.NAME)
ret = p.queue_images()
else:
log.debug("Skipping cover art provider %s ..." %
provider.NAME)
except:
log.error(traceback.format_exc())
raise
finally:
if ret != CoverArtProvider.WAIT:
self.next_in_queue()
return
else:
# nothing more to do
self.album._finalize_loading(None)
return
# We still have some items to try!
coverartimage = self._queue_get()
if not coverartimage.support_types and self.front_image_found:
# we already have one front image, no need to try other type-less
# sources
log.debug("Skipping %r, one front image is already available",
coverartimage)
self.next_in_queue()
return
# local files
if hasattr(coverartimage, 'filepath'):
data = None
try:
with open(coverartimage.filepath, 'rb') as file:
self._set_metadata(coverartimage, file.read())
except IOError, (errnum, errmsg):
log.error("Failed to read %r: %s (%d)" %
(coverartimage.from_file, errmsg, errnum))
except CoverArtImageIOError:
# It doesn't make sense to store/download more images if we can't
# save them in the temporary folder, abort.
return
self.next_in_queue()
return
# on the web
self._message(
N_("Downloading cover art of type '%(type)s' for %(albumid)s from %(host)s ..."),
{
'type': coverartimage.types_as_string(),
'albumid': self.album.id,
'host': coverartimage.host
},
echo=None
)
log.debug("Downloading %r" % coverartimage)
self.album.tagger.xmlws.download(
coverartimage.host,
coverartimage.port,
coverartimage.path,
partial(self._coverart_downloaded, coverartimage),
priority=True,
important=False
)
self.album._requests += 1
def queue_put(self, coverartimage):
"Add an image to queue"
log.debug("Queuing cover art image %r", coverartimage)
self.__queue.append(coverartimage)
def _queue_get(self):
"Get next image and remove it from queue"
return self.__queue.pop(0)
def _queue_empty(self):
"Returns True if the queue is empty"
return not self.__queue
def _queue_new(self):
"Initialize the queue"
self.__queue = []
def _message(self, *args, **kwargs):
"""Display message to status bar"""
QObject.tagger.window.set_statusbar_message(*args, **kwargs)
def coverart(album, metadata, release):
    """Build a CoverArt handler for *release* and start retrieving every
    cover art URL found in the metadata."""
    handler = CoverArt(album, metadata, release)
    log.debug("New %r", handler)
    handler.retrieve()
| jvoegele/picard | picard/coverart/__init__.py | Python | gpl-2.0 | 8,616 |
# -*- test-case-name: twisted.test.test_log -*-
# Twisted, the Framework of Your Internet
# Copyright (C) 2001 Matthew W. Lefkowitz
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Logging and metrics infrastructure.
"""
# System Imports
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import sys
import time
import threadable
import failure
import warnings
# Sibling Imports
import context
class ILogContext:
    """Actually, this interface is just a synonym for the dictionary interface,
    but it serves as a key for the default information in a log.
    I do not inherit from Interface because the world is a cruel place.
    """
# Register the default log context: events are not errors and come from an
# unknown ("-") system until callWithContext/callWithLogger overrides this.
context.setDefault(ILogContext,
                   {"isError": 0,
                    "system": "-"})
def callWithContext(ctx, func, *args, **kw):
    """Invoke func(*args, **kw) with the current log context extended by ctx."""
    merged = dict(context.get(ILogContext))
    merged.update(ctx)
    return context.call({ILogContext: merged}, func, *args, **kw)
def callWithLogger(logger, func, *args, **kw):
    """Run func(*args, **kw) in a log context whose system name comes from
    logger.logPrefix(); any failure is logged via err() rather than
    propagated.
    """
    try:
        prefix = logger.logPrefix()
    except:
        prefix = '(buggy logPrefix method)'
        err(system=prefix)
    try:
        callWithContext({"system": prefix}, func, *args, **kw)
    except:
        err(system=prefix)
def write(stuff):
    """Write some data to the log.

    Deprecated; call msg() directly instead.
    """
    warnings.warn("What the hell is wrong with you?", DeprecationWarning)
    text = str(stuff)
    msg(text)
def debug(*stuff, **otherstuff):
    """Log *stuff* flagged as debug output, which is rendered indented so it
    is easier to distinguish from 'normal' log output.
    """
    msg(debug=1, *stuff, **otherstuff)
def showwarning(message, category, filename, lineno, file=None):
    """Replacement for warnings.showwarning that routes warnings into the
    logging system when no explicit destination file is given.

    When *file* is supplied, delegate to the original showwarning hook so
    the warning text reaches that file.
    """
    if file is None:
        msg(warning=message, category=category, filename=filename, lineno=lineno,
            format="%(filename)s:%(lineno)s: %(category)s: %(warning)s")
    else:
        # FIX: forward the supplied file object; previously `filename` was
        # passed in the `file` position, sending the warning text to the
        # wrong destination.
        _oldshowwarning(message, category, filename, lineno, file)
# State for the error-buffering test helpers below (startKeepingErrors /
# flushErrors / ignoreErrors / clearIgnores). When _keepErrors is true,
# err() stores Failures in _keptErrors unless they match a type listed in
# _ignoreErrors.
_keepErrors = 0
_keptErrors = []
_ignoreErrors = []
def startKeepingErrors():
    """Support function for testing frameworks.

    Begin buffering logged failures so they can later be retrieved (and
    cleared) with flushErrors().
    """
    global _keepErrors
    _keepErrors = 1
def flushErrors(*errorTypes):
    """Support function for testing frameworks.

    Return (and clear) the failures kept since the last call.  When
    errorTypes are given, any kept failure matching none of them is
    re-logged through err() before the full list is returned.
    """
    global _keptErrors
    kept = _keptErrors
    _keptErrors = []
    if errorTypes:
        for kept_failure in kept:
            matched = any(kept_failure.check(errT) for errT in errorTypes)
            if not matched:
                err(kept_failure)
    return kept
def ignoreErrors(*types):
    """Register failure types that err() should drop instead of keeping.

    (Rewritten with extend(); the old loop shadowed the builtin `type`.)
    """
    _ignoreErrors.extend(types)
def clearIgnores():
    """Forget every failure type previously registered via ignoreErrors()."""
    global _ignoreErrors
    _ignoreErrors = []
def err(_stuff=None,**kw):
    """Write a failure to the log.

    If ``_stuff`` is None, capture the exception currently being handled as
    a Failure.  Failures may be buffered in _keptErrors (when _keepErrors is
    set) unless their type matches an entry in _ignoreErrors; they are then
    published with ``isError=1``.
    """
    if _stuff is None:
        _stuff = failure.Failure()
    if isinstance(_stuff, failure.Failure):
        if _keepErrors:
            if _ignoreErrors:
                keep = 0
                # NOTE(review): the loop variable shadows this function's
                # name locally; harmless here but easy to misread.
                for err in _ignoreErrors:
                    r = _stuff.check(err)
                    if r:
                        # Matches an ignored type: drop it.
                        keep = 0
                        break
                    else:
                        keep = 1
                if keep:
                    _keptErrors.append(_stuff)
            else:
                _keptErrors.append(_stuff)
        msg(failure=_stuff, isError=1, **kw)
    elif isinstance(_stuff, Exception):
        msg(failure=failure.Failure(_stuff), isError=1, **kw)
    else:
        # Not a Failure/Exception: log its repr as an error message.
        msg(repr(_stuff), isError=1, **kw)
deferr = err
class Logger:
    """Mix-in for objects that 'own' a log; subclass and override
    logPrefix() to customize the per-line prefix."""

    def logPrefix(self):
        """Return the prefix inserted in front of every output line
        produced on behalf of this object.  May be called more times than
        the number of output lines.
        """
        return '-'
class EscapeFromTheMeaninglessConfinesOfCapital:
    """Backwards-compatibility stub: log 'ownership' is deprecated and both
    methods only emit DeprecationWarnings."""

    def own(self, owner):
        message = "Foolish capitalist! Your opulent toilet will be your undoing!!"
        warnings.warn(message, DeprecationWarning, stacklevel=2)

    def disown(self, owner):
        message = "The proletariat is victorious."
        warnings.warn(message, DeprecationWarning, stacklevel=2)

logOwner = EscapeFromTheMeaninglessConfinesOfCapital()
class LogPublisher:
    """Singleton hub that fans each logged event dict out to all observers."""

    synchronized = ['msg']

    def __init__(self):
        self.observers = []

    def addObserver(self, other):
        """Register *other*, a callable invoked with each new log event
        dict."""
        self.observers.append(other)

    def removeObserver(self, other):
        """Unregister a previously added observer."""
        self.observers.remove(other)

    def msg(self, *message, **kw):
        """Publish a log event assembled from the default context, the
        keyword metadata, the message tuple and the current time."""
        event = dict(context.get(ILogContext) or {})
        event.update(kw)
        event['message'] = message
        event['time'] = time.time()
        for observer in self.observers:
            observer(event)
# Create the singleton publisher exactly once; a reload() of this module
# keeps the existing instance (and its registered observers) alive.
try:
    theLogPublisher
except NameError:
    theLogPublisher = LogPublisher()
# Module-level shortcuts bound to the singleton's methods.
addObserver = theLogPublisher.addObserver
removeObserver = theLogPublisher.removeObserver
msg = theLogPublisher.msg
def initThreads():
    """Re-bind the module-level msg alias once threading is enabled so
    callers pick up the synchronized wrapper installed on LogPublisher."""
    global msg
    # after the log publisher is synchronized, grab its method again so we get
    # the hooked version
    msg = theLogPublisher.msg
# Make LogPublisher.msg thread-safe, and refresh the alias when threads start.
threadable.synchronize(LogPublisher)
threadable.whenThreaded(initThreads)
class FileLogObserver:
    """Log observer that formats each event and appends it to a file-like
    object supplied at construction time."""

    def __init__(self, f):
        self.write = f.write
        self.flush = f.flush

    def _emit(self, eventDict):
        parts = eventDict['message']
        if not parts:
            # No plain message: fall back to a traceback or a format string.
            if eventDict['isError'] and 'failure' in eventDict:
                text = eventDict['failure'].getTraceback()
            elif 'format' in eventDict:
                text = eventDict['format'] % eventDict
        else:
            text = ' '.join([str(m) for m in parts])
        when = time.localtime(eventDict['time'])
        y, mon, d, h, minute = when[:5]
        daylight = when[8]
        self.write("%0.4d/%0.2d/%0.2d %0.2d:%0.2d %s [%s] %s\n" %
                   (y, mon, d, h, minute, time.tzname[daylight],
                    eventDict['system'], text.replace("\n", "\n\t")))
        self.flush()  # make sure the line reaches the file promptly

    def start(self):
        """Begin receiving log events."""
        addObserver(self._emit)

    def stop(self):
        """Stop receiving log events."""
        removeObserver(self._emit)
class StdioOnnaStick:
    """File-like object that turns writes into log messages; used to stand
    in for sys.stdout / sys.stderr."""

    closed = 0
    softspace = 0
    mode = 'wb'
    name = '<stdio (log)>'

    def __init__(self, isError=0):
        self.isError = isError
        self.buf = ''

    def close(self):
        pass

    def fileno(self):
        return -1

    def flush(self):
        pass

    def read(self):
        raise IOError("can't read from the log!")

    # Every read-style operation is equally unsupported.
    readline = read
    readlines = read
    seek = read
    tell = read

    def write(self, data):
        # Buffer partial lines; emit one log message per completed line.
        pending = self.buf + data
        chunks = pending.split('\n')
        self.buf = chunks.pop()
        for line in chunks:
            msg(line, printed=1, isError=self.isError)

    def writelines(self, lines):
        for line in lines:
            msg(line, printed=1, isError=self.isError)
# Capture the original warnings.showwarning hook exactly once (surviving
# reload()); the showwarning() replacement delegates to it whenever an
# explicit file is supplied.
try:
    _oldshowwarning
except NameError:
    _oldshowwarning = None
def startLogging(file, setStdout=1):
    """Initialize logging to a specified file.

    Installs the log-aware showwarning hook (once), removes the default
    stderr observer, starts a FileLogObserver writing to *file* and, when
    setStdout is true, redirects sys.stdout/sys.stderr through the log.
    """
    # NOTE(review): the parameter name `file` shadows the py2 builtin; kept
    # for interface compatibility with existing keyword callers.
    global defaultObserver, _oldshowwarning
    if not _oldshowwarning:
        _oldshowwarning = warnings.showwarning
        warnings.showwarning = showwarning
    if defaultObserver:
        defaultObserver.stop()
        defaultObserver = None
    flo = FileLogObserver(file)
    flo.start()
    msg("Log opened.")
    if setStdout:
        # logfile/logerr are the module-level StdioOnnaStick instances.
        sys.stdout = logfile
        sys.stderr = logerr
class NullFile:
    """File-like object that silently discards everything written to it."""

    softspace = 0

    def read(self):
        pass

    def write(self, bytes):
        pass

    def flush(self):
        pass

    def close(self):
        pass
def discardLogs():
    """Route all log output to a fresh NullFile, throwing it away."""
    global logfile
    logfile = NullFile()
# Prevent logfile from being erased on reload. This only works in cpython.
try:
    logfile
except NameError:
    # logfile carries ordinary output into the log, logerr error output.
    logfile = StdioOnnaStick(0)
    logerr = StdioOnnaStick(1)
class DefaultObserver:
    """Fallback observer installed at import time.

    Ignores all non-error events and writes error events to sys.stderr; it
    is removed the first time startLogging() is called.
    """

    def _emit(self, eventDict):
        if not eventDict["isError"]:
            return
        if 'failure' in eventDict:
            text = eventDict['failure'].getTraceback()
        else:
            text = " ".join([str(m) for m in eventDict["message"]]) + "\n"
        sys.stderr.write(text)
        sys.stderr.flush()

    def start(self):
        addObserver(self._emit)

    def stop(self):
        removeObserver(self._emit)
# Install the fallback stderr observer at import time (only once, so a
# reload() keeps the existing one); startLogging() stops and discards it
# when real logging is configured.
try:
    defaultObserver
except NameError:
    defaultObserver = DefaultObserver()
    defaultObserver.start()
| fxia22/ASM_xf | PythonD/site_python/twisted/python/log.py | Python | gpl-2.0 | 10,155 |
# Copyright (C) 2011-2014 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""REST for addresses."""
from __future__ import absolute_import, print_function,unicode_literals
__metaclass__ = type
__all__ = [
'AllAddresses',
'AnAddress',
'UserAddresses',
]
from operator import attrgetter
from zope.component import getUtility
from mailman.interfaces.address import (
ExistingAddressError, InvalidEmailAddressError)
from mailman.interfaces.usermanager import IUserManager
from mailman.rest.helpers import (
BadRequest, CollectionMixin, NotFound, bad_request, child, created, etag,
no_content, not_found, okay, path_to)
from mailman.rest.members import MemberCollection
from mailman.rest.preferences import Preferences
from mailman.rest.validator import Validator
from mailman.utilities.datetime import now
class _AddressBase(CollectionMixin):
    """Shared base class for address representations."""

    def _resource_as_dict(self, address):
        """See `CollectionMixin`."""
        # The canonical url for an address is its lower-cased version,
        # although it can be looked up with either its original or lower-cased
        # email address.
        resource = dict(
            email=address.email,
            original_email=address.original_email,
            registered_on=address.registered_on,
            self_link=path_to('addresses/{0}'.format(address.email)),
            )
        # These attributes may be None or the empty string; only include
        # them in the representation when they carry a value.
        display_name = address.display_name
        if display_name:
            resource['display_name'] = display_name
        verified_on = address.verified_on
        if verified_on:
            resource['verified_on'] = verified_on
        return resource

    def _get_collection(self, request):
        """See `CollectionMixin`."""
        return list(getUtility(IUserManager).addresses)
class AllAddresses(_AddressBase):
    """The collection of every known address."""

    def on_get(self, request, response):
        """/addresses"""
        collection = self._make_collection(request)
        okay(response, etag(collection))
class _VerifyResource:
"""A helper resource for verify/unverify POSTS."""
def __init__(self, address, action):
self._address = address
self._action = action
assert action in ('verify', 'unverify')
def on_post(self, request, response):
# We don't care about the POST data, just do the action.
if self._action == 'verify' and self._address.verified_on is None:
self._address.verified_on = now()
elif self._action == 'unverify':
self._address.verified_on = None
no_content(response)
class AnAddress(_AddressBase):
    """A single address, looked up by its (original or lower-cased) email."""
    def __init__(self, email):
        """Get an address by either its original or lower-cased email.
        :param email: The email address of the `IAddress`.
        :type email: string
        """
        # May be None when no such address is registered; the handlers
        # below answer 404 in that case.
        self._address = getUtility(IUserManager).get_address(email)
    def on_get(self, request, response):
        """Return a single address."""
        if self._address is None:
            not_found(response)
        else:
            okay(response, self._resource_as_json(self._address))
    @child()
    def memberships(self, request, segments):
        """/addresses/<email>/memberships"""
        if len(segments) != 0:
            return BadRequest(), []
        if self._address is None:
            return NotFound(), []
        # NOTE(review): unlike the other child resources this returns the
        # resource alone rather than a (resource, segments) 2-tuple --
        # presumably both forms are accepted by the traversal machinery;
        # confirm against the REST dispatcher.
        return AddressMemberships(self._address)
    @child()
    def preferences(self, request, segments):
        """/addresses/<email>/preferences"""
        if len(segments) != 0:
            return NotFound(), []
        if self._address is None:
            return NotFound(), []
        child = Preferences(
            self._address.preferences,
            'addresses/{0}'.format(self._address.email))
        return child, []
    @child()
    def verify(self, request, segments):
        """/addresses/<email>/verify"""
        if len(segments) != 0:
            return BadRequest(), []
        if self._address is None:
            return NotFound(), []
        child = _VerifyResource(self._address, 'verify')
        return child, []
    @child()
    def unverify(self, request, segments):
        """/addresses/<email>/unverify"""
        if len(segments) != 0:
            return BadRequest(), []
        if self._address is None:
            return NotFound(), []
        child = _VerifyResource(self._address, 'unverify')
        return child, []
class UserAddresses(_AddressBase):
    """The addresses of a user."""
    def __init__(self, user):
        # `user` may be None; the handlers below answer 404 in that case.
        self._user = user
        super(UserAddresses, self).__init__()
    def _get_collection(self, request):
        """See `CollectionMixin`."""
        # Deterministic order: sort by the originally supplied spelling.
        return sorted(self._user.addresses,
                      key=attrgetter('original_email'))
    def on_get(self, request, response):
        """/addresses"""
        if self._user is None:
            not_found(response)
        else:
            okay(response, etag(self._make_collection(request)))
    def on_post(self, request, response):
        """POST to /addresses
        Add a new address to the user record.
        """
        if self._user is None:
            not_found(response)
            return
        user_manager = getUtility(IUserManager)
        # `email` is required; `display_name` is optional.
        validator = Validator(email=unicode,
                              display_name=unicode,
                              _optional=('display_name',))
        try:
            address = user_manager.create_address(**validator(request))
        except ValueError as error:
            # Raised by the validator for missing/unexpected parameters.
            bad_request(response, str(error))
        except InvalidEmailAddressError:
            bad_request(response, b'Invalid email address')
        except ExistingAddressError:
            bad_request(response, b'Address already exists')
        else:
            # Link the address to the current user and return it.
            address.user = self._user
            created(response, path_to('addresses/{0}'.format(address.email)))
def membership_key(member):
    """Sort key for memberships: mailing list first, then the subscribed
    email address, then the role's numeric value."""
    return (member.list_id, member.address.email, member.role.value)
class AddressMemberships(MemberCollection):
    """All the memberships of a particular email address."""
    def __init__(self, address):
        super(AddressMemberships, self).__init__()
        self._address = address
    def _get_collection(self, request):
        """See `CollectionMixin`."""
        # XXX Improve this by implementing a .memberships attribute on
        # IAddress, similar to the way IUser does it.
        #
        # Start by getting the IUser that controls this address. For now, if
        # the address is not controlled by a user, return the empty set.
        # Later when we address the XXX comment, it will return some
        # memberships. But really, it should not be legal to subscribe an
        # address to a mailing list that isn't controlled by a user -- maybe!
        user = getUtility(IUserManager).get_user(self._address.email)
        if user is None:
            return []
        # Only memberships subscribed through this exact address (the
        # user's other addresses are excluded), in deterministic order.
        return sorted((member for member in user.memberships.members
                       if member.address == self._address),
                      key=membership_key)
| adam-iris/mailman | src/mailman/rest/addresses.py | Python | gpl-3.0 | 7,993 |
from setuptools import find_packages, setup
# Minimal packaging manifest for the python2 test snap: installs a
# `python2_test` console script dispatching to
# python2_test_package.__main__:main.
setup(
    name="test",
    version="0.1",
    packages=find_packages(),
    entry_points={
        "console_scripts": ["python2_test = python2_test_package.__main__:main"]
    },
)
| chipaca/snapcraft | tests/spread/plugins/v1/python/snaps/python-entry-point/python2/setup.py | Python | gpl-3.0 | 227 |
# -*- coding: utf-8 -*-
# Unittests for fixtures.
from __future__ import absolute_import
import os
import re
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core import management
from django.core.management.base import CommandError
from django.core.management.commands.dumpdata import sort_dependencies
from django.db import transaction
from django.db.models import signals
from django.test import (TestCase, TransactionTestCase, skipIfDBFeature,
skipUnlessDBFeature)
from .models import (Animal, Stuff, Absolute, Parent, Child, Article, Widget,
Store, Person, Book, NKChild, RefToNKChild, Circle1, Circle2, Circle3,
ExternalDependency, Thingy)
# Records (count, weight) descriptions captured by the pre_save signal.
pre_save_checks = []

def animal_pre_save_check(signal, sender, instance, **kwargs):
    "A signal that is used to check the type of data loaded from fixtures"
    count_desc = 'Count = %s (%s)' % (instance.count, type(instance.count))
    weight_desc = 'Weight = %s (%s)' % (instance.weight, type(instance.weight))
    pre_save_checks.append((count_desc, weight_desc))
class TestFixtures(TestCase):
    """Regression tests for the loaddata/dumpdata management commands."""

    def test_duplicate_pk(self):
        """
        This is a regression test for ticket #3790.
        """
        # Load a fixture that uses PK=1
        management.call_command(
            'loaddata',
            'sequence',
            verbosity=0,
            commit=False
        )
        # Create a new animal. Without a sequence reset, this new object
        # will take a PK of 1 (on Postgres), and the save will fail.
        animal = Animal(
            name='Platypus',
            latin_name='Ornithorhynchus anatinus',
            count=2,
            weight=2.2
        )
        animal.save()
        self.assertGreater(animal.id, 1)

    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_pretty_print_xml(self):
        """
        Regression test for ticket #4558 -- pretty printing of XML fixtures
        doesn't affect parsing of None values.
        """
        # Load a pretty-printed XML fixture with Nulls.
        management.call_command(
            'loaddata',
            'pretty.xml',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Stuff.objects.all()[0].name, None)
        self.assertEqual(Stuff.objects.all()[0].owner, None)

    @skipUnlessDBFeature('interprets_empty_strings_as_nulls')
    def test_pretty_print_xml_empty_strings(self):
        """
        Regression test for ticket #4558 -- pretty printing of XML fixtures
        doesn't affect parsing of None values.
        """
        # Load a pretty-printed XML fixture with Nulls.
        management.call_command(
            'loaddata',
            'pretty.xml',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Stuff.objects.all()[0].name, u'')
        self.assertEqual(Stuff.objects.all()[0].owner, None)

    def test_absolute_path(self):
        """
        Regression test for ticket #6436 --
        os.path.join will throw away the initial parts of a path if it
        encounters an absolute path.
        This means that if a fixture is specified as an absolute path,
        we need to make sure we don't discover the absolute path in every
        fixture directory.
        """
        load_absolute_path = os.path.join(
            os.path.dirname(__file__),
            'fixtures',
            'absolute.json'
        )
        management.call_command(
            'loaddata',
            load_absolute_path,
            verbosity=0,
            commit=False
        )
        self.assertEqual(Absolute.load_count, 1)

    def test_unknown_format(self):
        """
        Test for ticket #4371 -- Loading data of an unknown format should fail
        Validate that error conditions are caught correctly
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'bad_fixture1.unkn',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "Problem installing fixture 'bad_fixture1': unkn is not a known serialization format.\n"
        )

    def test_invalid_data(self):
        """
        Test for ticket #4371 -- Loading a fixture file with invalid data
        using explicit filename.
        Validate that error conditions are caught correctly
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'bad_fixture2.xml',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "No fixture data found for 'bad_fixture2'. (File format may be invalid.)\n"
        )

    def test_invalid_data_no_ext(self):
        """
        Test for ticket #4371 -- Loading a fixture file with invalid data
        without file extension.
        Validate that error conditions are caught correctly
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'bad_fixture2',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "No fixture data found for 'bad_fixture2'. (File format may be invalid.)\n"
        )

    def test_empty(self):
        """
        Test for ticket #4371 -- Loading a fixture file with no data returns an error.
        Validate that error conditions are caught correctly
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'empty',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "No fixture data found for 'empty'. (File format may be invalid.)\n"
        )

    def test_abort_loaddata_on_error(self):
        """
        Test for ticket #4371 -- If any of the fixtures contain an error,
        loading is aborted.
        Validate that error conditions are caught correctly
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'empty',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "No fixture data found for 'empty'. (File format may be invalid.)\n"
        )

    def test_error_message(self):
        """
        (Regression for #9011 - error message is correct)
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'bad_fixture2',
            'animal',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(),
            "No fixture data found for 'bad_fixture2'. (File format may be invalid.)\n"
        )

    def test_pg_sequence_resetting_checks(self):
        """
        Test for ticket #7565 -- PostgreSQL sequence resetting checks shouldn't
        ascend to parent models when inheritance is used
        (since they are treated individually).
        """
        management.call_command(
            'loaddata',
            'model-inheritance.json',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Parent.objects.all()[0].id, 1)
        self.assertEqual(Child.objects.all()[0].id, 1)

    def test_close_connection_after_loaddata(self):
        """
        Test for ticket #7572 -- MySQL has a problem if the same connection is
        used to create tables, load data, and then query over that data.
        To compensate, we close the connection after running loaddata.
        This ensures that a new connection is opened when test queries are
        issued.
        """
        management.call_command(
            'loaddata',
            'big-fixture.json',
            verbosity=0,
            commit=False
        )
        articles = Article.objects.exclude(id=9)
        self.assertEqual(
            list(articles.values_list('id', flat=True)),
            [1, 2, 3, 4, 5, 6, 7, 8]
        )
        # Just for good measure, run the same query again.
        # Under the influence of ticket #7572, this will
        # give a different result to the previous call.
        self.assertEqual(
            list(articles.values_list('id', flat=True)),
            [1, 2, 3, 4, 5, 6, 7, 8]
        )

    def test_field_value_coerce(self):
        """
        Test for tickets #8298, #9942 - Field values should be coerced into the
        correct type by the deserializer, not as part of the database write.
        """
        global pre_save_checks
        pre_save_checks = []
        signals.pre_save.connect(animal_pre_save_check)
        try:
            management.call_command(
                'loaddata',
                'animal.xml',
                verbosity=0,
                commit=False,
            )
            self.assertEqual(
                pre_save_checks,
                [
                    ("Count = 42 (<type 'int'>)", "Weight = 1.2 (<type 'float'>)")
                ]
            )
        finally:
            # Always disconnect so other tests are unaffected.
            signals.pre_save.disconnect(animal_pre_save_check)

    def test_dumpdata_uses_default_manager(self):
        """
        Regression for #11286
        Ensure that dumpdata honors the default manager
        Dump the current contents of the database as a JSON fixture
        """
        management.call_command(
            'loaddata',
            'animal.xml',
            verbosity=0,
            commit=False,
        )
        management.call_command(
            'loaddata',
            'sequence.json',
            verbosity=0,
            commit=False,
        )
        animal = Animal(
            name='Platypus',
            latin_name='Ornithorhynchus anatinus',
            count=2,
            weight=2.2
        )
        animal.save()
        stdout = StringIO()
        management.call_command(
            'dumpdata',
            'fixtures_regress.animal',
            format='json',
            stdout=stdout
        )
        # Output order isn't guaranteed, so check for parts
        data = stdout.getvalue()
        # Get rid of artifacts like '000000002' to eliminate the differences
        # between different Python versions.
        # FIX: raw string -- '\d' in a plain literal is an invalid escape
        # sequence (SyntaxWarning on modern CPython); the pattern is
        # unchanged.
        data = re.sub(r'0{6,}\d', '', data)
        lion_json = '{"pk": 1, "model": "fixtures_regress.animal", "fields": {"count": 3, "weight": 1.2, "name": "Lion", "latin_name": "Panthera leo"}}'
        emu_json = '{"pk": 10, "model": "fixtures_regress.animal", "fields": {"count": 42, "weight": 1.2, "name": "Emu", "latin_name": "Dromaius novaehollandiae"}}'
        platypus_json = '{"pk": %d, "model": "fixtures_regress.animal", "fields": {"count": 2, "weight": 2.2, "name": "Platypus", "latin_name": "Ornithorhynchus anatinus"}}'
        platypus_json = platypus_json % animal.pk
        self.assertEqual(len(data), len('[%s]' % ', '.join([lion_json, emu_json, platypus_json])))
        self.assertTrue(lion_json in data)
        self.assertTrue(emu_json in data)
        self.assertTrue(platypus_json in data)

    def test_proxy_model_included(self):
        """
        Regression for #11428 - Proxy models aren't included when you dumpdata
        """
        stdout = StringIO()
        # Create an instance of the concrete class
        widget = Widget.objects.create(name='grommet')
        management.call_command(
            'dumpdata',
            'fixtures_regress.widget',
            'fixtures_regress.widgetproxy',
            format='json',
            stdout=stdout
        )
        self.assertEqual(
            stdout.getvalue(),
            """[{"pk": %d, "model": "fixtures_regress.widget", "fields": {"name": "grommet"}}]"""
            % widget.pk
        )

    def test_loaddata_works_when_fixture_has_forward_refs(self):
        """
        Regression for #3615 - Forward references cause fixtures not to load in MySQL (InnoDB)
        """
        management.call_command(
            'loaddata',
            'forward_ref.json',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Book.objects.all()[0].id, 1)
        self.assertEqual(Person.objects.all()[0].id, 4)

    def test_loaddata_raises_error_when_fixture_has_invalid_foreign_key(self):
        """
        Regression for #3615 - Ensure data with nonexistent child key references raises error
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            'forward_ref_bad_data.json',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertTrue(
            stderr.getvalue().startswith('Problem installing fixture')
        )

    def test_loaddata_no_fixture_specified(self):
        """
        Regression for #7043 - Error is quickly reported when no fixtures is provided in the command line.
        """
        stderr = StringIO()
        management.call_command(
            'loaddata',
            verbosity=0,
            commit=False,
            stderr=stderr,
        )
        self.assertEqual(
            stderr.getvalue(), 'No database fixture specified. Please provide the path of at least one fixture in the command line.\n'
        )

    def test_loaddata_not_existant_fixture_file(self):
        """
        Loading a missing fixture reports it at verbosity 2 without failing.
        """
        stdout_output = StringIO()
        management.call_command(
            'loaddata',
            'this_fixture_doesnt_exist',
            verbosity=2,
            commit=False,
            stdout=stdout_output,
        )
        self.assertTrue("No xml fixture 'this_fixture_doesnt_exist' in" in
            stdout_output.getvalue())
class NaturalKeyFixtureTests(TestCase):
def test_nk_deserialize(self):
"""
Test for ticket #13030 - Python based parser version
natural keys deserialize with fk to inheriting model
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
commit=False
)
management.call_command(
'loaddata',
'nk-inheritance.json',
verbosity=0,
commit=False
)
self.assertEqual(
NKChild.objects.get(pk=1).data,
'apple'
)
self.assertEqual(
RefToNKChild.objects.get(pk=1).nk_fk.data,
'apple'
)
def test_nk_deserialize_xml(self):
"""
Test for ticket #13030 - XML version
natural keys deserialize with fk to inheriting model
"""
management.call_command(
'loaddata',
'model-inheritance.json',
verbosity=0,
commit=False
)
management.call_command(
'loaddata',
'nk-inheritance.json',
verbosity=0,
commit=False
)
management.call_command(
'loaddata',
'nk-inheritance2.xml',
verbosity=0,
commit=False
)
self.assertEqual(
NKChild.objects.get(pk=2).data,
'banana'
)
self.assertEqual(
RefToNKChild.objects.get(pk=2).nk_fk.data,
'apple'
)
def test_nk_on_serialize(self):
"""
Check that natural key requirements are taken into account
when serializing models
"""
management.call_command(
'loaddata',
'forward_ref_lookup.json',
verbosity=0,
commit=False
)
stdout = StringIO()
management.call_command(
'dumpdata',
'fixtures_regress.book',
'fixtures_regress.person',
'fixtures_regress.store',
verbosity=0,
format='json',
use_natural_keys=True,
stdout=stdout,
)
self.assertEqual(
stdout.getvalue(),
"""[{"pk": 2, "model": "fixtures_regress.store", "fields": {"name": "Amazon"}}, {"pk": 3, "model": "fixtures_regress.store", "fields": {"name": "Borders"}}, {"pk": 4, "model": "fixtures_regress.person", "fields": {"name": "Neal Stephenson"}}, {"pk": 1, "model": "fixtures_regress.book", "fields": {"stores": [["Amazon"], ["Borders"]], "name": "Cryptonomicon", "author": ["Neal Stephenson"]}}]"""
)
def test_dependency_sorting(self):
"""
Now lets check the dependency sorting explicitly
It doesn't matter what order you mention the models
Store *must* be serialized before then Person, and both
must be serialized before Book.
"""
sorted_deps = sort_dependencies(
[('fixtures_regress', [Book, Person, Store])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_2(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Book, Store, Person])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_3(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Store, Book, Person])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_4(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Store, Person, Book])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_5(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Person, Book, Store])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_6(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Person, Store, Book])]
)
self.assertEqual(
sorted_deps,
[Store, Person, Book]
)
def test_dependency_sorting_dangling(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Person, Circle1, Store, Book])]
)
self.assertEqual(
sorted_deps,
[Circle1, Store, Person, Book]
)
def test_dependency_sorting_tight_circular(self):
self.assertRaisesMessage(
CommandError,
"""Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2 in serialized app list.""",
sort_dependencies,
[('fixtures_regress', [Person, Circle2, Circle1, Store, Book])],
)
def test_dependency_sorting_tight_circular_2(self):
self.assertRaisesMessage(
CommandError,
"""Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2 in serialized app list.""",
sort_dependencies,
[('fixtures_regress', [Circle1, Book, Circle2])],
)
def test_dependency_self_referential(self):
self.assertRaisesMessage(
CommandError,
"""Can't resolve dependencies for fixtures_regress.Circle3 in serialized app list.""",
sort_dependencies,
[('fixtures_regress', [Book, Circle3])],
)
def test_dependency_sorting_long(self):
self.assertRaisesMessage(
CommandError,
"""Can't resolve dependencies for fixtures_regress.Circle1, fixtures_regress.Circle2, fixtures_regress.Circle3 in serialized app list.""",
sort_dependencies,
[('fixtures_regress', [Person, Circle2, Circle1, Circle3, Store, Book])],
)
def test_dependency_sorting_normal(self):
sorted_deps = sort_dependencies(
[('fixtures_regress', [Person, ExternalDependency, Book])]
)
self.assertEqual(
sorted_deps,
[Person, Book, ExternalDependency]
)
def test_normal_pk(self):
"""
Check that normal primary keys still work
on a model with natural key capabilities
"""
management.call_command(
'loaddata',
'non_natural_1.json',
verbosity=0,
commit=False
)
management.call_command(
'loaddata',
'forward_ref_lookup.json',
verbosity=0,
commit=False
)
management.call_command(
'loaddata',
'non_natural_2.xml',
verbosity=0,
commit=False
)
books = Book.objects.all()
self.assertEqual(
books.__repr__(),
"""[<Book: Cryptonomicon by Neal Stephenson (available at Amazon, Borders)>, <Book: Ender's Game by Orson Scott Card (available at Collins Bookstore)>, <Book: Permutation City by Greg Egan (available at Angus and Robertson)>]"""
)
class TestTicket11101(TransactionTestCase):
    """Regression tests for #11101: fixture loads must be transactional."""
    def ticket_11101(self):
        # Load one object, verify it exists, roll the transaction back and
        # verify the load was undone, then commit the (empty) state.
        management.call_command(
            'loaddata',
            'thingy.json',
            verbosity=0,
            commit=False
        )
        self.assertEqual(Thingy.objects.count(), 1)
        transaction.rollback()
        self.assertEqual(Thingy.objects.count(), 0)
        transaction.commit()
    @skipUnlessDBFeature('supports_transactions')
    def test_ticket_11101(self):
        """Test that fixtures can be rolled back (ticket #11101)."""
        # Wrap the helper in manual transaction management at call time so
        # rollback()/commit() inside it are legal.
        ticket_11101 = transaction.commit_manually(self.ticket_11101)
        ticket_11101()
| mixman/djangodev | tests/regressiontests/fixtures_regress/tests.py | Python | bsd-3-clause | 21,754 |
# Generated by Haxe 3.3.0
import matplotlib.pyplot as matplotlib_pyplot_Pyplot_Module
import pandas as pandas_Pandas_Module
import seaborn as seaborn_Seaborn_Module
class Script:
    # NOTE: this module was emitted by the Haxe 3.3.0 compiler (see file
    # header); prefer regenerating from the Haxe source over hand-editing.
    __slots__ = ()
    @staticmethod
    def main():
        # Load three numeric columns from the 2013 HUD housing survey and
        # plot two histograms of respondent age (5 and 10 bins).
        seaborn_Seaborn_Module.set_style("dark")
        housing_2013 = pandas_Pandas_Module.read_csv("../Hud_2013.csv")
        cols = ['AGE1', 'FMR', 'TOTSAL']
        filtered_housing_2013 = housing_2013[cols]
        filtered_housing_2013.hist(column='AGE1', bins=5)
        filtered_housing_2013.hist(column='AGE1', bins=10)
        matplotlib_pyplot_Pyplot_Module.show()
Script.main() | ustutz/dataquest | Data_Analyst/Step_7_Python_Applications/Python_for_Business_Analysts/1_Introduction_to_Pandas/8_Practice_histograms/script.py | Python | mit | 629 |
import unittest
from test.test_support import requires
import Tkinter as tk
from Tkinter import Text as tkText
from idlelib.idle_test.mock_tk import Text as mkText
from idlelib.IdleHistory import History
from idlelib.configHandler import idleConf
line1 = 'a = 7'
line2 = 'b = a'
class StoreTest(unittest.TestCase):
    '''Tests History.__init__ and History.store with mock Text'''
    @classmethod
    def setUpClass(cls):
        cls.text = mkText()
        cls.history = History(cls.text)
    def tearDown(self):
        # Reset both widget contents and stored history between tests.
        self.text.delete('1.0', 'end')
        self.history.history = []
    def test_init(self):
        self.assertIs(self.history.text, self.text)
        self.assertEqual(self.history.history, [])
        self.assertIsNone(self.history.prefix)
        self.assertIsNone(self.history.pointer)
        # cyclic mirrors the user's configuration option.
        self.assertEqual(self.history.cyclic,
                idleConf.GetOption("main", "History", "cyclic", 1, "bool"))
    def test_store_short(self):
        # Entries that strip down to fewer than 2 characters are ignored.
        self.history.store('a')
        self.assertEqual(self.history.history, [])
        self.history.store('  a  ')
        self.assertEqual(self.history.history, [])
    def test_store_dup(self):
        # Re-storing an existing entry moves it to the end, not duplicates.
        self.history.store(line1)
        self.assertEqual(self.history.history, [line1])
        self.history.store(line2)
        self.assertEqual(self.history.history, [line1, line2])
        self.history.store(line1)
        self.assertEqual(self.history.history, [line2, line1])
    def test_store_reset(self):
        # Storing clears any in-progress fetch state.
        self.history.prefix = line1
        self.history.pointer = 0
        self.history.store(line2)
        self.assertIsNone(self.history.prefix)
        self.assertIsNone(self.history.pointer)
class TextWrapper:
    '''Delegate to a real tk Text widget, but record bell() calls silently.'''
    def __init__(self, master):
        self.text = tkText(master=master)
        self._bell = False
    def __getattr__(self, name):
        # Fall through to the wrapped widget for everything else.
        return getattr(self.text, name)
    def bell(self):
        self._bell = True
class FetchTest(unittest.TestCase):
    '''Test History.fetch with wrapped tk.Text.
    '''
    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = tk.Tk()
    def setUp(self):
        # Fresh prompt with 'iomark' at the input position, plus a
        # two-entry history, before every test.
        self.text = text = TextWrapper(self.root)
        text.insert('1.0', ">>> ")
        text.mark_set('iomark', '1.4')
        text.mark_gravity('iomark', 'left')
        self.history = History(text)
        self.history.history = [line1, line2]
    @classmethod
    def tearDownClass(cls):
        cls.root.destroy()
    def fetch_test(self, reverse, line, prefix, index, bell=False):
        # Perform one fetch as invoked by Alt-N or Alt-P
        # Test the result. The line test is the most important.
        # The last two are diagnostic of fetch internals.
        History = self.history
        History.fetch(reverse)
        Equal = self.assertEqual
        Equal(self.text.get('iomark', 'end-1c'), line)
        Equal(self.text._bell, bell)
        if bell:
            self.text._bell = False
        Equal(History.prefix, prefix)
        Equal(History.pointer, index)
        Equal(self.text.compare("insert", '==', "end-1c"), 1)
    def test_fetch_prev_cyclic(self):
        prefix = ''
        test = self.fetch_test
        test(True, line2, prefix, 1)
        test(True, line1, prefix, 0)
        # Cyclic wrap past the oldest entry restores the prefix and rings.
        test(True, prefix, None, None, bell=True)
    def test_fetch_next_cyclic(self):
        prefix = ''
        test = self.fetch_test
        test(False, line1, prefix, 0)
        test(False, line2, prefix, 1)
        test(False, prefix, None, None, bell=True)
    # Prefix 'a' tests skip line2, which starts with 'b'
    def test_fetch_prev_prefix(self):
        prefix = 'a'
        self.text.insert('iomark', prefix)
        self.fetch_test(True, line1, prefix, 0)
        self.fetch_test(True, prefix, None, None, bell=True)
    def test_fetch_next_prefix(self):
        prefix = 'a'
        self.text.insert('iomark', prefix)
        self.fetch_test(False, line1, prefix, 0)
        self.fetch_test(False, prefix, None, None, bell=True)
    def test_fetch_prev_noncyclic(self):
        prefix = ''
        self.history.cyclic = False
        test = self.fetch_test
        test(True, line2, prefix, 1)
        test(True, line1, prefix, 0)
        # Non-cyclic: stays pinned at the oldest entry and rings.
        test(True, line1, prefix, 0, bell=True)
    def test_fetch_next_noncyclic(self):
        prefix = ''
        self.history.cyclic = False
        test = self.fetch_test
        test(False, prefix, None, None, bell=True)
        test(True, line2, prefix, 1)
        test(False, prefix, None, None, bell=True)
        test(False, prefix, None, None, bell=True)
    def test_fetch_cursor_move(self):
        # Move cursor after fetch
        self.history.fetch(reverse=True)  # initialization
        self.text.mark_set('insert', 'iomark')
        self.fetch_test(True, line2, None, None, bell=True)
    def test_fetch_edit(self):
        # Edit after fetch
        self.history.fetch(reverse=True)  # initialization
        self.text.delete('iomark', 'insert', )
        self.text.insert('iomark', 'a =')
        self.fetch_test(True, line1, 'a =', 0)  # prefix is reset
    def test_history_prev_next(self):
        # Minimally test functions bound to events
        self.history.history_prev('dummy event')
        self.assertEqual(self.history.pointer, 1)
        self.history.history_next('dummy event')
        self.assertEqual(self.history.pointer, None)
if __name__ == '__main__':
    # Any truthy value for `exit` makes unittest.main call sys.exit().
    unittest.main(verbosity=2, exit=2)
| alanjw/GreenOpenERP-Win-X86 | python/Lib/idlelib/idle_test/test_idlehistory.py | Python | agpl-3.0 | 5,612 |
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import pytest
import numpy as np
from scipy import sparse
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.base import _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
rng = np.random.RandomState(0)
def test_linear_regression():
    """LinearRegression recovers the exact solution on trivial datasets."""
    # Two perfectly collinear points: y = x, zero intercept.
    reg = LinearRegression().fit([[1], [2]], [1, 2])
    assert_array_almost_equal(reg.coef_, [1])
    assert_array_almost_equal(reg.intercept_, [0])
    assert_array_almost_equal(reg.predict([[1], [2]]), [1, 2])
    # Degenerate single-sample input: flat zero solution.
    reg = LinearRegression().fit([[1]], [0])
    assert_array_almost_equal(reg.coef_, [0])
    assert_array_almost_equal(reg.intercept_, [0])
    assert_array_almost_equal(reg.predict([[1]]), [0])
def test_linear_regression_sample_weights():
    """Weighted fit must match the closed-form weighted least squares."""
    # TODO: loop over sparse data as well
    rng = np.random.RandomState(0)
    # It would not work with under-determined systems
    for n_samples, n_features in ((6, 5), ):
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1.0 + rng.rand(n_samples)
        for intercept in (True, False):
            # LinearRegression with explicit sample_weight
            reg = LinearRegression(fit_intercept=intercept)
            reg.fit(X, y, sample_weight=sample_weight)
            coefs1 = reg.coef_
            inter1 = reg.intercept_
            assert_equal(reg.coef_.shape, (X.shape[1], ))  # sanity checks
            assert_greater(reg.score(X, y), 0.5)
            # Closed form of the weighted least square
            # theta = (X^T W X)^(-1) * X^T W y
            W = np.diag(sample_weight)
            if intercept is False:
                X_aug = X
            else:
                # Prepend a ones column so the intercept becomes coefs2[0].
                dummy_column = np.ones(shape=(n_samples, 1))
                X_aug = np.concatenate((dummy_column, X), axis=1)
            coefs2 = linalg.solve(X_aug.T.dot(W).dot(X_aug),
                                  X_aug.T.dot(W).dot(y))
            if intercept is False:
                assert_array_almost_equal(coefs1, coefs2)
            else:
                assert_array_almost_equal(coefs1, coefs2[1:])
                assert_almost_equal(inter1, coefs2[0])
def test_raises_value_error_if_sample_weights_greater_than_1d():
    """fit() must accept scalar and 1-D sample weights and reject 2-D ones.

    The original body only exercised the "OK" weights and never performed
    the rejection check that the function name promises; the pytest.raises
    block below adds it.
    """
    # Sample weights must be either scalar or 1D
    n_sampless = [2, 3]
    n_featuress = [3, 2]
    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights_OK = rng.randn(n_samples) ** 2 + 1
        sample_weights_OK_1 = 1.
        sample_weights_OK_2 = 2.
        # Derived from an existing draw, so the module rng state advances
        # exactly as before.
        sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
        reg = LinearRegression()
        # make sure the "OK" sample weights actually work
        reg.fit(X, y, sample_weights_OK)
        reg.fit(X, y, sample_weights_OK_1)
        reg.fit(X, y, sample_weights_OK_2)
        # 2-D weights must be rejected.
        with pytest.raises(ValueError):
            reg.fit(X, y, sample_weights_not_OK)
def test_fit_intercept():
    """coef_ shape and ndim must not depend on fit_intercept."""
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])
    no_int_2 = LinearRegression(fit_intercept=False).fit(X2, y)
    with_int_2 = LinearRegression(fit_intercept=True).fit(X2, y)
    no_int_3 = LinearRegression(fit_intercept=False).fit(X3, y)
    with_int_3 = LinearRegression(fit_intercept=True).fit(X3, y)
    assert_equal(with_int_2.coef_.shape, no_int_2.coef_.shape)
    assert_equal(with_int_3.coef_.shape, no_int_3.coef_.shape)
    assert_equal(no_int_2.coef_.ndim, no_int_3.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    """Linear regression also works with sparse design matrices."""
    # Test that linear regression also works with sparse data
    random_state = check_random_state(random_state)
    for i in range(10):
        n = 100
        # X is the identity, so the targets equal beta exactly and the fit
        # must recover beta (split between coef_ and intercept_).
        X = sparse.eye(n, n)
        beta = random_state.rand(n)
        y = X * beta[:, np.newaxis]
        ols = LinearRegression()
        ols.fit(X, y.ravel())
        assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
        assert_array_almost_equal(ols.predict(X) - y.ravel(), 0)
def test_linear_regression_multiple_outcome(random_state=0):
    """Multi-output fit: one coefficient row per target column, and each
    column predicts like the corresponding single-output fit."""
    X, y = make_regression(random_state=random_state)
    stacked_y = np.vstack((y, y)).T
    model = LinearRegression(fit_intercept=True)
    model.fit(X, stacked_y)
    assert_equal(model.coef_.shape, (2, X.shape[1]))
    multi_pred = model.predict(X)
    # Refit on the single target and compare column-wise predictions.
    model.fit(X, y)
    single_pred = model.predict(X)
    assert_array_almost_equal(
        np.vstack((single_pred, single_pred)).T, multi_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    """Multi-output linear regression also works on sparse input."""
    # Test multiple-outcome linear regressions with sparse data
    random_state = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=random_state)
    X = sparse.coo_matrix(X)
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]
    ols = LinearRegression()
    ols.fit(X, Y)
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)
    # Single-output refit must predict the same values column-wise.
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_preprocess_data():
    """_preprocess_data centering/scaling across the three configurations."""
    n_samples = 200
    n_features = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    expected_X_norm = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)
    # No intercept: data passes through unchanged.
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=False, normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)
    # Intercept only: X and y are centered, not scaled.
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    # Intercept + normalize: X is centered and scaled, y only centered.
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_norm, expected_X_norm)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_preprocess_data_multioutput():
    """Multi-output y is centered per column, for dense and sparse X."""
    n_samples = 200
    n_features = 3
    n_outputs = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)
    args = [X, sparse.csc_matrix(X)]
    for X in args:
        # Without an intercept y is untouched.
        _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=False,
                                               normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)
        # With an intercept y is centered; normalize only affects X.
        _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
                                               normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
        _, yt, _, y_mean, _ = _preprocess_data(X, y, fit_intercept=True,
                                               normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
def test_preprocess_data_weighted():
    """Sample weights shift the means used for centering in _preprocess_data."""
    n_samples = 200
    n_features = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)
    # XXX: if normalize=True, should we expect a weighted standard deviation?
    #      Currently not weighted, but calculated with respect to weighted mean
    expected_X_norm = (np.sqrt(X.shape[0]) *
                       np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=False,
                         sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=True,
                         sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_norm, expected_X_norm)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_norm)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_preprocess_data_with_return_mean():
    """Sparse input with return_mean=True: means are reported but the data
    itself is not densified/centered."""
    n_samples = 200
    n_features = 2
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    expected_X_norm = np.std(XA, axis=0) * np.sqrt(X.shape[0])
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=False, normalize=False,
                         return_mean=True)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)
    # With an intercept, the sparse X stays uncentered (only the mean is
    # returned); y is centered as usual.
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=False,
                         return_mean=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_norm, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
    Xt, yt, X_mean, y_mean, X_norm = \
        _preprocess_data(X, y, fit_intercept=True, normalize=True,
                         return_mean=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_norm, expected_X_norm)
    assert_array_almost_equal(Xt.A, XA / expected_X_norm)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_preprocess_data():
    """_preprocess_data must keep CSR input in CSR format."""
    X, y = make_regression()
    X[X < 2.5] = 0.0
    sparse_X = sparse.csr_matrix(X)
    processed_X, y, _, _, _ = _preprocess_data(sparse_X, y, True)
    assert_equal(processed_X.getformat(), 'csr')
@pytest.mark.parametrize('is_sparse', (True, False))
@pytest.mark.parametrize('to_copy', (True, False))
def test_preprocess_copy_data_no_checks(is_sparse, to_copy):
    """copy=True must produce independent data; copy=False may share memory,
    even with check_input=False."""
    X, y = make_regression()
    X[X < 2.5] = 0.0
    if is_sparse:
        X = sparse.csr_matrix(X)
    X_, y_, _, _, _ = _preprocess_data(X, y, True,
                                       copy=to_copy, check_input=False)
    # For sparse matrices compare the underlying .data buffers.
    if to_copy and is_sparse:
        assert not np.may_share_memory(X_.data, X.data)
    elif to_copy:
        assert not np.may_share_memory(X_, X)
    elif is_sparse:
        assert np.may_share_memory(X_.data, X.data)
    else:
        assert np.may_share_memory(X_, X)
def test_dtype_preprocess_data():
    """Outputs keep the dtype of X (float32/float64), with mixed-dtype
    inputs following X, and the numerical results agree across dtypes."""
    n_samples = 200
    n_features = 2
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    X_32 = np.asarray(X, dtype=np.float32)
    y_32 = np.asarray(y, dtype=np.float32)
    X_64 = np.asarray(X, dtype=np.float64)
    y_64 = np.asarray(y, dtype=np.float64)
    for fit_intercept in [True, False]:
        for normalize in [True, False]:
            Xt_32, yt_32, X_mean_32, y_mean_32, X_norm_32 = _preprocess_data(
                X_32, y_32, fit_intercept=fit_intercept, normalize=normalize,
                return_mean=True)
            Xt_64, yt_64, X_mean_64, y_mean_64, X_norm_64 = _preprocess_data(
                X_64, y_64, fit_intercept=fit_intercept, normalize=normalize,
                return_mean=True)
            # Mixed dtypes: the dtype of X wins.
            Xt_3264, yt_3264, X_mean_3264, y_mean_3264, X_norm_3264 = (
                _preprocess_data(X_32, y_64, fit_intercept=fit_intercept,
                                 normalize=normalize, return_mean=True))
            Xt_6432, yt_6432, X_mean_6432, y_mean_6432, X_norm_6432 = (
                _preprocess_data(X_64, y_32, fit_intercept=fit_intercept,
                                 normalize=normalize, return_mean=True))
            assert_equal(Xt_32.dtype, np.float32)
            assert_equal(yt_32.dtype, np.float32)
            assert_equal(X_mean_32.dtype, np.float32)
            assert_equal(y_mean_32.dtype, np.float32)
            assert_equal(X_norm_32.dtype, np.float32)
            assert_equal(Xt_64.dtype, np.float64)
            assert_equal(yt_64.dtype, np.float64)
            assert_equal(X_mean_64.dtype, np.float64)
            assert_equal(y_mean_64.dtype, np.float64)
            assert_equal(X_norm_64.dtype, np.float64)
            assert_equal(Xt_3264.dtype, np.float32)
            assert_equal(yt_3264.dtype, np.float32)
            assert_equal(X_mean_3264.dtype, np.float32)
            assert_equal(y_mean_3264.dtype, np.float32)
            assert_equal(X_norm_3264.dtype, np.float32)
            assert_equal(Xt_6432.dtype, np.float64)
            assert_equal(yt_6432.dtype, np.float64)
            assert_equal(X_mean_6432.dtype, np.float64)
            assert_equal(y_mean_6432.dtype, np.float64)
            assert_equal(X_norm_6432.dtype, np.float64)
            # Inputs must not have been cast in place.
            assert_equal(X_32.dtype, np.float32)
            assert_equal(y_32.dtype, np.float32)
            assert_equal(X_64.dtype, np.float64)
            assert_equal(y_64.dtype, np.float64)
            assert_array_almost_equal(Xt_32, Xt_64)
            assert_array_almost_equal(yt_32, yt_64)
            assert_array_almost_equal(X_mean_32, X_mean_64)
            assert_array_almost_equal(y_mean_32, y_mean_64)
            assert_array_almost_equal(X_norm_32, X_norm_64)
def test_rescale_data():
    """_rescale_data must multiply rows of X and y by sqrt(sample_weight)."""
    n_samples = 200
    n_features = 2
    sample_weight = 1.0 + rng.rand(n_samples)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
    # Reference computed directly: row-wise sqrt-weight scaling.
    rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
    rescaled_y2 = y * np.sqrt(sample_weight)
    assert_array_almost_equal(rescaled_X, rescaled_X2)
    assert_array_almost_equal(rescaled_y, rescaled_y2)
| vortex-ape/scikit-learn | sklearn/linear_model/tests/test_base.py | Python | bsd-3-clause | 15,721 |
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Generate documentation from the recipes and modules in the current repo.
This can output as a protobuf of various forms (JSON, Text or binary), using the
`Doc` message in `recipe_engine/doc.proto`, or can emit gitiles-flavored
Markdown (either on stdout or written to the repo).
"""
def add_arguments(parser):
  """Register the `doc` subcommand's CLI arguments on *parser* and bind
  its launcher via parser defaults."""
  parser.add_argument(
      'recipe', nargs='?', help='Restrict documentation to this recipe')
  parser.add_argument(
      '--kind', default='gen',
      choices=('gen', 'binarypb', 'jsonpb', 'textpb', 'markdown'),
      help=(
        'Output this kind of documentation. `gen` (the default) will write the'
        ' standard README.recipes.md file. All others output to stdout'))
  def _run(args):
    # Imported lazily so merely building the parser stays cheap.
    from .cmd import main
    return main(args)
  parser.set_defaults(func=_run)
| luci/recipes-py | recipe_engine/internal/commands/doc/__init__.py | Python | apache-2.0 | 979 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Taiga Agile LLC <support@taiga.io>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from taiga.base import routers
from . import api
router = routers.DefaultRouter(trailing_slash=False)
# Stats endpoints are registered only when the deployment enables them.
if settings.STATS_ENABLED:
    router.register(r"stats/system", api.SystemStatsViewSet, base_name="system-stats")
    router.register(r"stats/discover", api.DiscoverStatsViewSet, base_name="discover-stats")
| dayatz/taiga-back | taiga/stats/routers.py | Python | agpl-3.0 | 1,085 |
"""
Django settings for erp project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP
# Extend Django's default context processors with the request object and
# the CSRF token helper.
TEMPLATE_CONTEXT_PROCESSORS = TCP + (
    'django.core.context_processors.request',
    'django.core.context_processors.csrf',
)
# Project root: two directory levels up from this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e28699qn)epm=n!xab(#uj(i#3*z4vm=id(wpq5u3au!6tb4wv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps: shared UI plus one app per ERP module.
    'ui',
    'modulos.dashboard',
    'modulos.articulos',
    'modulos.clientes',
    'modulos.proveedores',
    'modulos.estadisticas',
    'modulos.materiales',
    'modulos.ventas',
    'modulos.pedidos',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'erp.urls'
WSGI_APPLICATION = 'erp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'es-ve'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| urkh/erp | erp/settings.py | Python | mit | 2,380 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add a unique constraint on Cat.slug and the
    Punn.publish_on_facebook boolean column."""
    def forwards(self, orm):
        # Adding unique constraint on 'Cat', fields ['slug']
        db.create_unique('punns_cat', ['slug'])
        # Adding field 'Punn.publish_on_facebook'
        db.add_column('punns_punn', 'publish_on_facebook',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        # Removing unique constraint on 'Cat', fields ['slug']
        db.delete_unique('punns_cat', ['slug'])
        # Deleting field 'Punn.publish_on_facebook'
        db.delete_column('punns_punn', 'publish_on_facebook')
    # Frozen ORM snapshot generated by South at migration time; this is
    # data describing the schema -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'punns.cat': {
            'Meta': {'object_name': 'Cat'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
        },
        'punns.favorite': {
            'Meta': {'object_name': 'Favorite'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'punn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['punns.Punn']"})
        },
        'punns.punn': {
            'Meta': {'object_name': 'Punn'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'cat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['punns.Cat']", 'null': 'True', 'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_video': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'original_punn': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['punns.Punn']", 'null': 'True', 'blank': 'True'}),
            'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
            'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['punns.Tags']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
        },
        'punns.reblog': {
            'Meta': {'object_name': 'Reblog'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'origin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['punns.Punn']"})
        },
        'punns.tags': {
            'Meta': {'object_name': 'Tags'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tag': ('django.db.models.fields.CharField', [], {'max_length': '140'})
        }
    }
complete_apps = ['punns'] | carquois/blobon | blobon/punns/migrations/0028_auto__add_unique_cat_slug__add_field_punn_publish_on_facebook.py | Python | mit | 8,119 |
import os
import uuid
import shutil
import tarfile
import subprocess
import gzip
import zlib
from lutris.util import system
from lutris.util.log import logger
from lutris import settings
class ExtractFailure(Exception):
    """Exception raised when an archive fails to extract"""
def is_7zip_supported(path, extractor):
    """Tell whether 7zip can handle this archive.

    The decision uses the explicit ``extractor`` hint when given, otherwise
    the lowercased file extension of ``path``.
    """
    handled_formats = {
        "7z",
        "xz",
        "bzip2",
        "gzip",
        "tar",
        "zip",
        "ar",
        "arj",
        "cab",
        "chm",
        "cpio",
        "cramfs",
        "dmg",
        "ext",
        "fat",
        "gpt",
        "hfs",
        "ihex",
        "iso",
        "lzh",
        "lzma",
        "mbr",
        "msi",
        "nsis",
        "ntfs",
        "qcow2",
        "rar",
        "rpm",
        "squashfs",
        "udf",
        "uefi",
        "vdi",
        "vhd",
        "vmdk",
        "wim",
        "xar",
        "z",
    }
    if extractor:
        # An explicit hint wins over the file name.
        return extractor.lower() in handled_formats
    extension = os.path.splitext(path)[1].lstrip(".").lower()
    return extension in handled_formats
def extract_archive(path, to_directory=".", merge_single=True, extractor=None):
    """Extract an archive into ``to_directory``.

    path: archive file to extract (made absolute here).
    to_directory: destination directory, defaults to the current one.
    merge_single: if the archive has a single top-level entry, move that
        entry itself into ``to_directory`` instead of nesting it.
    extractor: optional explicit format hint (e.g. "tgz", "zip").

    Returns the (path, to_directory) tuple, or None for plain gzip files.
    Raises RuntimeError when no extractor matches and ExtractFailure when
    extraction itself fails.
    """
    path = os.path.abspath(path)
    mode = None
    logger.debug("Extracting %s to %s", path, to_directory)
    # Pick an opener from the file extension unless an explicit extractor
    # hint was given.
    if path.endswith(".tar.gz") or path.endswith(".tgz") or extractor == "tgz":
        opener, mode = tarfile.open, "r:gz"
    elif path.endswith(".tar.xz") or path.endswith(".txz") or extractor == "txz":
        opener, mode = tarfile.open, "r:xz"
    elif path.endswith(".tar") or extractor == "tar":
        opener, mode = tarfile.open, "r:"
    elif path.endswith(".gz") or extractor == "gzip":
        # Plain gzip (not a tarball): decompress in place and stop here.
        decompress_gz(path, to_directory)
        return
    elif path.endswith(".tar.bz2") or path.endswith(".tbz") or extractor == "bz2":
        opener, mode = tarfile.open, "r:bz2"
    elif is_7zip_supported(path, extractor):
        opener = "7zip"
    else:
        raise RuntimeError(
            "Could not extract `%s` as no appropriate extractor is found" % path
        )
    # Extract into a uniquely named temporary directory first so a failed
    # or partial extraction never pollutes the destination.
    temp_name = ".extract-" + str(uuid.uuid4())[:8]
    temp_path = temp_dir = os.path.join(to_directory, temp_name)
    try:
        _do_extract(path, temp_path, opener, mode, extractor)
    except (OSError, zlib.error) as ex:
        logger.exception("Extraction failed: %s", ex)
        raise ExtractFailure(str(ex))
    if merge_single:
        extracted = os.listdir(temp_path)
        if len(extracted) == 1:
            # Single top-level entry: promote it to avoid an extra
            # directory level in the destination.
            temp_path = os.path.join(temp_path, extracted[0])
    # NOTE(review): `extracted` is only bound when merge_single is true; the
    # isfile branch below relies on temp_path still being a directory
    # otherwise -- confirm before changing merge_single semantics.
    if os.path.isfile(temp_path):
        destination_path = os.path.join(to_directory, extracted[0])
        if os.path.isfile(destination_path):
            logger.warning("Overwrite existing file %s", destination_path)
            os.remove(destination_path)
        shutil.move(temp_path, to_directory)
        os.removedirs(temp_dir)
    else:
        # Move (or merge) every extracted entry into the destination.
        for archive_file in os.listdir(temp_path):
            source_path = os.path.join(temp_path, archive_file)
            destination_path = os.path.join(to_directory, archive_file)
            # logger.debug("Moving extracted files from %s to %s", source_path, destination_path)
            if system.path_exists(destination_path):
                logger.warning("Overwrite existing path %s", destination_path)
                if os.path.isfile(destination_path):
                    os.remove(destination_path)
                    shutil.move(source_path, destination_path)
                elif os.path.isdir(destination_path):
                    system.merge_folders(source_path, destination_path)
            else:
                shutil.move(source_path, destination_path)
        system.remove_folder(temp_dir)
    logger.debug("Finished extracting %s to %s", path, to_directory)
    return path, to_directory
def _do_extract(archive, dest, opener, mode=None, extractor=None):
if opener == "7zip":
extract_7zip(archive, dest, archive_type=extractor)
else:
handler = opener(archive, mode)
handler.extractall(dest)
handler.close()
def decompress_gz(file_path, dest_path=None):
    """Decompress a gzip file.

    file_path: path to the ``.gz`` file; the output name is the same path
        with the ``.gz`` suffix stripped.
    dest_path: optional directory the output file is written to; when
        omitted, the file is written next to the source.

    Returns ``dest_path`` (``None`` when not provided), preserving this
    helper's historical contract.
    """
    if dest_path:
        dest_filename = os.path.join(dest_path, os.path.basename(file_path[:-3]))
    else:
        dest_filename = file_path[:-3]
    # Context managers guarantee both handles are closed even if
    # decompression fails; copyfileobj streams in chunks instead of
    # loading the whole decompressed payload into memory.
    with gzip.open(file_path, "rb") as gzipped_file:
        with open(dest_filename, "wb") as dest_file:
            shutil.copyfileobj(gzipped_file, dest_file)
    return dest_path
def extract_7zip(path, dest, archive_type=None):
    """Extract an archive with 7zip, preferring the Lutris runtime copy."""
    seven_zip = os.path.join(settings.RUNTIME_DIR, "p7zip/7z")
    if not system.path_exists(seven_zip):
        # Fall back to a system-wide 7z binary.
        seven_zip = system.find_executable("7z")
        if not system.path_exists(seven_zip):
            raise OSError("7zip is not found in the lutris runtime or on the system")
    # "x" keeps directory structure, -aoa overwrites without prompting.
    extra_args = ["-t{}".format(archive_type)] if archive_type else []
    subprocess.call([seven_zip, "x", path, "-o{}".format(dest), "-aoa"] + extra_args)
| daniel-j/lutris | lutris/util/extract.py | Python | gpl-3.0 | 5,089 |
#!/usr/bin/python
import os, sys
import argparse
# Locate the moose/python directory
sys.path.append(os.path.join(os.path.abspath('..'), 'python'))
# Load the required moose/python packages
from FactorySystem import ParseGetPot
from PresentationBuilder import base
if __name__ == '__main__':
    # Command-line interface: parse the arguments, then emit the presentation.
    parser = argparse.ArgumentParser(description='A wiki presentation builder')
    parser.add_argument('input', type=str, help='Input file name')
    parser.add_argument('--format', '-f', type=str, default='remark', help='Select the presentation output format (remark | reveal)')
    options = parser.parse_args()

    # Build and write the presentation from the parsed input file.
    presentation = base.PresentationBuilder(options.input, format=options.format)
    presentation.write()
| danielru/moose | scripts/presentation_builder.py | Python | lgpl-2.1 | 746 |
# -*- coding: utf-8 -*-
# <nbformat>2</nbformat>
# <codecell>
# Import Libraries
import time as time
import numpy as np
import scipy as sp
import pylab as pl
import Image
#
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import Ward
# Get SEM BSE Tiff Data
# Load the backscattered-electron (BSE) SEM image from disk and display it.
tiffFile = ('/home/jon/Desktop/gsoc2014/semData2014/CML0615(13).tif')
im = Image.open(tiffFile)
im.show()
# Get SEM Data
#
#get the file path
#tifPath = '/home/jon/Desktop/gsoc2014/semImages/'
#tifFile = 'CML0615(13).tif'
#tifPathAndFile = tifPath + tifFile
#
#import in the tif
#imFull = Image.open(tifPathAndFile)
#imFull.show()
imFull = Image.open(tiffFile)
[xmax, ymax] = imFull.size
print xmax
print ymax
#crop the tiff
#x1, y1, x2, y2
#starts in bottom left corner and works to upper right corner
#for starts x1 = 0 , y1 = 0 END xN = max, yN = max
#while x < xmax , while y < ymax ; increase x by 250, increase y by 250 For roughly 50 images
# Crop a fixed 250x250 window from the origin of the image.
x1 = 0
y1 = 0
x2 = 250
y2 = 250
box=(x1, y1, x2, y2)
#
im_crop=imFull.crop(box)
#
im_crop.show()
#reassign im
im = im_crop
imarray = np.array(im)
# Convert Tif into "LENA"
# The variable name mirrors the scikit-learn "lena" clustering example this
# script was adapted from; X is the flattened pixel intensity column.
lena = im
X = np.reshape(lena, (-1, 1))
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*imarray.shape)
# Compute clustering
print "Compute structured hierarchical clustering..."
#
# NOTE(review): `st` is never read afterwards -- the timing of the
# clustering step is started but never reported.
st = time.time()
#
n_clusters = 200  # number of regions
#
# Agglomerative (Ward) clustering constrained to the pixel grid; each
# cluster is treated as one mineral region downstream.
ward = Ward(n_clusters=n_clusters, connectivity=connectivity).fit(X)
label = np.reshape(ward.labels_, imarray.shape)
# Path names for images
#
#saved file path
figurePath = '/home/jon/Desktop/'
#saved file ext
figureExt = '.png'
############
# Plot the crop bse image
pl.figure(figsize=(5, 5))
pl.imshow(lena, cmap=pl.cm.gray)
#
pl.xticks(())
pl.yticks(())
#
#saved file name
figureNameContour = 'cropped bse image'
#
#saved path and file and ext
figurePathNameContourExt = figurePath + figureNameContour + figureExt
#
pl.savefig(figurePathNameContourExt)
#
pl.show()
###########
##############
# Plot the contour results of image
# Overlay one contour per cluster on top of the grey-scale crop, coloring
# each cluster from the spectral colormap.
pl.figure(figsize=(5, 5))
pl.imshow(lena, cmap=pl.cm.gray)
#
for l in range(n_clusters):
    pl.contour(label == l, contours=1,
               colors=[pl.cm.spectral(l / float(n_clusters)), ])
#
pl.xticks(())
pl.yticks(())
#
#saved file name
figureNameContour = 'testContour'
#
#saved path and file and ext
figurePathNameContourExt = figurePath + figureNameContour + figureExt
#
pl.savefig(figurePathNameContourExt)
#
pl.show()
######
# Plot the reasign grain results of image
# Render the cluster-label image itself, shading each mineral region by its
# label value.
pl.figure(figsize=(5, 5))
pl.imshow(label, cmap=pl.cm.gray)
#
pl.xticks(())
pl.yticks(())
#
#saved file name
figureNameShade = 'testShade'
#
#saved path and file and ext
figurePathNameShadeExt = figurePath + figureNameShade + figureExt
#
pl.savefig(figurePathNameShadeExt)
pl.show()
# <codecell>
import matplotlib.pyplot as plt
####Histogram of label
# Count how many pixels landed in each of the 200 clusters.
labelHist = np.histogram(label, bins = range(200) )
#print labelHist
#plt.hist(label)
#plt.show()
#Get X and Y values for Hist into Bar
#labelHistSize = np.size(labelHist)
labelHistBarXVal = labelHist[1]
labelHistBarYVal = labelHist[0]
#
# Drop the first bin edge so x and y arrays have matching lengths.
labelHistBarXVal = labelHistBarXVal[1:]
#
plt.bar(labelHistBarXVal,labelHistBarYVal)
#label bar graph
plt.ylabel('Counts')
plt.xlabel('Mineral ID number')
plt.title('Pixel Size of Mineral Groups')
plt.savefig('pixelSizeOfMineralGroups.png')
#show bar graph
plt.show()
# <codecell>
#########
#run histogram on image
# PIL's histogram gives 256 grey-level counts for the cropped BSE image.
tiffHistogram = im.histogram()
#Get X and Y values for Hist into Bar
tiffHistSize = np.size(tiffHistogram)
#
tiffHistBarXVal = range(tiffHistSize)
tiffHistBarYVal = tiffHistogram
#create bar graph
plt.bar(tiffHistBarXVal,tiffHistBarYVal)
#label bar graph
plt.ylabel('Counts')
plt.xlabel('keV Bucket via Grey Scale Value count from BSE Image')
plt.title('BSE Tiff Histogram for Croped Area')
plt.savefig('bseDistOfCrop.png')
#show bar graph
plt.show()
# <codecell>
####Boxplot for BSE values for Mineral groups
from pylab import *
#####Convert fake data to real data!!!!!!!!!!!!!!
#use minearl Id numbers (label) and BSE 256 values (imarray)
#print label
#print imarray
#for each number in the 'label' have a list of 'imarray' values
#print np.shape(label)
#print np.shape(imarray)
#
labelReshape = np.reshape(label,(62500,1))
#print np.shape(labelReshape)
#
imarrayReshape = np.reshape(imarray,(62500,1))
#print np.shape(imarrayReshape)
#
labelBSEValue = np.column_stack( (labelReshape, imarrayReshape) )
#print labelBSEValue
print np.shape(labelBSEValue)
#
###Create a 200 x 200 Matrix to fill with these values
zerosTwoHundredSquare = np.zeros( shape=(200,201) )
print np.shape(zerosTwoHundredSquare)
#
for i in range(200):
zerosTwoHundredSquare[i,0] = i
#
print zerosTwoHundredSquare
#####
####
print '\n' + 'Line Break Between Empty Matrix and Filled' + '\n'
####
###Fill the 200x200 matrix with valus
#for i in range(np.size(labelBSEValue)):
# i is the row number of the 2D array
# j is the mineral ID number
counter = 1
for matrixCell in range(200):
#counter = 1
for mineralID in range(200):
if labelBSEValue[matrixCell,0] == mineralID:
zerosTwoHundredSquare[mineralID,counter] = labelBSEValue[matrixCell,1]
#
#print counter
counter += 1
print zerosTwoHundredSquare[198]
###Duplicate zerosTwoHundredSquare to remove Zeros
noZeros = np.zeros( shape=(200,201) )
for arrayRow in range(200):
noZeros[arrayRow] = zerosTwoHundredSquare[arrayRow]
print noZeros
#
#
#Convert from array to List
noZerosList = (0)*200
for arrayRow in range(200):
noZerosList[arrayRow] = noZeros[arrayRow].tolist()
print noZerosList
# <markdowncell>
# Notebook markdown cell preserved from the export: an earlier draft of the
# matrix-filling loop, with counter reset per pixel and debug printing.
# for matrixCell in range(200):
#     for mineralID in range(200):
#         counter = 1
#         if labelBSEValue[matrixCell,0] == mineralID:
#             print 'mineral ID is ' + str( mineralID ) + ' and ' + 'BSE is ' + str( labelBSEValue[matrixCell,1] )
#             zerosTwoHundredSquare[mineralID,counter] = labelBSEValue[matrixCell,1]
#             counter += 1
#
#     print zerosTwoHundredSquare
# <codecell>
####
# fake up some data
# NOTE(review): the boxplot below is driven entirely by random placeholder
# data (rand/ones), even though the axis labels claim real BSE values --
# the real per-mineral lists built above are not plotted yet.
spread= rand(50) * 100
center = ones(25) * 50
flier_high = rand(10) * 100 + 100
flier_low = rand(10) * -100
#
data =concatenate((spread, center, flier_high, flier_low), 0)
####
#####
#Fake data for multiple box plots
spread= rand(50) * 100
center = ones(25) * 40
flier_high = rand(10) * 100 + 100
flier_low = rand(10) * -100
d2 = concatenate( (spread, center, flier_high, flier_low), 0 )
data.shape = (-1, 1)
d2.shape = (-1, 1)
#
data = [data, d2, d2[::2,0]]
######
# basic plot
boxplot(data)
#label bar graph
plt.ylabel('BSE 256 Values')
plt.xlabel('Mineral ID Number')
plt.title('Boxplot of BSE values for Mineral Groups')
plt.savefig('Boxplot of BSE values for Mineral Groups.png')
#show bar graph
plt.show()
# <codecell>
| jonpdx/gsoc2014 | bseTiffToClusters.py | Python | gpl-2.0 | 6,859 |
import conedy as co
N = co.network()
co.set("ornUhl_drift" , 0.2)
co.set("ornUhl_diffusion" , 0.1)
co.set("samplingTime", 0.1)
N.addNode(co.ornUhl())
N.setState(0, 1.0)
N.observeTime("output/sdeIntegrator.py.series")
N.observeAll("output/sdeIntegrator.py.series", co.component(0))
N.evolve(0.0,15000.0)
# to calculate the variance of the ornstein-uhlenbeck
# the variance should be diffusion^2/(2*drift)
file = open('output/sdeIntegrator.py.series')
sum = 0
s2 = 0
n = 0
for line in file:
fl = float(line.split()[1])
sum += fl
s2 += fl*fl
n +=1
variance = (s2 - (sum*sum)/n)/n
print "should be around 0.025 : " +str(variance)
| Conedy/Conedy | testing/integrators/sdeIntegrator.py | Python | gpl-2.0 | 643 |
from __future__ import unicode_literals
import posixpath
import re
from django import template
from django.conf import settings
from django.contrib.staticfiles.storage import staticfiles_storage
from django.forms.utils import flatatt
from django.template.base import token_kwargs
from systemjs.base import System
# Tag registry for this template library.
register = template.Library()
# Regex for token keyword arguments
# NOTE(review): appears unused in this module -- token_kwargs() performs
# the actual parsing; confirm before removing.
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
class SystemImportNode(template.Node):
    """Template node that renders the <script> tag for a SystemJS module."""

    def __init__(self, path, tag_attrs=None):
        # path: FilterExpression that resolves to the JS module path.
        # tag_attrs: extra attributes to render on the generated tag.
        self.path = path
        self.tag_attrs = tag_attrs

    def render(self, context):
        """
        Build the filepath by appending the extension.
        """
        module_path = self.path.resolve(context)
        if not settings.SYSTEMJS_ENABLED:
            # Development mode: let SystemJS (or a dev server) resolve the
            # module in the browser instead of serving a bundle.
            if settings.SYSTEMJS_DEFAULT_JS_EXTENSIONS:
                # Mimic SystemJS' defaultJSExtensions: append .js if missing.
                name, ext = posixpath.splitext(module_path)
                if not ext:
                    module_path = '{}.js'.format(module_path)

            if settings.SYSTEMJS_SERVER_URL:
                tpl = """<script src="{url}{app}" type="text/javascript"></script>"""
            else:
                tpl = """<script type="text/javascript">System.import('{app}');</script>"""
            return tpl.format(app=module_path, url=settings.SYSTEMJS_SERVER_URL)

        # else: create a bundle
        rel_path = System.get_bundle_path(module_path)
        url = staticfiles_storage.url(rel_path)

        tag_attrs = {'type': 'text/javascript'}
        for key, value in self.tag_attrs.items():
            if not isinstance(value, bool):
                # Resolve template variables/filters; booleans are bare flags.
                value = value.resolve(context)
            tag_attrs[key] = value

        return """<script{attrs} src="{url}"></script>""".format(
            url=url, attrs=flatatt(tag_attrs)
        )

    @classmethod
    def handle_token(cls, parser, token):
        """Parse the template tag token into a SystemImportNode instance."""
        bits = token.split_contents()
        attrs = {}

        if len(bits) < 2:
            raise template.TemplateSyntaxError("'%s' takes at least one argument (js module)" % bits[0])

        if len(bits) > 2:
            for bit in bits[2:]:
                # First we try to extract a potential kwarg from the bit
                kwarg = token_kwargs([bit], parser)
                if kwarg:
                    attrs.update(kwarg)
                else:
                    attrs[bit] = True  # for flatatt

        path = parser.compile_filter(bits[1])
        return cls(path, tag_attrs=attrs)
@register.tag
def systemjs_import(parser, token):
    """
    Import a Javascript module via SystemJS, bundling the app.

    Syntax::

        {% systemjs_import 'path/to/file' %}

    Example::

        {% systemjs_import 'mydjangoapp/js/myapp' %}

    Which would be rendered like::

        <script type="text/javascript" src="/static/CACHE/mydjangoapp.js.min.myapp.js"></script>

    where /static/CACHE can be configured through settings.

    In DEBUG mode, the result would be

        <script type="text/javascript">System.import('mydjangoapp/js/myapp.js');</script>
    """
    # All parsing and rendering logic lives on the node class.
    return SystemImportNode.handle_token(parser, token)
| sergei-maertens/django-systemjs | systemjs/templatetags/system_tags.py | Python | mit | 3,097 |
import logging
import os
from typing import Text, Dict, Optional, List, Any, Iterable, Tuple, Union
from pathlib import Path
from rasa.core.agent import Agent
from rasa.engine.storage.local_model_storage import LocalModelStorage
import rasa.shared.utils.cli
import rasa.shared.utils.common
import rasa.shared.utils.io
import rasa.utils.common
from rasa.constants import RESULTS_FILE, NUMBER_OF_TRAINING_STORIES_FILE
from rasa.exceptions import ModelNotFound
from rasa.shared.constants import DEFAULT_RESULTS_PATH
import rasa.shared.nlu.training_data.loading
from rasa.shared.importers.autoconfig import TrainingType
from rasa.shared.nlu.training_data.training_data import TrainingData
import rasa.model
logger = logging.getLogger(__name__)
async def test_core_models_in_directory(
    model_directory: Text,
    stories: Text,
    output: Text,
    use_conversation_test_files: bool = False,
) -> None:
    """Evaluates a directory with multiple Core models using test data.

    Args:
        model_directory: Directory containing multiple model files.
        stories: Path to a conversation test file.
        output: Output directory to store results to.
        use_conversation_test_files: `True` if conversation test files should be used
            for testing instead of regular Core story files.
    """
    from rasa.core.test import compare_models_in_dir

    model_directory = _get_sanitized_model_directory(model_directory)

    await compare_models_in_dir(
        model_directory,
        stories,
        output,
        use_conversation_test_files=use_conversation_test_files,
    )

    # The training step recorded how many stories each model saw; use that
    # count as the x-axis of the comparison plot.
    story_n_path = os.path.join(model_directory, NUMBER_OF_TRAINING_STORIES_FILE)
    number_of_stories = rasa.shared.utils.io.read_json_file(story_n_path)
    plot_core_results(output, number_of_stories)
def plot_core_results(output_directory: Text, number_of_examples: List[int]) -> None:
    """Render the Core model comparison curve as a PDF.

    Args:
        output_directory: directory that receives the plot file.
        number_of_examples: number of training stories used per run.
    """
    import rasa.utils.plotting as plotting_utils

    plotting_utils.plot_curve(
        output_directory,
        number_of_examples,
        x_label_text="Number of stories present during training",
        y_label_text="Number of correct test stories",
        graph_path=os.path.join(output_directory, "core_model_comparison_graph.pdf"),
    )
def _get_sanitized_model_directory(model_directory: Text) -> Text:
    """Adjusts the `--model` argument of `rasa test core` when called with
    `--evaluate-model-directory`.

    `--evaluate-model-directory` needs a directory, but by default rasa
    resolves `--model` to the latest model *file*. If the given path is a
    file, fall back to the directory containing it.

    Args:
        model_directory: The model_directory argument that was given to
            `test_core_models_in_directory`.

    Returns: The adjusted model_directory that should be used in
        `test_core_models_in_directory`.
    """
    candidate = Path(model_directory)
    if not candidate.is_file():
        # Already a directory -- nothing to sanitize.
        return model_directory

    if model_directory != rasa.model.get_latest_model():
        rasa.shared.utils.cli.print_warning(
            "You passed a file as '--model'. Will use the directory containing "
            "this file instead."
        )
    return str(candidate.parent)
async def test_core_models(
    models: List[Text],
    stories: Text,
    output: Text,
    use_conversation_test_files: bool = False,
) -> None:
    """Compares multiple Core models based on test data.

    Args:
        models: A list of models files.
        stories: Path to test data.
        output: Path to output directory for test results.
        use_conversation_test_files: `True` if conversation test files should be used
            for testing instead of regular Core story files.
    """
    from rasa.core.test import compare_models

    # Thin wrapper: the whole comparison is delegated to `rasa.core.test`.
    await compare_models(
        models,
        stories,
        output,
        use_conversation_test_files=use_conversation_test_files,
    )
async def test_core(
    model: Optional[Text] = None,
    stories: Optional[Text] = None,
    output: Text = DEFAULT_RESULTS_PATH,
    additional_arguments: Optional[Dict] = None,
    use_conversation_test_files: bool = False,
) -> None:
    """Tests a trained Core model against a set of test stories.

    Args:
        model: Path to a model archive (defaults to the latest trained model).
        stories: Path to the test stories.
        output: Directory where evaluation results are written.
        additional_arguments: Extra keyword arguments forwarded to
            `rasa.core.test.test`.
        use_conversation_test_files: `True` to run end-to-end conversation
            tests instead of regular Core story files.
    """
    try:
        model = rasa.model.get_local_model(model)
    except ModelNotFound:
        rasa.shared.utils.cli.print_error(
            "Unable to test: could not find a model. Use 'rasa train' to train a "
            "Rasa model and provide it via the '--model' argument."
        )
        return

    metadata = LocalModelStorage.metadata_from_archive(model)

    if metadata.training_type == TrainingType.NLU:
        # NLU-only models have no policies to evaluate.
        # NOTE(review): this branch does not return, so execution continues
        # below -- confirm that is intended.
        rasa.shared.utils.cli.print_error(
            "Unable to test: no core model found. Use 'rasa train' to train a "
            "Rasa model and provide it via the '--model' argument."
        )

    elif metadata.training_type == TrainingType.CORE and use_conversation_test_files:
        rasa.shared.utils.cli.print_warning(
            "No NLU model found. Using default 'RegexMessageHandler' for end-to-end "
            "evaluation. If you added actual user messages to your test stories "
            "this will likely lead to the tests failing. In that case, you need "
            "to train a NLU model first, e.g. using `rasa train`."
        )

    if additional_arguments is None:
        additional_arguments = {}

    if output:
        rasa.shared.utils.io.create_directory(output)

    _agent = Agent.load(model_path=model)

    if not _agent.is_ready():
        rasa.shared.utils.cli.print_error(
            "Unable to test: processor not loaded. Use 'rasa train' to train a "
            "Rasa model and provide it via the '--model' argument."
        )
        return

    from rasa.core.test import test as core_test

    # Only forward the kwargs that `core_test` actually accepts.
    kwargs = rasa.shared.utils.common.minimal_kwargs(
        additional_arguments, core_test, ["stories", "agent", "e2e"]
    )

    await core_test(
        stories,
        _agent,
        e2e=use_conversation_test_files,
        out_directory=output,
        **kwargs,
    )
async def test_nlu(
    model: Optional[Text],
    nlu_data: Optional[Text],
    output_directory: Text = DEFAULT_RESULTS_PATH,
    additional_arguments: Optional[Dict] = None,
) -> None:
    """Tests the NLU Model.

    Args:
        model: Path to a trained model archive (or directory containing one).
        nlu_data: Path to the NLU test data.
        output_directory: Directory where evaluation reports are written.
        additional_arguments: Extra keyword arguments forwarded to
            `rasa.nlu.test.run_evaluation`.
    """
    from rasa.nlu.test import run_evaluation

    rasa.shared.utils.io.create_directory(output_directory)

    try:
        model = rasa.model.get_local_model(model)
    except ModelNotFound:
        rasa.shared.utils.cli.print_error(
            "Could not find any model. Use 'rasa train nlu' to train a "
            "Rasa model and provide it via the '--model' argument."
        )
        return

    metadata = LocalModelStorage.metadata_from_archive(model)

    # A Core-only model has no NLU components to evaluate.
    if not os.path.exists(model) or metadata.training_type == TrainingType.CORE:
        rasa.shared.utils.cli.print_error(
            "Could not find any model. Use 'rasa train nlu' to train a "
            "Rasa model and provide it via the '--model' argument."
        )
        return

    # Fix: guard against `None` before extracting keyword arguments --
    # `minimal_kwargs` iterates the mapping, so passing `None` through
    # would crash. This mirrors the handling in `test_core`.
    additional_arguments = additional_arguments or {}
    kwargs = rasa.shared.utils.common.minimal_kwargs(
        additional_arguments, run_evaluation, ["data_path", "model"]
    )
    _agent = Agent.load(model_path=model)
    await run_evaluation(
        nlu_data, _agent.processor, output_directory=output_directory, **kwargs
    )
async def compare_nlu_models(
    configs: List[Text],
    test_data: TrainingData,
    output: Text,
    runs: int,
    exclusion_percentages: List[int],
) -> None:
    """Trains multiple models, compares them and saves the results.

    Args:
        configs: Paths to the model configurations to compare.
        test_data: Training data used for the comparison.
        output: Directory where results and plots are written.
        runs: Number of comparison runs.
        exclusion_percentages: Percentages of training data to withhold per run.
    """
    from rasa.nlu.test import drop_intents_below_freq
    from rasa.nlu.utils import write_json_to_file
    from rasa.utils.io import create_path
    from rasa.nlu.test import compare_nlu

    # Intents with too few examples cannot be meaningfully evaluated.
    test_data = drop_intents_below_freq(test_data, cutoff=5)

    create_path(output)

    # Derive a model name from each config file name (without extension).
    bases = [os.path.basename(nlu_config) for nlu_config in configs]
    model_names = [os.path.splitext(base)[0] for base in bases]

    # One empty result list per run, keyed by model name; filled in-place
    # by `compare_nlu`.
    f1_score_results = {
        model_name: [[] for _ in range(runs)] for model_name in model_names
    }

    training_examples_per_run = await compare_nlu(
        configs,
        test_data,
        exclusion_percentages,
        f1_score_results,
        model_names,
        output,
        runs,
    )

    f1_path = os.path.join(output, RESULTS_FILE)
    write_json_to_file(f1_path, f1_score_results)

    plot_nlu_results(output, training_examples_per_run)
def plot_nlu_results(output_directory: Text, number_of_examples: List[int]) -> None:
    """Render the NLU model comparison curve as a PDF.

    Args:
        output_directory: directory that receives the plot file.
        number_of_examples: number of intent examples used per run.
    """
    import rasa.utils.plotting as plotting_utils

    plotting_utils.plot_curve(
        output_directory,
        number_of_examples,
        x_label_text="Number of intent examples present during training",
        y_label_text="Label-weighted average F1 score on test set",
        graph_path=os.path.join(output_directory, "nlu_model_comparison_graph.pdf"),
    )
async def perform_nlu_cross_validation(
    config: Dict[Text, Any],
    data: TrainingData,
    output: Text,
    additional_arguments: Optional[Dict[Text, Any]],
) -> None:
    """Runs cross-validation on test data.

    Args:
        config: The model configuration.
        data: The data which is used for the cross-validation.
        output: Output directory for the cross-validation results.
        additional_arguments: Additional arguments which are passed to the
            cross-validation, like number of `disable_plotting`.
    """
    from rasa.nlu.test import (
        drop_intents_below_freq,
        cross_validate,
        log_results,
        log_entity_results,
    )

    additional_arguments = additional_arguments or {}
    folds = int(additional_arguments.get("folds", 3))
    # Intents with fewer examples than folds cannot appear in every fold.
    data = drop_intents_below_freq(data, cutoff=folds)
    kwargs = rasa.shared.utils.common.minimal_kwargs(
        additional_arguments, cross_validate
    )
    results, entity_results, response_selection_results = await cross_validate(
        data, folds, config, output, **kwargs
    )
    logger.info(f"CV evaluation (n={folds})")

    if any(results):
        logger.info("Intent evaluation results")
        log_results(results.train, "train")
        log_results(results.test, "test")
    if any(entity_results):
        logger.info("Entity evaluation results")
        log_entity_results(entity_results.train, "train")
        log_entity_results(entity_results.test, "test")
    if any(response_selection_results):
        logger.info("Response Selection evaluation results")
        log_results(response_selection_results.train, "train")
        log_results(response_selection_results.test, "test")
def get_evaluation_metrics(
    targets: Iterable[Any],
    predictions: Iterable[Any],
    output_dict: bool = False,
    exclude_label: Optional[Text] = None,
) -> Tuple[Union[Text, Dict[Text, Dict[Text, float]]], float, float, float]:
    """Compute the f1, precision, accuracy and summary report from sklearn.

    Args:
        targets: target labels
        predictions: predicted labels
        output_dict: if True sklearn returns a summary report as dict, if False the
            report is in string format
        exclude_label: labels to exclude from evaluation

    Returns:
        Report from sklearn, precision, f1, and accuracy values.
    """
    from sklearn import metrics

    # sklearn metrics reject `None` labels, so normalize them first.
    targets = clean_labels(targets)
    predictions = clean_labels(predictions)

    labels = get_unique_labels(targets, exclude_label)
    if not labels:
        logger.warning("No labels to evaluate. Skip evaluation.")
        # Degenerate case: empty report and zeroed metrics.
        return {}, 0.0, 0.0, 0.0

    # Weighted averages so that frequent labels dominate the scores.
    report = metrics.classification_report(
        targets, predictions, labels=labels, output_dict=output_dict
    )
    precision = metrics.precision_score(
        targets, predictions, labels=labels, average="weighted"
    )
    f1 = metrics.f1_score(targets, predictions, labels=labels, average="weighted")
    accuracy = metrics.accuracy_score(targets, predictions)

    return report, precision, f1, accuracy
def clean_labels(labels: Iterable[Text]) -> List[Text]:
    """Replace `None` labels with empty strings.

    sklearn metrics do not support `None` labels, so they are mapped to "".

    Args:
        labels: list of labels

    Returns:
        Cleaned labels.
    """
    return ["" if label is None else label for label in labels]
def get_unique_labels(
    targets: Iterable[Text], exclude_label: Optional[Text]
) -> List[Text]:
    """Collect the distinct labels, optionally dropping `exclude_label`.

    Args:
        targets: labels
        exclude_label: label to exclude

    Returns:
        Unique labels.
    """
    unique = set(targets)
    if exclude_label:
        # discard() is a no-op when the label is absent.
        unique.discard(exclude_label)
    return list(unique)
| RasaHQ/rasa_nlu | rasa/model_testing.py | Python | apache-2.0 | 13,138 |
#
# flags.py: global anaconda flags
#
# Copyright (C) 2001 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import selinux
import shlex
import types
import glob
from pyanaconda.constants import SELINUX_DEFAULT, CMDLINE_APPEND
from collections import OrderedDict
import logging
log = logging.getLogger("anaconda")
# A lot of effort, but it only allows a limited set of flags to be referenced
class Flags(object):
    """Global installer flags.

    Attribute creation is frozen after __init__ so that a typo in a flag
    name raises AttributeError instead of silently creating a new flag.
    """

    def __setattr__(self, attr, val):
        # pylint: disable=no-member
        # Only attributes created during __init__ may be assigned later.
        if attr not in self.__dict__ and not self._in_init:
            raise AttributeError(attr)
        else:
            self.__dict__[attr] = val

    def get(self, attr, val=None):
        """Return the flag's value, or val when the flag does not exist."""
        return getattr(self, attr, val)

    def set_cmdline_bool(self, flag):
        """Override the flag from the boot command line, if present there."""
        if flag in self.cmdline:
            setattr(self, flag, self.cmdline.getbool(flag))

    def __init__(self, read_cmdline=True):
        # Bypass __setattr__'s freeze check while the defaults are created.
        self.__dict__['_in_init'] = True
        self.livecdInstall = False
        self.ibft = True
        self.usevnc = False
        self.vncquestion = True
        self.mpath = True
        self.dmraid = True
        self.selinux = SELINUX_DEFAULT
        self.debug = False
        self.armPlatform = None
        self.preexisting_x11 = False
        self.noverifyssl = False
        self.imageInstall = False
        self.automatedInstall = False
        self.dirInstall = False
        self.askmethod = False
        self.eject = True
        self.extlinux = False
        self.nombr = False
        self.gpt = False
        self.leavebootorder = False
        self.testing = False
        self.mpathFriendlyNames = True
        # ksprompt is whether or not to prompt for missing ksdata
        self.ksprompt = True
        self.rescue_mode = False
        self.noefi = False
        self.kexec = False
        # parse the boot commandline
        self.cmdline = BootArgs()
        # Lock it down: no more creating new flags!
        self.__dict__['_in_init'] = False
        if read_cmdline:
            self.read_cmdline()

    def read_cmdline(self):
        """Set flag values from the boot command line and SELinux state."""
        for f in ("selinux", "debug", "leavebootorder", "testing", "extlinux",
                  "nombr", "gpt", "noefi"):
            self.set_cmdline_bool(f)

        if not selinux.is_selinux_enabled():
            self.selinux = 0
# Locations (including globs) that may provide boot arguments, in read order.
cmdline_files = ['/proc/cmdline', '/run/install/cmdline',
                 '/run/install/cmdline.d/*.conf', '/etc/cmdline']
class BootArgs(OrderedDict):
    """
    Hold boot arguments as an OrderedDict.
    """

    def __init__(self, cmdline=None, files=None):
        """
        Create a BootArgs object.
        Reads each of the "files", then parses "cmdline" if it was provided.
        """
        OrderedDict.__init__(self)
        if files is None:
            # Fall back to the standard boot-argument locations.
            self.read(cmdline_files)
        elif files:
            self.read(files)

        if cmdline:
            self.readstr(cmdline)

    def read(self, filenames):
        """
        Read and parse a filename (or a list of filenames).
        Files that can't be read are silently ignored.
        Returns a list of successfully read files.
        filenames can contain *, ?, and character ranges expressed with []
        """
        readfiles = []
        if isinstance(filenames, types.StringType):
            filenames = [filenames]

        # Expand any filename globs
        filenames = [f for g in filenames for f in glob.glob(g)]

        for f in filenames:
            try:
                self.readstr(open(f).read())
                readfiles.append(f)
            except IOError:
                continue
        return readfiles

    def readstr(self, cmdline):
        """Parse a kernel command line string into this dict."""
        cmdline = cmdline.strip()

        # if the BOOT_IMAGE contains a space, pxelinux will strip one of the
        # quotes leaving one at the end that shlex doesn't know what to do
        # with
        (left, middle, right) = cmdline.rpartition("BOOT_IMAGE=")
        if right.count('"') % 2:
            cmdline = left + middle + '"' + right

        # shlex doesn't properly handle \\ (it removes them)
        # which scrambles the spaces used in labels so use underscores
        cmdline = cmdline.replace("\\x20", "_")

        lst = shlex.split(cmdline)

        # options might have the inst. prefix (used to differentiate
        # boot options for the installer from other boot options)
        inst_prefix = "inst."

        for i in lst:
            # drop the inst. prefix (if found), so that getbool() works
            # consistently for both "foo=0" and "inst.foo=0"
            if i.startswith(inst_prefix):
                i = i[len(inst_prefix):]

            if "=" in i:
                (key, val) = i.split("=", 1)
            else:
                key = i
                val = None

            # Some duplicate args create a space separated string
            if key in CMDLINE_APPEND and self.get(key, None):
                if val:
                    self[key] = self[key] + " " + val
            else:
                self[key] = val

    def getbool(self, arg, default=False):
        """
        Return the value of the given arg, as a boolean. The rules are:
        - "arg", "arg=val": True
        - "noarg", "noarg=val", "arg=[0|off|no]": False
        """
        # Scan all arguments so the last occurrence wins.
        result = default
        for a in self:
            if a == arg:
                if self[arg] in ("0", "off", "no"):
                    result = False
                else:
                    result = True
            elif a == 'no'+arg:
                result = False  # XXX: should noarg=off -> True?
        return result
def can_touch_runtime_system(msg, touch_live=False):
    """
    Guard that should be used before doing actions that modify runtime system.

    :param msg: message to be logged in case that runtime system cannot be touched
    :type msg: str
    :param touch_live: whether to allow touching liveCD installation system
    :type touch_live: bool
    :rtype: bool
    """
    # Each pair is (blocking condition, log message); the first match wins.
    blockers = (
        (flags.livecdInstall and not touch_live, "Not doing '%s' in live installation"),
        (flags.imageInstall, "Not doing '%s' in image installation"),
        (flags.dirInstall, "Not doing '%s' in directory installation"),
        (flags.testing, "Not doing '%s', because we are just testing"),
    )
    for blocked, message in blockers:
        if blocked:
            log.info(message, msg)
            return False
    return True
# Module-level singleton; constructing it parses the boot command line.
flags = Flags()
| vojtechtrefny/anaconda | pyanaconda/flags.py | Python | gpl-2.0 | 7,069 |
import logging
from .engine import SimEngine
from .successors import SimSuccessors
# Module-level logger for this engine.
l = logging.getLogger("angr.engines.hook")
# pylint: disable=abstract-method,unused-argument,arguments-differ
class SimEngineHook(SimEngine):
    """Engine that executes a hooked address by running its SimProcedure."""

    def _check(self, state, procedure=None, **kwargs):
        # we have not yet entered the next step - we should check the "current" jumpkind
        if state.history.jumpkind == 'Ijk_NoHook':
            return False
        if state._ip.symbolic:
            # symbolic IP is not supported
            return False
        if procedure is not None:
            # An explicit procedure was handed to us; nothing to look up.
            return True
        hooks = self.project._sim_procedures
        if state.addr in hooks:
            return True
        # ARM THUMB mode sets the low bit of the address; the hook table
        # stores the even address, so retry with that bit cleared.
        return (state.arch.name.startswith('ARM')
                and state.addr & 1 == 1
                and state.addr - 1 in hooks)

    def process(self, state, procedure=None, force_addr=None, **kwargs):
        """
        Perform execution with a state.

        :param state:      The state with which to execute
        :param procedure:  An instance of a SimProcedure to run, optional
        :param ret_to:     The address to return to when this procedure is finished
        :param inline:     This is an inline execution. Do not bother copying the state.
        :param force_addr: Force execution to pretend that we're working at this concrete address
        :returns:          A SimSuccessors object categorizing the execution's successor states
        """
        addr = force_addr if force_addr is not None else state.addr
        if procedure is None:
            hooks = self.project._sim_procedures
            if addr in hooks:
                procedure = hooks[addr]
            elif state.arch.name.startswith('ARM') and addr & 1 == 1 and addr - 1 in hooks:
                # THUMB address: the hook lives at the even address.
                procedure = hooks[addr - 1]
            else:
                return SimSuccessors.failure()
        l.debug("Running %s (originally at %#x)", repr(procedure), addr)
        return self.project.factory.procedure_engine.process(state, procedure, force_addr=force_addr, **kwargs)
| tyb0807/angr | angr/engines/hook.py | Python | bsd-2-clause | 2,179 |
# Copyright (C) 2010-2013 Henry Ludemann
#
# This file is part of the bdec decoder library.
#
# The bdec decoder library is free software; you can redistribute it
# and/or modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# The bdec decoder library is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see
# <http://www.gnu.org/licenses/>.
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (c) 2010, PRESENSE Technologies GmbH
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the PRESENSE Technologies GmbH nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL PRESENSE Technologies GmbH BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from optparse import OptionParser
import sys
import bdec
import bdec.data as dt
from bdec.spec import load_specs
import bdec.output.xmlout as xmlout
# Usage/help text shown by optparse; %s is filled with the executable name.
__doc__ = '''%s <spec 1> [spec 2]...
Encode xml to binary given a bdec specification. It will read the xml to from
encode from stdin.''' % sys.argv[0]
def main():
parser = OptionParser(usage=__doc__)
parser.add_option('-f', dest='filename', help='Read the xml from FILE '
'instead of stdin.', metavar='FILENAME')
parser.add_option('--main', dest='main', help='Specify the entry to '
'be encoded instead of the toplevel protocol object.',
metavar='ENTRY')
parser.add_option('--remove-unused', dest='remove_unused', help='Remove '
'unused entries from the specification after loading. This allows '
"specs to include references that don't resolve.", action='store_true')
options, args = parser.parse_args()
if not args:
sys.exit("No specifications listed! See '%s -h' for more info." % sys.argv[0])
try:
protocol, common, lookup = load_specs([(spec, None, None) for spec in args],
options.main, options.remove_unused)
except bdec.spec.LoadError, ex:
sys.exit(str(ex))
if options.filename:
xml = file(options.filename, 'rb').read()
else:
xml = sys.stdin.read()
try:
binary = xmlout.encode(protocol, xml).bytes()
except bdec.DecodeError, ex:
try:
(filename, line_number, column_number) = lookup[ex.entry]
except KeyError:
(filename, line_number, column_number) = ('unknown', 0, 0)
sys.exit(("%s[%i]: %s" % (filename, line_number, ex)).encode('utf8'))
sys.stdout.write(binary)
# Script entry point.
if __name__ == '__main__':
    main()
| asdf1011/bdec | bdec/tools/encode.py | Python | lgpl-3.0 | 4,315 |
"""
Specific overrides to the base prod settings to make development easier.
"""
from .aws import * # pylint: disable=wildcard-import, unused-wildcard-import
# Don't use S3 in devstack, fall back to filesystem
del DEFAULT_FILE_STORAGE
MEDIA_ROOT = "/edx/var/edxapp/uploads"
DEBUG = True
USE_I18N = True
TEMPLATE_DEBUG = True
SITE_NAME = 'localhost:8000'
PLATFORM_NAME = ENV_TOKENS.get('PLATFORM_NAME', 'Devstack')
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ LOGGERS ######################################
import logging
# Disable noisy loggers
for pkg_name in ['track.contexts', 'track.middleware', 'dd.dogapi']:
logging.getLogger(pkg_name).setLevel(logging.CRITICAL)
################################ EMAIL ########################################
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True # Enable email for all Studio courses
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False # Give all courses email (don't require django-admin perms)
########################## ANALYTICS TESTING ########################
ANALYTICS_SERVER_URL = "http://127.0.0.1:9000/"
ANALYTICS_API_KEY = ""
# Set this to the dashboard URL in order to display the link from the
# dashboard to the Analytics Dashboard.
ANALYTICS_DASHBOARD_URL = None
################################ DEBUG TOOLBAR ################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo')
MIDDLEWARE_CLASSES += (
'django_comment_client.utils.QueryCountDebugMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar_mongo.panel.MongoDebugPanel',
# ProfilingPanel has been intentionally removed for default devstack.py
# runtimes for performance reasons. If you wish to re-enable it in your
# local development environment, please create a new settings file
# that imports and extends devstack.py.
)
DEBUG_TOOLBAR_CONFIG = {
'SHOW_TOOLBAR_CALLBACK': 'lms.envs.devstack.should_show_debug_toolbar'
}
def should_show_debug_toolbar(_):
    """Toolbar visibility callback: always on in devstack, regardless of
    the request's IP, authentication, or anything else."""
    return True
########################### PIPELINE #################################
# Skip RequireJS optimizer in development
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
# Whether to run django-require in debug mode.
REQUIRE_DEBUG = DEBUG
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
########################### VERIFIED CERTIFICATES #################################
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
FEATURES['ENABLE_PAYMENT_FAKE'] = True
# Dummy CyberSource credentials pointing at the local payment-fake endpoint.
CC_PROCESSOR_NAME = 'CyberSource2'
CC_PROCESSOR = {
    'CyberSource2': {
        "PURCHASE_ENDPOINT": '/shoppingcart/payment_fake/',
        "SECRET_KEY": 'abcd123',
        "ACCESS_KEY": 'abcd123',
        "PROFILE_ID": 'edx',
    }
}
########################### External REST APIs #################################
FEATURES['ENABLE_OAUTH2_PROVIDER'] = True
OAUTH_OIDC_ISSUER = 'http://127.0.0.1:8000/oauth2'
FEATURES['ENABLE_MOBILE_REST_API'] = True
FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True
########################## SECURITY #######################
# All password/login hardening is relaxed for local development.
FEATURES['ENFORCE_PASSWORD_POLICY'] = False
FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False
FEATURES['SQUELCH_PII_IN_LOGS'] = False
FEATURES['PREVENT_CONCURRENT_LOGINS'] = False
FEATURES['ADVANCED_SECURITY'] = False
PASSWORD_MIN_LENGTH = None
PASSWORD_COMPLEXITY = {}
########################### Milestones #################################
FEATURES['MILESTONES_APP'] = True
########################### Entrance Exams #################################
FEATURES['ENTRANCE_EXAMS'] = True
################################ COURSE LICENSES ################################
FEATURES['LICENSING'] = True
########################## Courseware Search #######################
FEATURES['ENABLE_COURSEWARE_SEARCH'] = False
SEARCH_ENGINE = "search.elastic.ElasticSearchEngine"
########################## Dashboard Search #######################
FEATURES['ENABLE_DASHBOARD_SEARCH'] = True
########################## Certificates Web/HTML View #######################
FEATURES['CERTIFICATES_HTML_VIEW'] = True
########################## Course Discovery #######################
from django.utils.translation import ugettext as _
LANGUAGE_MAP = {'terms': {lang: display for lang, display in ALL_LANGUAGES}, 'name': _('Language')}
COURSE_DISCOVERY_MEANINGS = {
    'org': {
        'name': _('Organization'),
    },
    'modes': {
        'name': _('Course Type'),
        'terms': {
            'honor': _('Honor'),
            'verified': _('Verified'),
        },
    },
    'language': LANGUAGE_MAP,
}
FEATURES['ENABLE_COURSE_DISCOVERY'] = True
FEATURES['COURSES_ARE_BROWSEABLE'] = True
HOMEPAGE_COURSE_MAX = 9
# Software secure fake page feature flag
FEATURES['ENABLE_SOFTWARE_SECURE_FAKE'] = True
# Setting for the testing of Software Secure Result Callback
VERIFY_STUDENT["SOFTWARE_SECURE"] = {
    "API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB",
    "API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
}
# Skip enrollment start date filtering
SEARCH_SKIP_ENROLLMENT_START_DATE_FILTERING = True
########################## Shopping cart ##########################
FEATURES['ENABLE_SHOPPING_CART'] = True
FEATURES['STORE_BILLING_INFO'] = True
FEATURES['ENABLE_PAID_COURSE_REGISTRATION'] = True
FEATURES['ENABLE_COSMETIC_DISPLAY_PRICE'] = True
########################## Third Party Auth #######################
# Prepend the dummy auth backend so third-party auth can be exercised locally.
if FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and 'third_party_auth.dummy.DummyBackend' not in AUTHENTICATION_BACKENDS:
    AUTHENTICATION_BACKENDS = ['third_party_auth.dummy.DummyBackend'] + list(AUTHENTICATION_BACKENDS)
############## ECOMMERCE API CONFIGURATION SETTINGS ###############
ECOMMERCE_PUBLIC_URL_ROOT = "http://localhost:8002"
###################### Cross-domain requests ######################
# Wide-open CORS: acceptable for local development only.
FEATURES['ENABLE_CORS_HEADERS'] = True
CORS_ALLOW_CREDENTIALS = True
CORS_ORIGIN_WHITELIST = ()
CORS_ORIGIN_ALLOW_ALL = True
#####################################################################
# See if the developer has any local overrides.
try:
    from .private import * # pylint: disable=wildcard-import
except ImportError:
    pass
#####################################################################
# Lastly, run any migrations, if needed.
MODULESTORE = convert_module_store_setting_if_needed(MODULESTORE)
# Fixed development-only secret key; never reuse outside devstack.
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
| atsolakid/edx-platform | lms/envs/devstack.py | Python | agpl-3.0 | 7,126 |
""" norecurse.py | UTF-8 | Mon, Jan 23, 2017 | Roman S. Collins
An implementation of calculating Fibonacci of n using various functions,
    while plotting their efficiency in milliseconds.
Problem:
http://cs.marlboro.edu/courses/spring2017/algorithms/special/assignments
    Find the ratio of consecutive Fibonacci numbers for large n.
Bonus: Make a plot of n vs Fib[n], fit to a curve of the form, and discuss.
Dependencies:
- python matplotlib library
- python sympy library
TODO:
- Plot using matplotlib
- Create many Fibonacci of n functions
Sources:
- 5 Ways of Fibonacci in Python:
https://technobeans.com/2012/04/16/5-ways-of-fibonacci-in-python/
- Five Ways to Calculate Fibonacci Numbers with Python Code
http://sahandsaba.com/five-ways-to-calculate-fibonacci-numbers-with-python-code.html
"""
import time, sys
import matplotlib.pyplot as pyplot
# Raise the recursion ceiling so the recursive Fibonacci variants can go deep.
sys.setrecursionlimit(18000)
# Module-level pair consumed (and mutated) by Fibby.fib5's generator.
a, b = 0, 1
class Fibby():
    """A collection of Fibonacci implementations, timed and compared in main().

    Indexing convention (matching fib1/fib2/fib3):
    fib(0) = 0, fib(1) = 1, fib(2) = 1, ..., fib(10) = 55.
    """

    # TODO: plot from inside the class (main() currently does all plotting).
    def plot(self):
        pass

    # Source: Python tutoring with Dylan
    # http://pastebin.com/89dxMK2L
    def fib1(self, n, _memo=None):
        """Recursive Fibonacci with memoization.

        Bug fix: the original created a fresh `prev` dict on every recursive
        call, so the memo lookup never hit and the recursion stayed
        exponential. The cache is now threaded through the recursion as an
        optional argument (the old one-argument call form still works),
        making the computation linear in n.
        """
        if _memo is None:
            _memo = {}
        # fib(0) == 0 and fib(1) == 1 by definition.
        if n == 0 or n == 1:
            return n
        if n in _memo:
            return _memo[n]
        _memo[n] = self.fib1(n - 1, _memo) + self.fib1(n - 2, _memo)
        return _memo[n]

    # Source: a YouTube video
    # https://www.youtube.com/watch?v=CKPciT2ROL8
    # https://dl.dropboxusercontent.com/u/4904309/fib-spiral.py
    def fib2(self, n):
        """Iterative Fibonacci; e.g. fib2(10) == 55."""
        n1, n2 = 0, 1
        if n == 0 or n == 1:
            return n
        for _ in range(n - 1):
            n = n1 + n2
            n1 = n2
            n2 = n
        return n

    # Source: 5 Ways of Fibonacci in Python
    # Example 1: Using looping technique
    def fib3(self, n):
        """Iterative Fibonacci for n >= 1 (note: fib3(0) returns 1, not 0)."""
        a, b = 1, 1
        for i in range(n - 1):
            a, b = b, a + b
        return a

    # Source: 5 Ways of Fibonacci in Python
    # Example 2: Using recursion
    def fib4(self, n):
        """Plain (exponential) recursion for n >= 1.

        Bug fix: the base case used to `return n`, so fib4(2) was 2 and every
        larger value was shifted off the Fibonacci sequence; the source
        algorithm returns 1 for both base cases, matching fib1/fib2/fib3.
        """
        if n == 1 or n == 2:
            return 1
        return self.fib4(n - 1) + self.fib4(n - 2)

    # Source: 5 Ways of Fibonacci in Python
    # Example 3: Using generators
    # https://stackoverflow.com/questions/12274606/theres-no-next-function-in-a-yield-generator-in-python-3
    def fib5(self):
        """Infinite Fibonacci generator.

        NOTE: advances the module-level globals `a` and `b`, so successive
        generators continue the sequence where the previous one stopped.
        """
        global a, b
        while True:
            a, b = b, a + b
            yield a
def main():
    """Benchmark each Fibonacci implementation over n and plot the timings."""
    #n = int(input('Calculate Fibonacci of?: '))
    #n_plus = (int(n) + 1)
    fibby = Fibby()
    #print(fibby.fib1(n) / fibby.fib1(n + 1))
    #print(fibby.fib2(n) / fibby.fib2(n + 1))
    #print(fibby.fib3(n) / fibby.fib3(n + 1))
    #print(fibby.fib5(n) / fibby.fib5(n + 1))
    #print(fibby.fib6(n) / fibby.fib6(n + 1))
    #fibm = memoize(fib,5)
    #print fibm
    # Parallel lists of inputs and elapsed wall-clock times, per implementation.
    # fib1
    fib1_results = []
    fib1_times = []
    # fib2
    fib2_results = []
    fib2_times = []
    # fib3
    fib3_results = []
    fib3_times = []
    # fib4
    fib4_results = []
    fib4_times = []
    # fib5
    fib5_results = []
    fib5_times = []
    fib_of = 30
    # Time each implementation for every n in [0, fib_of).
    for n in range(fib_of):
        # Make many variables to ensure no false positive
        fib1_n, fib2_n, fib3_n, fib4_n = n, n, n, n
        # fib1
        start_time = time.time()
        fibby.fib1(fib1_n)
        end_time = time.time()
        fib1_results.append(fib1_n)
        fib1_times.append(end_time - start_time)
        pyplot.plot(fib1_results, fib1_times)
        # fib2
        start_time = time.time()
        fibby.fib2(fib2_n)
        end_time = time.time()
        fib2_results.append(fib2_n)
        fib2_times.append(end_time - start_time)
        pyplot.plot(fib2_results, fib2_times)
        # fib3
        start_time = time.time()
        fibby.fib3(fib3_n)
        end_time = time.time()
        fib3_results.append(fib3_n)
        fib3_times.append(end_time - start_time)
        pyplot.plot(fib3_results, fib3_times)
        # fib4
        #start_time = time.time()
        #fibby.fib4(fib4_n)
        #end_time = time.time()
        #fib4_results.append(fib4_n)
        #fib4_times.append(end_time - start_time)
        #pyplot.plot(fib4_results, fib4_times)
    # fib5
    # NOTE(review): each iteration calls fibby.fib5() afresh, creating a new
    # generator; this only advances the sequence because fib5 mutates the
    # module-level globals a and b. The timing is also recorded as a single
    # aggregate data point rather than one point per n -- confirm intended.
    start_time = time.time()
    fib5_n = 30
    for i in range(fib5_n):
        print(i, next(fibby.fib5()))
    #print(next(fibby.fib5()))
    #print(next(fibby.fib5()))
    end_time = time.time()
    fib5_results.append(fib5_n)
    fib5_times.append(end_time - start_time)
    pyplot.plot(fib5_results, fib5_times)
    pyplot.xlabel('n')
    pyplot.ylabel('time')
    pyplot.title('The Ways of Fibonacci: ')
    pyplot.grid(True)
    #pyplot.savefig("test.png")
    pyplot.show()
# Script entry point.
if __name__ == '__main__':
    main()
| RomanSC/algorithms | chapter-1/norecurse.py | Python | gpl-3.0 | 6,034 |
"""Config flow for Awair."""
from typing import Optional
from python_awair import Awair
from python_awair.exceptions import AuthError, AwairError
import voluptuous as vol
from homeassistant.config_entries import CONN_CLASS_CLOUD_POLL, ConfigFlow
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DOMAIN, LOGGER
class AwairFlowHandler(ConfigFlow, domain=DOMAIN):
    """Config flow for Awair."""

    VERSION = 1
    # Data is pulled from Awair's cloud API by polling.
    CONNECTION_CLASS = CONN_CLASS_CLOUD_POLL

    async def async_step_import(self, conf: dict):
        """Import a configuration from config.yaml."""
        # Only a single imported entry is supported; bail out if one exists.
        if self.hass.config_entries.async_entries(DOMAIN):
            return self.async_abort(reason="already_setup")
        user, error = await self._check_connection(conf[CONF_ACCESS_TOKEN])
        if error is not None:
            return self.async_abort(reason=error)
        # The account email uniquely identifies this integration instance.
        await self.async_set_unique_id(user.email)
        self._abort_if_unique_id_configured()
        return self.async_create_entry(
            title=f"{user.email} ({user.user_id})",
            data={CONF_ACCESS_TOKEN: conf[CONF_ACCESS_TOKEN]},
        )

    async def async_step_user(self, user_input: Optional[dict] = None):
        """Handle a flow initialized by the user."""
        errors = {}
        if user_input is not None:
            user, error = await self._check_connection(user_input[CONF_ACCESS_TOKEN])
            if user is not None:
                await self.async_set_unique_id(user.email)
                self._abort_if_unique_id_configured()
                title = f"{user.email} ({user.user_id})"
                return self.async_create_entry(title=title, data=user_input)
            # Errors other than a bad token (no devices, API failure) cannot
            # be fixed by re-entering the form, so abort the flow.
            if error != "invalid_access_token":
                return self.async_abort(reason=error)
            # Bad token: redisplay the form with an inline field error.
            errors = {CONF_ACCESS_TOKEN: "invalid_access_token"}
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema({vol.Required(CONF_ACCESS_TOKEN): str}),
            errors=errors,
        )

    async def async_step_reauth(self, user_input: Optional[dict] = None):
        """Handle re-auth if token invalid."""
        errors = {}
        if user_input is not None:
            access_token = user_input[CONF_ACCESS_TOKEN]
            _, error = await self._check_connection(access_token)
            if error is None:
                # Store the fresh token on the existing entry for this account.
                for entry in self._async_current_entries():
                    if entry.unique_id == self.unique_id:
                        self.hass.config_entries.async_update_entry(
                            entry, data=user_input
                        )
                        return self.async_abort(reason="reauth_successful")
            if error != "invalid_access_token":
                return self.async_abort(reason=error)
            errors = {CONF_ACCESS_TOKEN: error}
        return self.async_show_form(
            step_id="reauth",
            data_schema=vol.Schema({vol.Required(CONF_ACCESS_TOKEN): str}),
            errors=errors,
        )

    async def _check_connection(self, access_token: str):
        """Check the access token is valid.

        Returns a ``(user, error)`` tuple where exactly one element is None;
        ``error`` is one of "no_devices_found", "invalid_access_token" or
        "unknown".
        """
        session = async_get_clientsession(self.hass)
        awair = Awair(access_token=access_token, session=session)
        try:
            user = await awair.user()
            devices = await user.devices()
            if not devices:
                return (None, "no_devices_found")
            return (user, None)
        except AuthError:
            return (None, "invalid_access_token")
        except AwairError as err:
            LOGGER.error("Unexpected API error: %s", err)
            return (None, "unknown")
| partofthething/home-assistant | homeassistant/components/awair/config_flow.py | Python | apache-2.0 | 3,743 |
# -*- encoding: utf-8 -*-
from nose.tools import *
from nose import SkipTest
import networkx as nx
from networkx.utils import *
def test_is_string_like():
    # Only actual strings should be recognized.
    assert_true(is_string_like("aaaa"))
    for non_string in (None, 123):
        assert_false(is_string_like(non_string))
def test_iterable():
    # Scalars and None are not iterable...
    for not_iterable in (None, 10):
        assert_false(iterable(not_iterable))
    # ...while the usual container types and strings are.
    for candidate in ([1, 2, 3], (1, 2, 3), {1: "A", 2: "X"}, "ABC"):
        assert_true(iterable(candidate))
def test_graph_iterable():
    graph = nx.complete_graph(10)
    # The graph itself and its node/edge views all count as iterable.
    for obj in (graph, graph.nodes(), graph.edges()):
        assert_true(iterable(obj))
def test_is_list_of_ints():
    all_ints = [1, 2, 3, 42]
    mixed_types = [1, 2, 3, "kermit"]
    assert_true(is_list_of_ints(all_ints))
    assert_false(is_list_of_ints(mixed_types))
def test_random_number_distribution():
    # smoke test only: just ensure the sequence generators run without raising
    powerlaw_sequence(20, exponent=2.5)
    discrete_sequence(20, distribution=[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3])
def test_make_str_with_bytes():
    import sys
    PY2 = sys.version_info[0] == 2
    # "qualité" is 7 characters, but 8 bytes when utf-8 encoded (é is 2 bytes).
    x = "qualité"
    y = make_str(x)
    if PY2:
        assert_true(isinstance(y, unicode))
        # Since file encoding is utf-8, the é will be two bytes.
        assert_true(len(y) == 8)
    else:
        assert_true(isinstance(y, str))
        assert_true(len(y) == 7)
def test_make_str_with_unicode():
    import sys
    PY2 = sys.version_info[0] == 2
    if PY2:
        # Decoding first means make_str receives a 7-character unicode object.
        x = unicode("qualité", encoding='utf-8')
        y = make_str(x)
        assert_true(isinstance(y, unicode))
        assert_true(len(y) == 7)
    else:
        x = "qualité"
        y = make_str(x)
        assert_true(isinstance(y, str))
        assert_true(len(y) == 7)
class TestNumpyArray(object):
    """Tests for the dict_to_numpy_array* helpers."""

    @classmethod
    def setupClass(cls):
        # numpy and assert_allclose are injected as module globals so the
        # test methods can use them without importing; the whole class is
        # skipped when NumPy is not installed.
        global numpy
        global assert_allclose
        try:
            import numpy
            from numpy.testing import assert_allclose
        except ImportError:
            raise SkipTest('NumPy not available.')

    def test_dict_to_numpy_array1(self):
        # Flat dict -> 1D array, ordered by the supplied key->index mapping.
        d = {'a': 1, 'b': 2}
        a = dict_to_numpy_array1(d, mapping={'a': 0, 'b': 1})
        assert_allclose(a, numpy.array([1, 2]))
        a = dict_to_numpy_array1(d, mapping={'b': 0, 'a': 1})
        assert_allclose(a, numpy.array([2, 1]))
        # Without a mapping the ordering is unspecified; check contents only.
        a = dict_to_numpy_array1(d)
        assert_allclose(a.sum(), 3)

    def test_dict_to_numpy_array2(self):
        # Dict-of-dicts -> 2D array, rows/columns ordered by the mapping.
        d = {'a': {'a': 1, 'b': 2},
             'b': {'a': 10, 'b': 20}}
        mapping = {'a': 1, 'b': 0}
        a = dict_to_numpy_array2(d, mapping=mapping)
        assert_allclose(a, numpy.array([[20, 10], [2, 1]]))
        a = dict_to_numpy_array2(d)
        assert_allclose(a.sum(), 33)

    def test_dict_to_numpy_array_a(self):
        # dict_to_numpy_array dispatches to the 2D variant for dict-of-dicts.
        d = {'a': {'a': 1, 'b': 2},
             'b': {'a': 10, 'b': 20}}
        mapping = {'a': 0, 'b': 1}
        a = dict_to_numpy_array(d, mapping=mapping)
        assert_allclose(a, numpy.array([[1, 2], [10, 20]]))
        mapping = {'a': 1, 'b': 0}
        a = dict_to_numpy_array(d, mapping=mapping)
        assert_allclose(a, numpy.array([[20, 10], [2, 1]]))
        a = dict_to_numpy_array2(d)
        assert_allclose(a.sum(), 33)

    def test_dict_to_numpy_array_b(self):
        # ...and to the 1D variant for a flat dict.
        d = {'a': 1, 'b': 2}
        mapping = {'a': 0, 'b': 1}
        a = dict_to_numpy_array(d, mapping=mapping)
        assert_allclose(a, numpy.array([1, 2]))
        a = dict_to_numpy_array1(d)
        assert_allclose(a.sum(), 3)
def test_pairwise():
    nodes = range(4)
    expected = [(0, 1), (1, 2), (2, 3)]
    expected_cycle = expected + [(3, 0)]
    # Sequences and one-shot iterators behave the same.
    assert_equal(list(pairwise(nodes)), expected)
    assert_equal(list(pairwise(iter(nodes))), expected)
    assert_equal(list(pairwise(nodes, cyclic=True)), expected_cycle)
    # An empty iterable yields no pairs in either mode.
    assert_equal(list(pairwise(iter(()))), [])
    assert_equal(list(pairwise(iter(()), cyclic=True)), [])
def test_groups():
    many_to_one = {'a': 0, 'b': 0, 'c': 1, 'd': 1, 'e': 2}
    expected = {0: {'a', 'b'}, 1: {'c', 'd'}, 2: {'e'}}
    assert_equal(groups(many_to_one), expected)
    # Degenerate case: no mapping at all.
    assert_equal(groups({}), {})
def test_to_tuple():
    # Nested lists are converted recursively.
    assert_equal(to_tuple([1, 2, [1, 3]]), (1, 2, (1, 3)))
    # A plain tuple passes through unchanged.
    a_tuple = (1, 2)
    assert_equal(to_tuple(a_tuple), a_tuple)
    # Lists nested inside tuples are converted too.
    assert_equal(to_tuple((1, 2, [1, 3])), (1, 2, (1, 3)))
def test_create_random_state():
    try:
        import numpy as np
    except ImportError:
        raise SkipTest('numpy not available.')
    rs = np.random.RandomState
    # Every accepted seed form should yield a RandomState instance.
    for seed in (1, None, np.random, rs(1)):
        assert_true(isinstance(create_random_state(seed), rs))
    assert_raises(ValueError, create_random_state, 'a')
    # The same integer seed must reproduce the same stream.
    assert_true(np.all(rs(1).rand(10) == create_random_state(1).rand(10)))
def test_create_py_random_state():
    pyrs = random.Random
    # Stdlib-style seeds produce plain random.Random instances.
    for seed in (1, None, pyrs(1)):
        assert_true(isinstance(create_py_random_state(seed), pyrs))
    assert_raises(ValueError, create_py_random_state, 'a')
    try:
        import numpy as np
    except ImportError:
        raise SkipTest('numpy not available.')
    nprs = PythonRandomInterface
    # NumPy sources get wrapped in the PythonRandomInterface adapter.
    for np_source in (np.random, np.random.RandomState(1)):
        assert_true(isinstance(create_py_random_state(np_source), nprs))
    # test default rng input
    assert_true(isinstance(PythonRandomInterface(), nprs))
def test_PythonRandomInterface():
    try:
        import numpy as np
    except ImportError:
        raise SkipTest('numpy not available.')
    rs = np.random.RandomState
    # Two identically-seeded streams: the wrapper must consume the underlying
    # RandomState exactly as the equivalent direct numpy call would.
    rng = PythonRandomInterface(rs(42))
    rs42 = rs(42)
    # make sure these functions are same as expected outcome
    assert_equal(rng.randrange(3, 5), rs42.randint(3, 5))
    assert_true(np.all(rng.choice([1, 2, 3]) == rs42.choice([1, 2, 3])))
    assert_equal(rng.gauss(0, 1), rs42.normal(0, 1))
    assert_equal(rng.expovariate(1.5), rs42.exponential(1/1.5))
    # NOTE(review): both shuffle calls return None, so this line only checks
    # that neither raises (None == None) -- it does not compare orderings.
    assert_true(np.all(rng.shuffle([1, 2, 3]) == rs42.shuffle([1, 2, 3])))
    assert_true(np.all(rng.sample([1, 2, 3], 2) ==
                       rs42.choice([1, 2, 3], (2,), replace=False)))
    # randrange is exclusive of the upper bound, numpy's randint here is made
    # inclusive-equivalent by passing 6 instead of 5.
    assert_equal(rng.randint(3, 5), rs42.randint(3, 6))
    assert_equal(rng.random(), rs42.random_sample())
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/utils/tests/test_misc.py | Python | gpl-3.0 | 6,689 |
#pragma repy restrictions.oneoutgoingsocket
# Repy regression script restricted to a single outgoing socket. foo() opens
# a connection to the local listener; bar() counts incoming connections and
# calls stopcomm on the listener handle after the sixth one. Presumably the
# <connport> placeholders are substituted by the test harness before the
# script is executed -- this file is not valid Python as-is.
if callfunc == "initialize":
  def foo():
    a = openconn("127.0.0.1", <connport>)
  def bar(a,b,c,d,e):
    mycontext["count"] += 1
    if mycontext["count"] >= 6:
      stopcomm(e)
  mycontext["count"] = 0
  waitforconn("127.0.0.1", <connport>, bar)
  foo()
  foo()
  foo()
  foo()
  foo()
  foo()
| sburnett/seattle | repy/tests/ut_repytests_oneoutgoingsocket-testemulcommgc.py | Python | mit | 354 |
# -*- coding: utf-8 -*-
# =========================================================================
# Copyright (C) 2016 Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from __future__ import unicode_literals
import os
from .transfer import TransferCommand
from ..utils import is_pattern_match, to_unix_path, uni_print
class MvCommand(TransferCommand):
    """The `qsctl mv` subcommand: a transfer that removes emptied source
    directories afterwards."""

    command = "mv"
    usage = (
        "%(prog)s <source-path> <dest-path> [-c <conf_file> "
        "-r <recusively> --exclude <pattern value> --include <pattern value> --rate-limit <pattern value>]"
    )

    @classmethod
    def clean_empty_dirs(cls):
        """Delete directories under the source path that are now empty and
        whose key matches the include/exclude patterns."""
        subdirs = []
        for parent, children, _files in os.walk(cls.options.source_path):
            subdirs.extend(os.path.join(parent, child) for child in children)
        # Walk deepest-first so emptied children expose empty parents.
        for subdir in subdirs[::-1]:
            rel_key = to_unix_path(
                os.path.relpath(subdir, cls.options.source_path) + "/")
            # Only delete a directory that is empty and pattern-matched.
            if os.listdir(subdir):
                continue
            if not is_pattern_match(rel_key, cls.options.exclude,
                                    cls.options.include):
                continue
            os.rmdir(subdir)
            uni_print("Local directory '%s' deleted" % subdir)
| Fiile/qsctl | qingstor/qsctl/commands/mv.py | Python | apache-2.0 | 1,937 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2021: SCS Software
import bpy
from io_scs_tools.internals.shaders.std_node_groups import animsheet_xfade_ng
from io_scs_tools.consts import Material as _MAT_consts
# Name of this node group inside bpy.data.node_groups.
FADESHEET_COMPUTE_G = _MAT_consts.node_group_prefix + "FadesheetCompute"
# Node names / per-frame node name prefixes used while building the group.
_XFADE_NODE = "SheetXFade"
_FRAMEX_COMBINE_NODE_PREFIX = "FrameCombine"
_FRAMEX_MULT_Y_NODE_PREFIX = "Frame*-Y"
_FRAMEX_MULT_SIZE_NODE_PREFIX = "Frame*-Y*FrameSize"
_FRAMEX_UV_ADD_NODE_PREFIX = "UV+(Frame-Y*FrameSize)"
def get_node_group():
    """Return the fadesheet compute node group, (re)building it first when
    it is missing or stale.

    :return: node group
    :rtype: bpy.types.NodeGroup
    """
    needs_rebuild = __group_needs_recreation__()
    if needs_rebuild:
        __create_node_group__()
    return bpy.data.node_groups[FADESHEET_COMPUTE_G]
def __group_needs_recreation__():
    """Tell whether the compute node group has to be (re)created.

    The only criterion currently checked is the group's presence in
    Blender's data blocks.
    :return: True when the group must be (re)created; False otherwise
    :rtype: bool
    """
    return FADESHEET_COMPUTE_G not in bpy.data.node_groups
def __create_node_group__():
    """Creates group for computing of fadesheet frames and transforming UVs.

    Wraps the animsheet crossfade group: the crossfade node produces two
    frame coordinates (Frame0X/Y, Frame1X/Y) plus a blend factor, and the
    per-frame chains below turn each coordinate pair into a UV offset that
    is added to the incoming UV.

    Inputs: Float, Float, Float, Vector, Vector
        (FPS, FramesRow, FramesTotal, FrameSize, UV)
    Outputs: Vector, Vector, Float
        (UV0, UV1, FrameBlend)
    """
    start_pos_x = 0
    start_pos_y = 0
    # horizontal spacing between node columns in the editor layout
    pos_x_shift = 185
    if FADESHEET_COMPUTE_G not in bpy.data.node_groups:  # creation
        fadesheet_compute_g = bpy.data.node_groups.new(type="ShaderNodeTree", name=FADESHEET_COMPUTE_G)
    else:  # recreation
        fadesheet_compute_g = bpy.data.node_groups[FADESHEET_COMPUTE_G]
        # delete all inputs and outputs
        fadesheet_compute_g.inputs.clear()
        fadesheet_compute_g.outputs.clear()
        # delete all old nodes and links as they will be recreated now with actual version
        fadesheet_compute_g.nodes.clear()
    # inputs defining
    fadesheet_compute_g.inputs.new("NodeSocketFloat", "FPS")
    fadesheet_compute_g.inputs.new("NodeSocketFloat", "FramesRow")
    fadesheet_compute_g.inputs.new("NodeSocketFloat", "FramesTotal")
    fadesheet_compute_g.inputs.new("NodeSocketVector", "FrameSize")
    fadesheet_compute_g.inputs.new("NodeSocketVector", "UV")
    # outputs defining
    fadesheet_compute_g.outputs.new("NodeSocketVector", "UV0")
    fadesheet_compute_g.outputs.new("NodeSocketVector", "UV1")
    fadesheet_compute_g.outputs.new("NodeSocketFloat", "FrameBlend")
    # node creation
    input_n = fadesheet_compute_g.nodes.new("NodeGroupInput")
    input_n.location = (start_pos_x, start_pos_y)
    output_n = fadesheet_compute_g.nodes.new("NodeGroupOutput")
    output_n.location = (start_pos_x + pos_x_shift * 6, start_pos_y)
    # crossfade group computes the two frame indices and the blend factor
    xfade_node = fadesheet_compute_g.nodes.new("ShaderNodeGroup")
    xfade_node.name = xfade_node.label = _XFADE_NODE
    xfade_node.location = (start_pos_x + pos_x_shift * 1, start_pos_y)
    xfade_node.node_tree = animsheet_xfade_ng.get_node_group()
    # per-frame chain: combine (X, Y) -> negate Y -> scale by FrameSize -> add to UV.
    # Y is negated via the (1, -1, 0) multiplier — presumably because frame
    # rows advance downward in the sheet; confirm against the sheet layout.
    for frame_idx in (0, 1):
        frame_combine_n = fadesheet_compute_g.nodes.new("ShaderNodeCombineXYZ")
        frame_combine_n.name = frame_combine_n.label = _FRAMEX_COMBINE_NODE_PREFIX + str(frame_idx)
        frame_combine_n.location = (start_pos_x + pos_x_shift * 2, start_pos_y - 200 * frame_idx)
        frame_combine_n.inputs['Z'].default_value = 0.0
        frame_mult_y_n = fadesheet_compute_g.nodes.new("ShaderNodeVectorMath")
        frame_mult_y_n.name = frame_mult_y_n.label = _FRAMEX_MULT_Y_NODE_PREFIX + str(frame_idx)
        frame_mult_y_n.location = (start_pos_x + pos_x_shift * 3, start_pos_y - 200 * frame_idx)
        frame_mult_y_n.operation = "MULTIPLY"
        frame_mult_y_n.inputs[1].default_value = (1.0, -1.0, 0.0)
        frame_mult_size_n = fadesheet_compute_g.nodes.new("ShaderNodeVectorMath")
        frame_mult_size_n.name = frame_mult_size_n.label = _FRAMEX_MULT_SIZE_NODE_PREFIX + str(frame_idx)
        frame_mult_size_n.location = (start_pos_x + pos_x_shift * 4, start_pos_y - 200 * frame_idx)
        frame_mult_size_n.operation = "MULTIPLY"
        frame_uv_add_n = fadesheet_compute_g.nodes.new("ShaderNodeVectorMath")
        frame_uv_add_n.name = frame_uv_add_n.label = _FRAMEX_UV_ADD_NODE_PREFIX + str(frame_idx)
        frame_uv_add_n.location = (start_pos_x + pos_x_shift * 5, start_pos_y - 200 * frame_idx)
        frame_uv_add_n.operation = "ADD"
    # create links
    fadesheet_compute_g.links.new(xfade_node.inputs['FPS'], input_n.outputs['FPS'])
    fadesheet_compute_g.links.new(xfade_node.inputs['FramesTotal'], input_n.outputs['FramesTotal'])
    fadesheet_compute_g.links.new(xfade_node.inputs['FramesRow'], input_n.outputs['FramesRow'])
    # wire up both per-frame chains (nodes are looked up again by name)
    for frame_idx in (0, 1):
        frame_combine_n = fadesheet_compute_g.nodes[_FRAMEX_COMBINE_NODE_PREFIX + str(frame_idx)]
        frame_mult_y_n = fadesheet_compute_g.nodes[_FRAMEX_MULT_Y_NODE_PREFIX + str(frame_idx)]
        frame_mult_size_n = fadesheet_compute_g.nodes[_FRAMEX_MULT_SIZE_NODE_PREFIX + str(frame_idx)]
        frame_uv_add_n = fadesheet_compute_g.nodes[_FRAMEX_UV_ADD_NODE_PREFIX + str(frame_idx)]
        fadesheet_compute_g.links.new(frame_combine_n.inputs['X'], xfade_node.outputs['Frame' + str(frame_idx) + 'X'])
        fadesheet_compute_g.links.new(frame_combine_n.inputs['Y'], xfade_node.outputs['Frame' + str(frame_idx) + 'Y'])
        fadesheet_compute_g.links.new(frame_mult_y_n.inputs[0], frame_combine_n.outputs[0])
        fadesheet_compute_g.links.new(frame_mult_size_n.inputs[0], frame_mult_y_n.outputs[0])
        fadesheet_compute_g.links.new(frame_mult_size_n.inputs[1], input_n.outputs['FrameSize'])
        fadesheet_compute_g.links.new(frame_uv_add_n.inputs[0], frame_mult_size_n.outputs[0])
        fadesheet_compute_g.links.new(frame_uv_add_n.inputs[1], input_n.outputs['UV'])
        fadesheet_compute_g.links.new(output_n.inputs['UV' + str(frame_idx)], frame_uv_add_n.outputs[0])
    fadesheet_compute_g.links.new(output_n.inputs['FrameBlend'], xfade_node.outputs['FrameBlend'])
| SCSSoftware/BlenderTools | addon/io_scs_tools/internals/shaders/flavors/fadesheet/fadesheet_compute_ng.py | Python | gpl-2.0 | 6,765 |
"""
Implement the Schwartizan Transform method of sorting
a list by an arbitrary metric (see the Python FAQ section
4.51).
--------------------------------------------------------------------
This program is licensed under the GNU General Public License (GPL).
See http://www.fsf.org for details of the license.
Andrew Sterian
Padnos School of Engineering
Grand Valley State University
<steriana@claymore.engineer.gvsu.edu>
<http://claymore.engineer.gvsu.edu/~steriana>
"""
def stripit(pair):
    """Return the element half of a (metric, element) pair."""
    element = pair[1]
    return element
def schwartz(List, Metric):
    """Sort *List* by *Metric* using the Schwartzian Transform.

    List   : iterable of elements to sort
    Metric : callable mapping an element to a sortable key

    Returns a new list of the elements, sorted by ascending metric value
    (ties are broken by comparing the elements themselves, as in the
    original tuple sort).

    Bug fix: the original relied on Python 2's ``map()`` returning a
    list and called ``.sort()`` on it; on Python 3 ``map()`` returns an
    iterator with no ``sort`` method, so the function crashed.
    """
    # Decorate with (metric, element) tuples, sort, then undecorate.
    paired = [(Metric(element), element) for element in List]
    paired.sort()
    return [element for (metric, element) in paired]
def stripit2(pair):
    """Return the metric half of a (metric, element) pair."""
    metric = pair[0]
    return metric
def schwartz2(List, Metric):
    """Sort *List* by *Metric*; also return the corresponding metrics.

    List   : iterable of elements to sort
    Metric : callable mapping an element to a sortable key

    Returns a tuple ``(sorted_list, metrics)`` where
    ``metrics[i] == Metric(sorted_list[i])``.

    Bug fix: the original relied on Python 2's ``map()`` returning a
    list and called ``.sort()`` on it; on Python 3 ``map()`` returns an
    iterator with no ``sort`` method, so the function crashed.
    """
    # Decorate, sort by (metric, element), then split the pairs apart.
    paired = [(Metric(element), element) for element in List]
    paired.sort()
    theList = [element for (metric, element) in paired]
    theMetrics = [metric for (metric, element) in paired]
    return (theList, theMetrics)
| logxen/Fritzing | tools/gerb-merge/gerbmerge/schwartz.py | Python | gpl-3.0 | 1,029 |
from __future__ import absolute_import
from rest_framework import permissions
# from sentry.models.apikey import ROOT_KEY
# from sentry.auth.utils import is_privileged_request
class NoPermission(permissions.BasePermission):
    """Permission class that denies every request unconditionally."""

    def has_permission(self, request, view):
        # Reject regardless of user, auth token, or view.
        return False
class ScopedPermission(permissions.BasePermission):
    """
    Permissions work depending on the type of authentication:

    - A user inherits permissions based on their membership role. These are
      still dictated as common scopes, but they can't be checked until the
      has_object_permission hook is called.
    - ProjectKeys (legacy) are granted only project based scopes. This
    - APIKeys specify their scope, and work as expected.
    """
    # Per-HTTP-method sets of scopes that grant access; subclasses override.
    scope_map = {
        'HEAD': (),
        'GET': (),
        'POST': (),
        'PUT': (),
        'PATCH': (),
        'DELETE': (),
    }

    def has_permission(self, request, view):
        # Session-based auth (no token) has all scopes for a logged-in user.
        if not request.auth:
            return request.user.is_authenticated()
        # Token-based auth: access is granted when the token holds at least
        # one scope permitted for this HTTP method.
        permitted = set(self.scope_map.get(request.method, []))
        granted = request.auth.get_scopes()
        return not permitted.isdisjoint(granted)

    def has_object_permission(self, request, view, obj):
        # Object-level checks are left to subclasses; deny by default.
        return False
class SuperuserPermission(permissions.BasePermission):
    """Grant access only to superuser requests."""

    def has_permission(self, request, view):
        # Single-expression form of the original if/return-True/return-False;
        # bool() keeps the exact True/False return values.
        return bool(request.is_superuser())
class SystemPermission(permissions.BasePermission):
    # Placeholder permission: has_permission currently returns None (falsy),
    # so access is effectively denied until the commented-out root-key check
    # below is restored (its imports are also commented out at file top).
    def has_permission(self, request, view):
        pass
        # return request.auth is ROOT_KEY and \
        #     is_privileged_request(request)
| wanghe4096/WangBlog | src/wangblog/api/permissions.py | Python | bsd-2-clause | 1,737 |
import json
import time
import unittest
from context import Bolt
client = Bolt("<your-api-key-here>", "<your-bolt-id>") # Pass in the API Key and the client ID.
class BoltTests(unittest.TestCase, unittest.TestLoader):
    """
    Test class exercising the functionality of the Bolt python api library
    against a live device (module-level ``client``).

    Methods:
        - setUp()
        - tearDown()
        - test_digitalWrite()
        - test_analogWrite()
        - test_digitalRead()
        - test_analogRead()
        - test_serialBegin()
        - test_serialWrite()
        - test_serialRead()
        - test_Restart()
        - test_isOnline()
        - test_isAlive()
    """

    def setUp(self):
        """Setup function for all the testcases."""
        self.sortTestMethodsUsing = None

    def tearDown(self):
        """Tear down function for all the testcases"""
        # Give the device/cloud time to settle between API calls.
        time.sleep(3)

    def test_digitalWrite(self):
        """Testing the digital write function."""
        assert_value = json.loads(client.digitalWrite('4', "HIGH"))
        self.assertEqual(assert_value["success"], '1')
        self.assertEqual(assert_value["value"], '1')
        print("Digital Write Successfull!")

    def test_analogWrite(self):
        """Testing the analog write function"""
        assert_value = json.loads(client.analogWrite('0', "100"))
        self.assertEqual(assert_value["success"], '1')
        self.assertEqual(assert_value["value"], '1')
        print("Analog Write Successfull!")

    def test_digitalRead(self):
        """Testing the digital read function."""
        assert_value = json.loads(client.digitalRead('1'))
        self.assertEqual(assert_value["success"], '1')
        self.assertEqual(int(assert_value["value"]), 1)
        print("Digital Read Successfull!")

    def test_analogRead(self):
        """Testing the analog read function."""
        assert_value = json.loads(client.analogRead("A0"))
        self.assertEqual(assert_value["success"], '1')
        # ADC value: anything in the 10-bit range is acceptable.
        self.assertTrue(0 <= int(assert_value["value"]) <= 1024)
        print("Analog Read Succesfull!")

    def test_serialBegin(self):
        """Testing the serialBegin() function."""
        assert_value = json.loads(client.serialBegin("9600"))
        self.assertEqual(assert_value["success"], '1')
        self.assertEqual(assert_value["value"], "Success")
        # BUG FIX: this print call was missing its closing parenthesis,
        # which made the whole module a SyntaxError.
        print("Serial Begin Successfull!")

    def test_serialWrite(self):
        """Testing the serialWrite() function."""
        assert_value = json.loads(client.serialWrite('inventrom'))
        self.assertEqual(assert_value["success"], '1')
        self.assertEqual(assert_value["value"], "Serial write Successful")
        print("Serial Write Successfull!")

    def test_serialRead(self):
        """Testing the serialRead()"""
        assert_value = json.loads(client.serialRead("10"))
        self.assertEqual(assert_value["success"], '1')
        self.assertEqual(assert_value["value"], "inventrom")
        print("Serial Read Successfull!")

    def test_Restart(self):
        """Testing the restart() function."""
        assert_value = json.loads(client.restart())
        # Allow the device time to reboot before asserting.
        time.sleep(5)
        try:
            self.assertEqual(assert_value["value"], "Restarted")
        except AssertionError:
            # The cloud may report a timeout while the device reboots.
            self.assertEqual(assert_value["value"], "Command timed out")
        print("Restart Successfull!")

    def test_isAlive(self):
        """Testing the isAlive() function."""
        assert_value = json.loads(client.isAlive())
        self.assertEqual(assert_value["success"], '1')
        self.assertEqual(assert_value["value"], "alive")
        print("isAlive Successfull!")

    def test_isOnline(self):
        """Testing the isOnline()"""
        assert_value = json.loads(client.isOnline())
        self.assertEqual(str(assert_value["success"]), '1')
        self.assertEqual(assert_value["value"], "online")
        print("isOnline Successfull!")
if __name__ == "__main__":
    # Only run the suite when the Bolt device is reachable; checking once
    # up front avoids a dozen slow timeout failures for an offline device.
    is_device_online = json.loads(client.isOnline())
    if is_device_online["value"] == "online":
        unittest.main()
    else:
        print("The device is offline test cannot be conducted at the moment. Connect your Bolt to the cloud before testing it.")
| Inventrom/bolt-api-python | tests/functionality_test/functionality_testing.py | Python | mit | 4,150 |
# -*- coding: utf-8 -*-
#
# hill_tononi_Vp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#! ===========================================
#! NEST Topology Module: A Case-Based Tutorial
#! ===========================================
#!
#! :Author: Hans Ekkehard Plesser
#! :Institution: Norwegian University of Life Sciences
#! :Version: 0.4
#! :Date: 21 November 2012
#! :Copyright: The NEST Initiative (2009-2012)
#! :License: Creative Commons Attribution License
#!
#! **NOTE:** The network generated by this script does generate
#! dynamics in which the activity of the entire system, especially
#! Rp and Vp oscillates with approx 5 Hz. This is different from
#! the full model. Deviations are due to the different model type
#! and the elimination of a number of connections, with no changes
#! to the weights.
#!
#! Introduction
#! ============
#!
#! This tutorial shows you how to implement a simplified version of the
#! Hill-Tononi model of the early visual pathway using the NEST Topology
#! module. The model is described in the paper
#!
#! S. L. Hill and G. Tononi.
#! Modeling Sleep and Wakefulness in the Thalamocortical System.
#! J Neurophysiology **93**:1671-1698 (2005).
#! Freely available via `doi 10.1152/jn.00915.2004
#! <http://dx.doi.org/10.1152/jn.00915.2004>`_.
#!
#! We simplify the model somewhat both to keep this tutorial a bit
#! shorter, and because some details of the Hill-Tononi model are not
#! currently supported by NEST. Simplifications include:
#!
#! 1. We use the ``iaf_cond_alpha`` neuron model, which is
#! simpler than the Hill-Tononi model.
#!
#! #. As the ``iaf_cond_alpha`` neuron model only supports two
#! synapses (labeled "ex" and "in"), we only include AMPA and
#! GABA_A synapses.
#!
#! #. We ignore the secondary pathway (Ts, Rs, Vs), since it adds just
#! more of the same from a technical point of view.
#!
#! #. Synaptic delays follow a Gaussian distribution in the HT
#! model. This implies actually a Gaussian distributions clipped at
#! some small, non-zero delay, since delays must be
#! positive. Currently, there is a bug in the Topology module when
#! using clipped Gaussian distribution. We therefore draw delays from a
#! uniform distribution.
#!
#! #. Some further adaptations are given at the appropriate locations in
#! the script.
#!
#! This tutorial is divided in the following sections:
#!
#! Philosophy_
#! Discusses the philosophy applied to model implementation in this
#! tutorial
#!
#! Preparations_
#! Neccessary steps to use NEST and the Topology Module
#!
#! `Configurable Parameters`_
#! Define adjustable network parameters
#!
#! `Neuron Models`_
#! Define the neuron models needed by the network model
#!
#! Populations_
#! Create Populations
#!
#! `Synapse models`_
#! Define the synapse models used in the network model
#!
#! Connections_
#! Create Connections
#!
#! `Example simulation`_
#! Perform a small simulation for illustration. This
#! section also discusses the setup for recording.
#! Philosophy
#! ==========
#! A network models has two essential components: *populations* and
#! *projections*. We first use NEST's ``CopyModel()`` mechanism to
#! create specific models for all populations and subpopulations in
#! the network, and then create the populations using the Topology
#! modules ``CreateLayer()`` function.
#!
#! We use a two-stage process to create the connections, mainly
#! because the same configurations are required for a number of
#! projections: we first define dictionaries specifying the
#! connections, then apply these dictionaries later.
#!
#! The way in which we declare the network model here is an
#! example. You should not consider it the last word: we expect to see
#! a significant development in strategies and tools for network
#! descriptions in the future. The following contributions to CNS\*09
#! seem particularly interesting
#!
#! - Ralf Ansorg & Lars Schwabe. Declarative model description and
#! code generation for hybrid individual- and population-based
#! simulations of the early visual system (P57);
#! - Sharon Crook, R. Angus Silver, & Padraig Gleeson. Describing
#! and exchanging models of neurons and neuronal networks with
#! NeuroML (F1);
#!
#! as well as the following paper which will appear in PLoS
#! Computational Biology shortly:
#!
#! - Eilen Nordlie, Marc-Oliver Gewaltig, & Hans Ekkehard Plesser.
#! Towards reproducible descriptions of neuronal network models.
#! Preparations
#! ============
#! Please make sure that your ``PYTHONPATH`` is set correctly, so
#! that Python can find the NEST Python module.
#! **Note:** By default, the script does not show any graphics.
#! Set ``SHOW_FIGURES`` to ``True`` to activate graphics.
SHOW_FIGURES = False

import pylab

if not SHOW_FIGURES:
    # Keep a reference to the real show() so it can still be called
    # manually, then monkey-patch pylab.show with a no-op so the many
    # plotting calls below become harmless in headless runs.
    pylab_show = pylab.show
    def nop(s=None): pass
    pylab.show = nop
else:
    # Interactive mode: figures update as they are drawn.
    pylab.ion()
#! Introduction
#!=============
#! This tutorial gives a brief introduction to the ConnPlotter
#! toolbox. It is by no means complete.
#! Load pynest
import nest
#! Load NEST Topology module (NEST 2.2)
import nest.topology as topo
#! Make sure we start with a clean slate, even if we re-run the script
#! in the same Python session.
nest.ResetKernel()
#! Import math, we need Pi
import math
#! Configurable Parameters
#! =======================
#!
#! Here we define those parameters that we take to be
#! configurable. The choice of configurable parameters is obviously
#! arbitrary, and in practice one would have far more configurable
#! parameters. We restrict ourselves to:
#!
#! - Network size in neurons ``N``, each layer is ``N x N``.
#! - Network size in subtended visual angle ``visSize``, in degree.
#! - Temporal frequency of drifting grating input ``f_dg``, in Hz.
#! - Spatial wavelength and direction of drifting grating input,
#! ``lambda_dg`` and ``phi_dg``, in degree/radian.
#! - Background firing rate of retinal nodes and modulation amplitude,
#! ``retDC`` and ``retAC``, in Hz.
#! - Simulation duration ``simtime``; actual simulation is split into
#! intervals of ``sim_interval`` length, so that the network state
#! can be visualized in those intervals. Times are in ms.
Params = {'N' : 40,             # layer size: every layer is N x N neurons
          'visSize' : 8.0,      # network extent, degrees of visual angle
          'f_dg' : 2.0,         # temporal frequency of drifting grating, Hz
          'lambda_dg' : 2.0,    # spatial wavelength of grating, degree
          'phi_dg' : 0.0,       # orientation of grating, radian
          'retDC' : 30.0,       # retinal background firing rate, Hz
          'retAC' : 30.0,       # retinal rate-modulation amplitude, Hz
          'simtime' : 100.0,    # total simulated time, ms
          'sim_interval': 5.0   # recording/visualization interval, ms
          }
#! Neuron Models
#! =============
#!
#! We declare models in two steps:
#!
#! 1. We define a dictionary specifying the NEST neuron model to use
#! as well as the parameters for that model.
#! #. We create three copies of this dictionary with parameters
#! adjusted to the three model variants specified in Table~2 of
#! Hill & Tononi (2005) (cortical excitatory, cortical inhibitory,
#! thalamic)
#!
#! In addition, we declare the models for the stimulation and
#! recording devices.
#!
#! The general neuron model
#! ------------------------
#!
#! We use the ``iaf_cond_alpha`` neuron, which is an
#! integrate-and-fire neuron with two conductance-based synapses which
#! have alpha-function time course. Any input with positive weights
#! will automatically directed to the synapse labeled ``_ex``, any
#! with negative weights to the synapes labeled ``_in``. We define
#! **all** parameters explicitly here, so that no information is
#! hidden in the model definition in NEST. ``V_m`` is the membrane
#! potential to which the model neurons will be initialized.
#! The model equations and parameters for the Hill-Tononi neuron model
#! are given on pp. 1677f and Tables 2 and 3 in that paper. Note some
#! peculiarities and adjustments:
#!
#! - Hill & Tononi specify their model in terms of the membrane time
#! constant, while the ``iaf_cond_alpha`` model is based on the
#! membrane capcitance. Interestingly, conducantces are unitless in
#! the H&T model. We thus can use the time constant directly as
#! membrane capacitance.
#! - The model includes sodium and potassium leak conductances. We
#! combine these into a single one as follows:
#$ \begin{equation}-g_{NaL}(V-E_{Na}) - g_{KL}(V-E_K)
#$ =
#$ -(g_{NaL}+g_{KL})\left(V-\frac{g_{NaL}E_{NaL}+g_{KL}E_K}{g_{NaL}g_{KL}}\right)
#$ \end{equation}
#! - We write the resulting expressions for g_L and E_L explicitly
#! below, to avoid errors in copying from our pocket calculator.
#! - The paper gives a range of 1.0-1.85 for g_{KL}, we choose 1.5
#! here.
#! - The Hill-Tononi model has no explicit reset or refractory
#! time. We arbitrarily set V_reset and t_ref.
#! - The paper uses double exponential time courses for the synaptic
#! conductances, with separate time constants for the rising and
#! fallings flanks. Alpha functions have only a single time
#! constant: we use twice the rising time constant given by Hill and
#! Tononi.
#! - In the general model below, we use the values for the cortical
#! excitatory cells as defaults. Values will then be adapted below.
#!
# Combined leak: g_L = g_NaL + g_KL and E_L is the conductance-weighted
# mean of E_Na and E_K, per the derivation in the comments above
# (g_NaL = 0.2, E_Na = 30 mV, g_KL = 1.5, E_K = -90 mV). Defaults below
# are the cortical excitatory values; they are adapted per population.
nest.CopyModel('iaf_cond_alpha', 'NeuronModel',
               params = {'C_m' : 16.0,
                         'E_L' : (0.2 * 30.0 + 1.5 * -90.0)/(0.2 + 1.5),
                         'g_L' : 0.2 + 1.5,
                         'E_ex' : 0.0,
                         'E_in' : -70.0,
                         'V_reset' : -60.0,
                         'V_th' : -51.0,
                         't_ref' : 2.0,
                         'tau_syn_ex': 1.0,
                         'tau_syn_in': 2.0,
                         'I_e' : 0.0,
                         'V_m' : -70.0})
#! Adaptation of models for different populations
#! ----------------------------------------------
#! We must copy the `NeuronModel` dictionary explicitly, otherwise
#! Python would just create a reference.
#! Cortical excitatory cells
#! .........................
#! Parameters are the same as above, so we need not adapt anything
nest.CopyModel('NeuronModel', 'CtxExNeuron')
#! Cortical inhibitory cells
#! .........................
nest.CopyModel('NeuronModel', 'CtxInNeuron',
params = {'C_m' : 8.0,
'V_th' : -53.0,
't_ref': 1.0})
#! Thalamic cells
#! ..............
nest.CopyModel('NeuronModel', 'ThalamicNeuron',
params = {'C_m' : 8.0,
'V_th' : -53.0,
't_ref': 1.0,
'E_in' : -80.0})
#! Input generating nodes
#! ----------------------
#! Input is generated by sinusoidally modulate Poisson generators,
#! organized in a square layer of retina nodes. These nodes require a
#! slightly more complicated initialization than all other elements of
#! the network:
#!
#! - Average firing rate ``DC``, firing rate modulation depth ``AC``, and
#! temporal modulation frequency ``Freq`` are the same for all retinal
#! nodes and are set directly below.
#! - The temporal phase ``Phi`` of each node depends on its position in
#! the grating and can only be assigned after the retinal layer has
#! been created. We therefore specify a function for initalizing the
#! phase ``Phi``. This function will be called for each node.
def phiInit(pos, lam, alpha):
    '''Initializer function for phase of drifting grating nodes.

    pos  : position (x,y) of node, in degree
    lam  : wavelength of grating, in degree
    alpha: angle of grating in radian, zero is horizontal

    Returns number to be used as phase of AC Poisson generator.
    '''
    # Spatial angular frequency of the grating ...
    wavenumber = 2.0 * math.pi / lam
    # ... times the node position projected onto the grating direction.
    projection = math.cos(alpha) * pos[0] + math.sin(alpha) * pos[1]
    return wavenumber * projection
nest.CopyModel('sinusoidal_poisson_generator', 'RetinaNode',
params = {'ac' : Params['retAC'],
'dc' : Params['retDC'],
'freq' : Params['f_dg'],
'phi' : 0.0,
'individual_spike_trains': False})
#! Recording nodes
#! ---------------
#! We use the new ``multimeter`` device for recording from the model
#! neurons. At present, ``iaf_cond_alpha`` is one of few models
#! supporting ``multimeter`` recording. Support for more models will
#! be added soon; until then, you need to use ``voltmeter`` to record
#! from other models.
#!
#! We configure multimeter to record membrane potential to membrane
#! potential at certain intervals to memory only. We record the GID of
#! the recorded neurons, but not the time.
nest.CopyModel('multimeter', 'RecordingNode',
params = {'interval' : Params['sim_interval'],
'record_from': ['V_m'],
'record_to' : ['memory'],
'withgid' : True,
'withtime' : False})
#! Populations
#! ===========
#! We now create the neuron populations in the model, again in the
#! form of Python dictionaries. We define them in order from eye via
#! thalamus to cortex.
#!
#! We first define a dictionary defining common properties for all
#! populations
layerProps = {'rows' : Params['N'],
'columns' : Params['N'],
'extent' : [Params['visSize'], Params['visSize']],
'edge_wrap': True}
#! This dictionary does not yet specify the elements to put into the
#! layer, since they will differ from layer to layer. We will add them
#! below by updating the ``'elements'`` dictionary entry for each
#! population.
#! Retina
#! ------
layerProps.update({'elements': 'RetinaNode'})
retina = topo.CreateLayer(layerProps)

#! Now set phases of retinal oscillators from their grid positions. A
#! plain for-loop replaces the original list comprehension, which was
#! executed purely for its side effect and built a throwaway list.
for n in nest.GetLeaves(retina)[0]:
    nest.SetStatus([n], {"phi": phiInit(topo.GetPosition([n])[0],
                                        Params["lambda_dg"],
                                        Params["phi_dg"])})
#! Thalamus
#! --------
#! We first introduce specific neuron models for the thalamic relay
#! cells and interneurons. These have identical properties, but by
#! treating them as different models, we can address them specifically
#! when building connections.
#!
#! We use a list comprehension to do the model copies.
[nest.CopyModel('ThalamicNeuron', SpecificModel) for SpecificModel in ('TpRelay', 'TpInter')]
#! Now we can create the layer, with one relay cell and one
#! interneuron per location:
layerProps.update({'elements': ['TpRelay', 'TpInter']})
Tp = topo.CreateLayer(layerProps)
#! Reticular nucleus
#! -----------------
#! We follow the same approach as above, even though we have only a
#! single neuron in each location.
# Only one model to copy here, so the one-element comprehension of the
# original is replaced by a direct call.
nest.CopyModel('ThalamicNeuron', 'RpNeuron')
layerProps.update({'elements': 'RpNeuron'})
Rp = topo.CreateLayer(layerProps)
#! Primary visual cortex
#! ---------------------
#! We follow again the same approach. We differentiate neuron types
#! between layers and between pyramidal cells and interneurons. At
#! each location, there are two pyramidal cells and one interneuron in
#! each of layers 2-3, 4, and 5-6. Finally, we need to differentiate
#! between vertically and horizontally tuned populations. When creating
#! the populations, we create the vertically and the horizontally
#! tuned populations as separate populations.
#! We use plain loops to create all neuron types (the original list
#! comprehensions were run only for their side effects):
for layer in ('L23', 'L4', 'L56'):
    nest.CopyModel('CtxExNeuron', layer + 'pyr')
for layer in ('L23', 'L4', 'L56'):
    nest.CopyModel('CtxInNeuron', layer + 'in')

#! Now we can create the populations, suffixes h and v indicate tuning
layerProps.update({'elements': ['L23pyr', 2, 'L23in', 1,
                                'L4pyr' , 2, 'L4in' , 1,
                                'L56pyr', 2, 'L56in', 1]})
Vp_h = topo.CreateLayer(layerProps)
Vp_v = topo.CreateLayer(layerProps)
#! Collect all populations
#! -----------------------
#! For reference purposes, e.g., printing, we collect all populations
#! in a tuple:
populations = (retina, Tp, Rp, Vp_h, Vp_v)
#! Inspection
#! ----------
#! We can now look at the network using `PrintNetwork`:
nest.PrintNetwork()
#! We can also try to plot a single layer in a network. For
#! simplicity, we use Rp, which has only a single neuron per position.
topo.PlotLayer(Rp)
pylab.title('Layer Rp')
pylab.show()
#! Synapse models
#! ==============
#! Actual synapse dynamics, e.g., properties such as the synaptic time
#! course, time constants, reversal potentials, are properties of
#! neuron models in NEST and we set them in section `Neuron models`_
#! above. When we refer to *synapse models* in NEST, we actually mean
#! connectors which store information about connection weights and
#! delays, as well as port numbers at the target neuron (``rport``)
#! and implement synaptic plasticity. The latter two aspects are not
#! relevant here.
#!
#! We just use NEST's ``static_synapse`` connector but copy it to
#! synapse models ``AMPA`` and ``GABA_A`` for the sake of
#! explicitness. Weights and delays are set as needed in section
#! `Connections`_ below, as they are different from projection to
#! projection. De facto, the sign of the synaptic weight decides
#! whether input via a connection is handle by the ``_ex`` or the
#! ``_in`` synapse.
nest.CopyModel('static_synapse', 'AMPA')
nest.CopyModel('static_synapse', 'GABA_A')
#! Connections
#! ====================
#! Building connections is the most complex part of network
#! construction. Connections are specified in Table 1 in the
#! Hill-Tononi paper. As pointed out above, we only consider AMPA and
#! GABA_A synapses here. Adding other synapses is tedious work, but
#! should pose no new principal challenges. We also use a uniform in
#! stead of a Gaussian distribution for the weights.
#!
#! The model has two identical primary visual cortex populations,
#! ``Vp_v`` and ``Vp_h``, tuned to vertical and horizonal gratings,
#! respectively. The *only* difference in the connection patterns
#! between the two populations is the thalamocortical input to layers
#! L4 and L5-6 is from a population of 8x2 and 2x8 grid locations,
#! respectively. Furthermore, inhibitory connection in cortex go to
#! the opposing orientation population as to the own.
#!
#! To save us a lot of code doubling, we thus defined properties
#! dictionaries for all connections first and then use this to connect
#! both populations. We follow the subdivision of connections as in
#! the Hill & Tononi paper.
#!
#! **Note:** Hill & Tononi state that their model spans 8 degrees of
#! visual angle and stimuli are specified according to this. On the
#! other hand, all connection patterns are defined in terms of cell
#! grid positions. Since the NEST Topology Module defines connection
#! patterns in terms of the extent given in degrees, we need to apply
#! the following scaling factor to all lengths in connections:
dpc = Params['visSize'] / (Params['N'] - 1)
#! We will collect all same-orientation cortico-cortical connections in
ccConnections = []
#! the cross-orientation cortico-cortical connections in
ccxConnections = []
#! and all cortico-thalamic connections in
ctConnections = []
#! Horizontal intralaminar
#! -----------------------
#! *Note:* "Horizontal" means "within the same cortical layer" in this
#! case.
#!
#! We first define a dictionary with the (most) common properties for
#! horizontal intralaminar connection. We then create copies in which
#! we adapt those values that need adapting, and
horIntraBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 12.0 * dpc}},
"kernel": {"gaussian": {"p_center": 0.05, "sigma": 7.5 * dpc}},
"weights": 1.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
#! We use a loop to do the for for us. The loop runs over a list of
#! dictionaries with all values that need updating
for conn in [{"sources": {"model": "L23pyr"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L23in" }},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L4pyr" },
"mask" : {"circular": {"radius": 7.0 * dpc}}},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L4in" },
"mask" : {"circular": {"radius": 7.0 * dpc}}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L56pyr" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L56in" }}]:
ndict = horIntraBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
#! Vertical intralaminar
#! -----------------------
#! *Note:* "Vertical" means "between cortical layers" in this
#! case.
#!
#! We proceed as above.
verIntraBase = {"connection_type": "divergent",
"synapse_model": "AMPA",
"mask": {"circular": {"radius": 2.0 * dpc}},
"kernel": {"gaussian": {"p_center": 1.0, "sigma": 7.5 * dpc}},
"weights": 2.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
for conn in [{"sources": {"model": "L23pyr"}, "targets": {"model": "L56pyr"}, "weights": 1.0},
{"sources": {"model": "L23pyr"}, "targets": {"model": "L23in" }, "weights": 1.0},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L4pyr" }, "targets": {"model": "L23in" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L23in" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L4pyr" }},
{"sources": {"model": "L56pyr"}, "targets": {"model": "L4in" }}]:
ndict = verIntraBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
#! Intracortical inhibitory
#! ------------------------
#!
#! We proceed as above, with the following difference: each connection
#! is added to the same-orientation and the cross-orientation list of
#! connections.
#!
#! **Note:** Weights increased from -1.0 to -2.0, to make up for missing GabaB
#!
#! Note that we have to specify the **weight with negative sign** to make
#! the connections inhibitory.
intraInhBase = {"connection_type": "divergent",
"synapse_model": "GABA_A",
"mask": {"circular": {"radius": 7.0 * dpc}},
"kernel": {"gaussian": {"p_center": 0.25, "sigma": 7.5 * dpc}},
"weights": -2.0,
"delays": {"uniform": {"min": 1.75, "max": 2.25}}}
#! We use a loop to do the for for us. The loop runs over a list of
#! dictionaries with all values that need updating
for conn in [{"sources": {"model": "L23in"}, "targets": {"model": "L23pyr"}},
{"sources": {"model": "L23in"}, "targets": {"model": "L23in" }},
{"sources": {"model": "L4in" }, "targets": {"model": "L4pyr" }},
{"sources": {"model": "L4in" }, "targets": {"model": "L4in" }},
{"sources": {"model": "L56in"}, "targets": {"model": "L56pyr"}},
{"sources": {"model": "L56in"}, "targets": {"model": "L56in" }}]:
ndict = intraInhBase.copy()
ndict.update(conn)
ccConnections.append(ndict)
ccxConnections.append(ndict)
#! Corticothalamic
#! ---------------
# Excitatory (AMPA) feedback from cortical L5/6 pyramidal cells onto the
# thalamic relay and interneuron populations.
corThalBase = {"connection_type": "divergent",
               "synapse_model": "AMPA",
               "mask": {"circular": {"radius": 5.0 * dpc}},
               "kernel": {"gaussian": {"p_center": 0.5, "sigma": 7.5 * dpc}},
               "weights": 1.0,
               "delays": {"uniform": {"min": 7.5, "max": 8.5}}}
#! We use a loop to do the work for us. The loop runs over a list of
#! dictionaries with all values that need updating
for conn in [{"sources": {"model": "L56pyr"}, "targets": {"model": "TpRelay" }},
             {"sources": {"model": "L56pyr"}, "targets": {"model": "TpInter" }}]:
    # BUGFIX: previously copied intraInhBase (inhibitory GABA_A template with
    # weight -2.0 and short delays), so the corticothalamic projections were
    # built from the wrong base dictionary. They must derive from corThalBase.
    ndict = corThalBase.copy()
    ndict.update(conn)
    ctConnections.append(ndict)
#! Corticoreticular
#! ----------------
#! In this case, there is only a single connection, so we write the
#! dictionary itself; it is very similar to the corThalBase, and to
#! show that, we copy first, then update. We need no ``targets`` entry,
#! since Rp has only one neuron per location.
corRet = corThalBase.copy()
corRet.update({"sources": {"model": "L56pyr"}, "weights": 2.5})
#! Build all connections beginning in cortex
#! -----------------------------------------
#! Cortico-cortical, same orientation
# The list comprehensions below are used purely for their side effects:
# each ConnectLayers call wires one connection specification.
print("Connecting: cortico-cortical, same orientation")
[topo.ConnectLayers(Vp_h, Vp_h, conn) for conn in ccConnections]
[topo.ConnectLayers(Vp_v, Vp_v, conn) for conn in ccConnections]
#! Cortico-cortical, cross-orientation
print("Connecting: cortico-cortical, other orientation")
[topo.ConnectLayers(Vp_h, Vp_v, conn) for conn in ccxConnections]
[topo.ConnectLayers(Vp_v, Vp_h, conn) for conn in ccxConnections]
#! Cortico-thalamic connections
print("Connecting: cortico-thalamic")
[topo.ConnectLayers(Vp_h, Tp, conn) for conn in ctConnections]
[topo.ConnectLayers(Vp_v, Tp, conn) for conn in ctConnections]
topo.ConnectLayers(Vp_h, Rp, corRet)
topo.ConnectLayers(Vp_v, Rp, corRet)
#! Thalamo-cortical connections
#! ----------------------------
#! **Note:** According to the text on p. 1674, bottom right, of
#! the Hill & Tononi paper, thalamocortical connections are
#! created by selecting from the thalamic population for each
#! L4 pyramidal cell, ie, are *convergent* connections.
#!
#! We first handle the rectangular thalamocortical connections.
thalCorRect = {"connection_type": "convergent",
               "sources": {"model": "TpRelay"},
               "synapse_model": "AMPA",
               "weights": 5.0,
               "delays": {"uniform": {"min": 2.75, "max": 3.25}}}
print("Connecting: thalamo-cortical")
#! Horizontally tuned
# Elongated rectangular mask along x gives the horizontal orientation tuning.
thalCorRect.update({"mask": {"rectangular": {"lower_left" : [-4.0*dpc, -1.0*dpc],
                                             "upper_right": [ 4.0*dpc,  1.0*dpc]}}})
for conn in [{"targets": {"model": "L4pyr" }, "kernel": 0.5},
             {"targets": {"model": "L56pyr"}, "kernel": 0.3}]:
    thalCorRect.update(conn)
    topo.ConnectLayers(Tp, Vp_h, thalCorRect)
#! Vertically tuned
# Same mask rotated 90 degrees (elongated along y) for vertical tuning.
thalCorRect.update({"mask": {"rectangular": {"lower_left" : [-1.0*dpc, -4.0*dpc],
                                             "upper_right": [ 1.0*dpc,  4.0*dpc]}}})
for conn in [{"targets": {"model": "L4pyr" }, "kernel": 0.5},
             {"targets": {"model": "L56pyr"}, "kernel": 0.3}]:
    thalCorRect.update(conn)
    topo.ConnectLayers(Tp, Vp_v, thalCorRect)
#! Diffuse connections
thalCorDiff = {"connection_type": "convergent",
               "sources": {"model": "TpRelay"},
               "synapse_model": "AMPA",
               "weights": 5.0,
               "mask": {"circular": {"radius": 5.0 * dpc}},
               "kernel": {"gaussian": {"p_center": 0.1, "sigma": 7.5 * dpc}},
               "delays": {"uniform": {"min": 2.75, "max": 3.25}}}
for conn in [{"targets": {"model": "L4pyr" }},
             {"targets": {"model": "L56pyr"}}]:
    thalCorDiff.update(conn)
    topo.ConnectLayers(Tp, Vp_h, thalCorDiff)
    topo.ConnectLayers(Tp, Vp_v, thalCorDiff)
#! Thalamic connections
#! --------------------
#! Connections inside thalamus, including Rp
#!
#! *Note:* In Hill & Tononi, the inhibition between Rp cells is mediated by
#! GABA_B receptors. We use GABA_A receptors here to provide some self-dampening
#! of Rp.
#!
#! **Note:** The following code had a serious bug in v. 0.1: During the first
#! iteration of the loop, "synapse_model" and "weights" were set to "AMPA" and "0.1",
#! respectively and remained unchanged, so that all connections were created as
#! excitatory connections, even though they should have been inhibitory. We now
#! specify synapse_model and weight explicitly for each connection to avoid this.
thalBase = {"connection_type": "divergent",
            "delays": {"uniform": {"min": 1.75, "max": 2.25}}}
print("Connecting: intra-thalamic")
# Each tuple is (source layer, target layer, connection-spec overrides).
# thalBase is updated in place, so every spec must override every key that
# differs from the previous iteration (see the bug note above).
for src, tgt, conn in [(Tp, Rp, {"sources": {"model": "TpRelay"},
                                 "synapse_model": "AMPA",
                                 "mask": {"circular": {"radius": 2.0 * dpc}},
                                 "kernel": {"gaussian": {"p_center": 1.0, "sigma": 7.5 * dpc}},
                                 "weights": 2.0}),
                       (Tp, Tp, {"sources": {"model": "TpInter"},
                                 "targets": {"model": "TpRelay"},
                                 "synapse_model": "GABA_A",
                                 "weights": -1.0,
                                 "mask": {"circular": {"radius": 2.0 * dpc}},
                                 "kernel": {"gaussian": {"p_center": 0.25, "sigma": 7.5 * dpc}}}),
                       (Tp, Tp, {"sources": {"model": "TpInter"},
                                 "targets": {"model": "TpInter"},
                                 "synapse_model": "GABA_A",
                                 "weights": -1.0,
                                 "mask": {"circular": {"radius": 2.0 * dpc}},
                                 "kernel": {"gaussian": {"p_center": 0.25, "sigma": 7.5 * dpc}}}),
                       (Rp, Tp, {"targets": {"model": "TpRelay"},
                                 "synapse_model": "GABA_A",
                                 "weights": -1.0,
                                 "mask": {"circular": {"radius": 12.0 * dpc}},
                                 "kernel": {"gaussian": {"p_center": 0.15, "sigma": 7.5 * dpc}}}),
                       (Rp, Tp, {"targets": {"model": "TpInter"},
                                 "synapse_model": "GABA_A",
                                 "weights": -1.0,
                                 "mask": {"circular": {"radius": 12.0 * dpc}},
                                 "kernel": {"gaussian": {"p_center": 0.15, "sigma": 7.5 * dpc}}}),
                       (Rp, Rp, {"targets": {"model": "RpNeuron"},
                                 "synapse_model": "GABA_A",
                                 "weights": -1.0,
                                 "mask": {"circular": {"radius": 12.0 * dpc}},
                                 "kernel": {"gaussian": {"p_center": 0.5, "sigma": 7.5 * dpc}}})]:
    thalBase.update(conn)
    topo.ConnectLayers(src, tgt, thalBase)
#! Thalamic input
#! --------------
#! Input to the thalamus from the retina.
#!
#! **Note:** Hill & Tononi specify a delay of 0 ms for this connection.
#! We use 1 ms here.
retThal = {"connection_type": "divergent",
           "synapse_model": "AMPA",
           "mask": {"circular": {"radius": 1.0 * dpc}},
           "kernel": {"gaussian": {"p_center": 0.75, "sigma": 2.5 * dpc}},
           "weights": 10.0,
           "delays": 1.0}
print("Connecting: retino-thalamic")
for conn in [{"targets": {"model": "TpRelay"}},
             {"targets": {"model": "TpInter"}}]:
    retThal.update(conn)
    topo.ConnectLayers(retina, Tp, retThal)
#! Checks on connections
#! ---------------------
#! As a very simple check on the connections created, we inspect
#! the connections from the central node of various layers.
#! Connections from Retina to TpRelay
topo.PlotTargets(topo.FindCenterElement(retina), Tp, 'TpRelay', 'AMPA')
pylab.title('Connections Retina -> TpRelay')
pylab.show()
#! Connections from TpRelay to L4pyr in Vp (horizontally tuned)
topo.PlotTargets(topo.FindCenterElement(Tp), Vp_h, 'L4pyr', 'AMPA')
pylab.title('Connections TpRelay -> Vp(h) L4pyr')
pylab.show()
#! Connections from TpRelay to L4pyr in Vp (vertically tuned)
topo.PlotTargets(topo.FindCenterElement(Tp), Vp_v, 'L4pyr', 'AMPA')
pylab.title('Connections TpRelay -> Vp(v) L4pyr')
pylab.show()
#! Recording devices
#! =================
#! This recording device setup is a bit makeshift. For each population
#! we want to record from, we create one ``multimeter``, then select
#! all nodes of the right model from the target population and
#! connect. ``loc`` is the subplot location for the layer.
print("Connecting: Recording devices")
recorders = {}  # maps display name -> (recorder GID list, subplot index)
for name, loc, population, model in [('TpRelay'   , 1, Tp  , 'TpRelay'),
                                     ('Rp'        , 2, Rp  , 'RpNeuron'),
                                     ('Vp_v L4pyr', 3, Vp_v, 'L4pyr'),
                                     ('Vp_h L4pyr', 4, Vp_h, 'L4pyr')]:
    recorders[name] = (nest.Create('RecordingNode'), loc)
    # Select only the nodes of the requested model within the layer.
    tgts = [nd for nd in nest.GetLeaves(population)[0]
            if nest.GetStatus([nd], 'model')[0]==model]
    nest.Connect(recorders[name][0], tgts)  # one recorder to all targets
#! Example simulation
#! ====================
#! This simulation is set up to create a step-wise visualization of
#! the membrane potential. To do so, we simulate ``sim_interval``
#! milliseconds at a time, then read out data from the multimeters,
#! clear data from the multimeters and plot the data as pseudocolor
#! plots.
#! show time during simulation
nest.SetStatus([0],{'print_time': True})
#! lower and upper limits for color scale, for each of the four
#! populations recorded.
vmn=[-80,-80,-80,-80]
vmx=[-50,-50,-50,-50]
nest.Simulate(Params['sim_interval'])
#! loop over simulation intervals
for t in pylab.arange(Params['sim_interval'], Params['simtime'], Params['sim_interval']):
    # do the simulation
    nest.Simulate(Params['sim_interval'])
    # clear figure and choose colormap
    pylab.clf()
    pylab.jet()
    # now plot data from each recorder in turn, assume four recorders
    for name, r in recorders.items():
        rec = r[0]
        sp = r[1]
        pylab.subplot(2,2,sp)
        d = nest.GetStatus(rec)[0]['events']['V_m']
        if len(d) != Params['N']**2:
            # cortical layer with two neurons in each location, take average
            d = 0.5 * ( d[::2] + d[1::2] )
        # clear data from multimeter
        nest.SetStatus(rec, {'n_events': 0})
        pylab.imshow(pylab.reshape(d, (Params['N'],Params['N'])),
                     aspect='equal', interpolation='nearest',
                     extent=(0,Params['N']+1,0,Params['N']+1),
                     vmin=vmn[sp-1], vmax=vmx[sp-1])
        pylab.colorbar()
        pylab.title(name + ', t = %6.1f ms' % nest.GetKernelStatus()['time'])
    pylab.draw()  # force drawing inside loop
pylab.show()  # required by ``pyreport``
#! just for some information at the end
print(nest.GetKernelStatus())
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Postprocess output from call_variants to produce a VCF file."""
import collections
import copy
import itertools
import os
import tempfile
import time
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from third_party.nucleus.io import fasta
from third_party.nucleus.io import sharded_file_utils
from third_party.nucleus.io import tabix
from third_party.nucleus.io import tfrecord
from third_party.nucleus.io import vcf
from third_party.nucleus.protos import struct_pb2
from third_party.nucleus.protos import variants_pb2
from third_party.nucleus.util import errors
from third_party.nucleus.util import genomics_math
from third_party.nucleus.util import proto_utils
from third_party.nucleus.util import ranges
from third_party.nucleus.util import variant_utils
from third_party.nucleus.util import variantcall_utils
from third_party.nucleus.util import vcf_constants
from third_party.nucleus.util.struct_utils import add_string_field
from deepvariant import dv_constants
from deepvariant import dv_vcf_constants
from deepvariant import haplotypes
from deepvariant import logging_level
from deepvariant import tf_utils
from deepvariant import vcf_stats
from deepvariant.protos import deepvariant_pb2
from deepvariant.python import postprocess_variants as postprocess_variants_lib
FLAGS = flags.FLAGS
# Command-line interface: input/output locations, quality-filtering
# thresholds, and optional gVCF / reporting behavior.
flags.DEFINE_string(
    'infile', None,
    'Required. Path(s) to CallVariantOutput protos in TFRecord format to '
    'postprocess. These should be the complete set of outputs for '
    'call_variants.py.')
flags.DEFINE_string(
    'outfile', None,
    'Required. Destination path where we will write output variant calls in '
    'VCF format.')
flags.DEFINE_string(
    'ref', None,
    'Required. Genome reference in FAI-indexed FASTA format. Used to determine '
    'the sort order for the emitted variants and the VCF header.')
flags.DEFINE_float(
    'qual_filter', 1.0,
    'Any variant with QUAL < qual_filter will be filtered in the VCF file.')
flags.DEFINE_float(
    'cnn_homref_call_min_gq', 20.0,
    'All CNN RefCalls whose GQ is less than this value will have ./. genotype '
    'instead of 0/0.')
flags.DEFINE_float(
    'multi_allelic_qual_filter', 1.0,
    'The qual value below which to filter multi-allelic variants.')
flags.DEFINE_string(
    'nonvariant_site_tfrecord_path', None,
    'Optional. Path(s) to the non-variant sites protos in TFRecord format to '
    'convert to gVCF file. This should be the complete set of outputs from the '
    '--gvcf flag of make_examples.py.')
flags.DEFINE_string(
    'gvcf_outfile', None,
    'Optional. Destination path where we will write the Genomic VCF output.')
flags.DEFINE_boolean(
    'group_variants', True, 'If using vcf_candidate_importer and multi-allelic '
    'sites are split across multiple lines in VCF, set to False so that '
    'variants are not grouped when transforming CallVariantsOutput to '
    'Variants.')
flags.DEFINE_boolean(
    'vcf_stats_report', True, 'Optional. Output a visual report (HTML) of '
    'statistics about the output VCF at the same base path given by --outfile.')
flags.DEFINE_string(
    'sample_name', None,
    'Optional. If set, this will only be used if the sample name cannot be '
    'determined from the CallVariantsOutput or non-variant sites protos.')
flags.DEFINE_boolean(
    'use_multiallelic_model', False,
    'If True, use a specialized model for genotype resolution of multiallelic '
    'cases with two alts.')
flags.DEFINE_enum(
    'debug_output_all_candidates', None, ['ALT', 'INFO'],
    'Outputs all candidates considered by DeepVariant as additional ALT '
    'alleles or as an INFO field. For ALT, filtered candidates are assigned '
    'a GL=0 and added as ALTs alleles, but do not appear in any sample '
    'genotypes. This flag is useful for debugging purposes. '
    'ALT-mode is incompatible with the multiallelic caller.')
flags.DEFINE_boolean('only_keep_pass', False, 'If True, only keep PASS calls.')
# Module-level tuning constants follow.
# Some format fields are indexed by alt allele, such as AD (depth by allele).
# These need to be cleaned up if we remove any alt alleles. Any info field
# listed here will be have its values cleaned up if we've removed any alt
# alleles.
# Each tuple contains: field name, ref_is_zero.
_ALT_ALLELE_INDEXED_FORMAT_FIELDS = frozenset([('AD', True), ('VAF', False)])
# The number of places past the decimal point to round QUAL estimates to.
_QUAL_PRECISION = 7
# The genotype likelihood of the gVCF alternate allele for variant calls.
_GVCF_ALT_ALLELE_GL = -99
# FASTA cache size. Span 300 Mb so that we query each chromosome at most once.
_FASTA_CACHE_SIZE = 300000000
# When this was set, it's about 20 seconds per log.
_LOG_EVERY_N = 100000
# When outputting all alt alleles, use placeholder value to indicate genotype
# will be soft-filtered.
_FILTERED_ALT_PROB = -9.0
def _extract_single_sample_name(record):
  """Returns the name of the single sample within the CallVariantsOutput file.

  Args:
    record: A deepvariant_pb2.CallVariantsOutput record.

  Returns:
    The name of the single individual in the first proto in the file.

  Raises:
    ValueError: There is not exactly one VariantCall in the proto or the
      call_set_name of the VariantCall is not populated.
  """
  sample_name = variant_utils.only_call(record.variant).call_set_name
  if not sample_name:
    raise ValueError(
        'Error extracting name: no call_set_name set: {}'.format(record))
  return sample_name
def compute_filter_fields(variant, min_quality):
  """Computes the filter fields for this variant.

  Variant filters are generated based on its quality score value and particular
  genotype call.

  Args:
    variant: Variant to filter.
    min_quality: Minimum acceptable phred scaled variant detection probability.

  Returns:
    Filter field strings to be added to the variant.
  """
  gt_type = variant_utils.genotype_type(variant)
  if gt_type == variant_utils.GenotypeType.no_call:
    return [dv_vcf_constants.DEEP_VARIANT_NO_CALL]
  if gt_type == variant_utils.GenotypeType.hom_ref:
    return [dv_vcf_constants.DEEP_VARIANT_REF_FILTER]
  if variant.quality < min_quality:
    return [dv_vcf_constants.DEEP_VARIANT_QUAL_FILTER]
  return [dv_vcf_constants.DEEP_VARIANT_PASS]
def most_likely_genotype(predictions, ploidy=2, n_alleles=2):
  """Gets the most likely genotype from predictions.

  Genotypes are ordered per the VCF 4.3 specification: for ploidy 2 and
  alleles 0..N-1 (0 is REF), the likelihood ordering is 0/0, 0/1, 1/1,
  0/2, 1/2, 2/2, ... i.e. the index of genotype "a/b" with a <= b is
  b*(b + 1)/2 + a.

  Args:
    predictions: N element array-like. The real-space probabilities of each
      genotype state for this variant. The number of elements in predictions is
      related to ploidy and n_alleles and is given by
      N = choose(ploidy + n_alleles - 1, n_alleles - 1).
    ploidy: int >= 1. The ploidy (e.g., number of chromosomes) of this sample.
    n_alleles: int >= 2. The number of alleles (ref + n_alts).

  Returns:
    Two values. The first is the index of the most likely prediction in
    predictions. The second is a list of `ploidy` elements with the VCF-style
    genotype indices corresponding to this index. For example, with ploidy 2
    and an index of 1, this returns the value (1, [0, 1]).

  Raises:
    NotImplementedError: if ploidy != 2 as this is not yet implemented.
    ValueError: If n_alleles < 2.
    ValueError: If we cannot determine the genotype given prediction, n_alts,
      and ploidy.
  """
  if ploidy != 2:
    raise NotImplementedError('Ploidy != 2 not yet implemented.')
  if n_alleles < 2:
    raise ValueError('n_alleles must be >= 2 but got', n_alleles)
  index_of_max = np.argmax(predictions)
  # Walk the VCF genotype ordering for ploidy 2 until we reach the argmax
  # index. Alleles are numbered 0..n_alleles-1, so the outer loop must stop at
  # n_alleles - 1; iterating one step further (the previous `n_alleles + 1`
  # bound) would silently fabricate a genotype referencing a nonexistent
  # allele whenever `predictions` is longer than the valid genotype count.
  # With the correct bound, such malformed input raises ValueError below.
  index = 0
  for h1 in range(n_alleles):
    for h2 in range(h1 + 1):
      if index == index_of_max:
        return index, [h2, h1]
      index += 1
  raise ValueError('No corresponding GenotypeType for predictions', predictions)
def uncall_gt_if_no_ad(variant):
  """Converts genotype to "./." if sum(AD)=0."""
  call = variant_utils.only_call(variant)
  total_depth = sum(variantcall_utils.get_ad(call))
  if total_depth == 0:
    # No reads support any allele: mark the genotype as missing and zero out
    # the likelihoods and GQ.
    call.genotype[:] = [-1, -1]
    call.genotype_likelihood[:] = [0, 0]
    variantcall_utils.set_gq(call, 0)
def uncall_homref_gt_if_lowqual(variant, min_homref_gq):
  """Converts genotype to "./." if variant is CNN RefCall and has low GQ.

  If the variant has "RefCall" filter (which means an example was created for
  this site but CNN didn't call this as variant) and if the GQ is less than
  the given min_homref_gq threshold, set the genotype of the variant proto
  to "./.". See http://internal for more info.

  Args:
    variant: third_party.nucleus.protos.Variant proto.
    min_homref_gq: float.
  """
  call = variant_utils.only_call(variant)
  is_ref_call = variant.filter == [dv_vcf_constants.DEEP_VARIANT_REF_FILTER]
  if is_ref_call and variantcall_utils.get_gq(call) < min_homref_gq:
    call.genotype[:] = [-1, -1]
def add_call_to_variant(variant, predictions, qual_filter=0, sample_name=None):
  """Fills in Variant record using the prediction probabilities.

  This functions sets the call[0].genotype, call[0].info['GQ'],
  call[0].genotype_probabilities, variant.filter, and variant.quality fields of
  variant based on the genotype likelihoods in predictions.

  Args:
    variant: third_party.nucleus.protos.Variant protobuf to be filled in with
      info derived from predictions.
    predictions: N element array-like. The real-space probabilities of each
      genotype state for this variant.
    qual_filter: float. If predictions implies that this isn't a reference call
      and the QUAL of the prediction isn't larger than qual_filter variant will
      be marked as FILTERed.
    sample_name: str. The name of the sample to assign to the Variant proto
      call_set_name field.

  Returns:
    A Variant record.

  Raises:
    ValueError: If variant doesn't have exactly one variant.call record.
  """
  call = variant_utils.only_call(variant)
  n_alleles = len(variant.alternate_bases) + 1
  index, genotype = most_likely_genotype(predictions, n_alleles=n_alleles)
  gq, variant.quality = compute_quals(predictions, index)
  call.call_set_name = sample_name
  variantcall_utils.set_gt(call, genotype)
  variantcall_utils.set_gq(call, gq)
  # Store the per-genotype probabilities as bounded log10 values (GL field).
  gls = [genomics_math.perror_to_bounded_log10_perror(gp) for gp in predictions]
  variantcall_utils.set_gl(call, gls)
  # Order matters below: first drop genotypes with zero supporting reads, then
  # compute filters from the (possibly updated) genotype, then apply the
  # low-GQ hom-ref uncall rule which depends on the filter just set.
  uncall_gt_if_no_ad(variant)
  variant.filter[:] = compute_filter_fields(variant, qual_filter)
  uncall_homref_gt_if_lowqual(variant, FLAGS.cnn_homref_call_min_gq)
  return variant
def compute_quals(predictions, prediction_index):
  """Computes GQ and QUAL values from a set of prediction probabilities.

  Prediction probabilities are represented as a probability distribution over
  the N genotype states (e.g., for 3 genotype states {HOM_REF, HET, HOM_VAR}).
  Genotype Quality (GQ) is the PHRED scaled confidence in the particular
  genotype assignment. Likewise QUAL is the PHRED scaled confidence in the
  site being variant at all, i.e. P(HET) + P(HOM_VAR) for the diploid case.
  Both values are bounded by the underlying phred conversion.

  Args:
    predictions: N element array-like. The real-space probabilities of each
      genotype state for this variant.
    prediction_index: int. The actual called genotype from the distribution.

  Returns:
    GQ and QUAL values for output in a Variant record.
  """
  # GQ: phred-scale the probability of the chosen genotype; the VCF spec
  # requires an integer, so round to nearest.
  called_prob = predictions[prediction_index]
  gq = int(np.around(genomics_math.ptrue_to_bounded_phred(called_prob)))
  # QUAL: phred-scale the total non-reference probability. The min() guards
  # against the sum exceeding 1.0 due to minor numerical issues.
  nonref_prob = min(sum(predictions[1:]), 1.0)
  qual = genomics_math.ptrue_to_bounded_phred(nonref_prob)
  return gq, round(qual, _QUAL_PRECISION)
def expected_alt_allele_indices(num_alternate_bases):
  """Returns (sorted) expected list of alt_allele_indices, given #alt bases."""
  num_alleles = num_alternate_bases + 1
  # Enumerate every unordered pair of distinct allele indices (including the
  # reference, index 0), drop the reference from each pair, then shift by one
  # so that alt-allele numbering starts at 0.
  index_groups = []
  for allele_pair in itertools.combinations(range(num_alleles), 2):
    alts_only = sorted(set(allele_pair) - {0})
    index_groups.append([i - 1 for i in alts_only])
  return sorted(index_groups)
def _check_alt_allele_indices(call_variants_outputs):
  """Returns True if and only if the alt allele indices are valid."""
  observed_indices = sorted(
      list(cvo.alt_allele_indices.indices) for cvo in call_variants_outputs)
  expected = expected_alt_allele_indices(
      len(call_variants_outputs[0].variant.alternate_bases))
  if observed_indices != expected:
    logging.warning(
        'Alt allele indices found from call_variants_outputs for '
        'variant %s is %s, which is invalid.', call_variants_outputs[0].variant,
        observed_indices)
    return False
  return True
def is_valid_call_variants_outputs(call_variants_outputs):
  """Returns True if the call_variants_outputs follows our assumptions.

  Args:
    call_variants_outputs: list of CallVariantsOutput to check.

  Returns:
    True if the sanity check passes.
  """
  if not call_variants_outputs:
    # An empty list is trivially consistent.
    return True
  if not _check_alt_allele_indices(call_variants_outputs):
    return False
  # Every record must describe the same underlying variant.
  reference_record = call_variants_outputs[0]
  for other_record in call_variants_outputs[1:]:
    if reference_record.variant != other_record.variant:
      logging.warning(
          'Expected all inputs to merge_predictions to have the '
          'same `variant`, but getting %s and %s.', reference_record.variant,
          other_record.variant)
      return False
  return True
def convert_call_variants_outputs_to_probs_dict(
    canonical_variant,
    call_variants_outputs,
    alt_alleles_to_remove,
    debug_output_all_candidates=None):
  """Converts a list of CallVariantsOutput to an internal allele probs dict.

  Args:
    canonical_variant: variants_pb2.Variant.
    call_variants_outputs: list of CallVariantsOutput.
    alt_alleles_to_remove: set of strings. Alleles to remove.
    debug_output_all_candidates: If 'ALT', set low qual alleles to be
      soft-filtered.

  Returns:
    Dictionary of {(allele1, allele2): list of probabilities},
    where allele1 and allele2 are strings.
  """
  flattened_dict = collections.defaultdict(list)
  if not call_variants_outputs:
    return flattened_dict
  for call_variants_output in call_variants_outputs:
    # allele_set1 is always just the reference base; allele_set2 holds the
    # alt allele(s) this particular pileup image was computed for.
    allele_set1 = frozenset([canonical_variant.reference_bases])
    allele_set2 = frozenset(
        canonical_variant.alternate_bases[index]
        for index in call_variants_output.alt_allele_indices.indices)
    has_alleles_to_rm = bool(alt_alleles_to_remove.intersection(allele_set2))
    # Normally we skip records that involve any pruned allele entirely.
    if has_alleles_to_rm and debug_output_all_candidates != 'ALT':
      continue
    if has_alleles_to_rm:
      # This block is run when debug_output_all_candidates=ALT
      # It sets genotype likelihood to a placeholder value,
      # which is later used to set GL=1.0 (prob=0).
      p11, p12, p22 = (_FILTERED_ALT_PROB, _FILTERED_ALT_PROB,
                       _FILTERED_ALT_PROB)
    else:
      p11, p12, p22 = call_variants_output.genotype_probabilities
    # Spread the hom-ref / het / hom-alt probabilities over every concrete
    # (allele, allele) pair covered by this image.
    for (set1, set2, p) in [(allele_set1, allele_set1, p11),
                            (allele_set1, allele_set2, p12),
                            (allele_set2, allele_set2, p22)]:
      for indices in itertools.product(set1, set2):
        flattened_dict[indices].append(p)
  return flattened_dict
def get_alt_alleles_to_remove(call_variants_outputs, qual_filter):
  """Returns all the alt alleles with quality below qual_filter.

  Quality is defined as (1-p(ref/ref)). This removes all alt alleles whose
  quality is below the filter value, with the exception that if the set of
  removed alt alleles covers everything in the alternate_bases, the single alt
  allele where the 1-p(ref/ref) is the highest is retained.

  Args:
    call_variants_outputs: list of CallVariantsOutput.
    qual_filter: double. The qual value below which to filter variants.

  Returns:
    Set of strings: alt alleles to remove.
  """
  alt_alleles_to_remove = set()  # first alt is represented as 0.
  if not qual_filter or not call_variants_outputs:
    return alt_alleles_to_remove
  max_qual, max_qual_allele = None, None
  canonical_variant = call_variants_outputs[0].variant
  for call_variants_output in call_variants_outputs:
    # Go through the ones where alt_allele_indices has
    # exactly one element. There are the pileup images that contains
    # information like:
    #     p00, p01, p11
    #  or p00, p02, p22
    #  ...p00, p0N, pNN
    if len(call_variants_output.alt_allele_indices.indices) == 1:
      # From here, we want to see which ones of these alt alleles (1-N) that we
      # can skip. We can use the concept of QUAL in VCF, and filter out ones
      # where QUAL < FLAGS.qual_filter. This is because if QUAL is too low,
      # it means it is unlikely this has a variant genotype.
      _, qual = compute_quals(
          call_variants_output.genotype_probabilities, prediction_index=0)
      alt_allele_index = call_variants_output.alt_allele_indices.indices[0]
      # Keep track of one alt allele with the highest qual score.
      if max_qual is None or max_qual < qual:
        max_qual, max_qual_allele = (
            qual, canonical_variant.alternate_bases[alt_allele_index])
      if qual < qual_filter:
        alt_alleles_to_remove.add(
            canonical_variant.alternate_bases[alt_allele_index])
  # If all alt alleles are below `qual_filter`, keep at least one.
  if len(alt_alleles_to_remove) == len(canonical_variant.alternate_bases):
    alt_alleles_to_remove -= set([max_qual_allele])
  # redacted
  return alt_alleles_to_remove
# redacted
class AlleleRemapper(object):
  """Facilitates removing alt alleles from a Variant.

  This class provides a one-stop shop for managing the information needed to
  remove alternative alleles from a Variant. It provides functions and
  properties to get the original alts, the new alts, and asking if alleles
  (strings) or indices (integers) should be retained or eliminated.
  """

  def __init__(self, original_alt_alleles, alleles_to_remove):
    self.original_alts = list(original_alt_alleles)
    self.alleles_to_remove = set(alleles_to_remove)

  def keep_index(self, allele_index, ref_is_zero=False):
    """Returns True if the allele at allele_index should be retained.

    Args:
      allele_index: int. Index of the allele to test.
      ref_is_zero: bool. If True, index 0 denotes the reference allele (always
        kept) and alt alleles start at index 1; otherwise allele_index indexes
        directly into the original alt allele list.
    """
    if ref_is_zero:
      # The reference is never removed; shift the remaining indices into
      # alt-allele space and re-check.
      return allele_index == 0 or self.keep_index(allele_index - 1)
    return self.original_alts[allele_index] not in self.alleles_to_remove

  def retained_alt_alleles(self):
    """Returns the surviving alt alleles, preserving their original order."""
    removed = self.alleles_to_remove
    return [allele for allele in self.original_alts if allele not in removed]

  def reindex_allele_indexed_fields(self, variant, fields):
    """Updates variant.call fields indexed by ref + alt_alleles.

    Args:
      variant: Variant proto. We will update the info fields of the
        Variant.call protos.
      fields: Iterable of (field_name, ref_is_zero) pairs. Each named
        alternative-allele-indexed field in VariantCall.info will have the
        values associated with removed alleles dropped.
    """
    for field_name, ref_is_zero in fields:
      for call in variant.calls:
        if field_name not in call.info:
          continue
        entry = call.info[field_name]
        kept_values = [
            value for index, value in enumerate(entry.values)
            if self.keep_index(index, ref_is_zero=ref_is_zero)
        ]
        # The ListValue type does not support slice assignment, so we must
        # clear the repeated field and re-extend it.
        del entry.values[:]
        entry.values.extend(kept_values)
def prune_alleles(variant, alt_alleles_to_remove):
  """Returns a copy of `variant` with the given alt alleles removed.

  Args:
    variant: variants_pb2.Variant.
    alt_alleles_to_remove: iterable of str. Alt alleles to drop from variant.

  Returns:
    variants_pb2.Variant without the removed alternate_bases. When there is
    nothing to remove, the input variant is returned unmodified.
  """
  # Nothing to prune: hand back the original proto untouched.
  if not alt_alleles_to_remove:
    return variant
  pruned = variants_pb2.Variant()
  pruned.CopyFrom(variant)
  remapper = AlleleRemapper(variant.alternate_bases, alt_alleles_to_remove)
  # First fix up any allele-indexed VariantCall.info fields, then rewrite
  # the alt allele list itself.
  remapper.reindex_allele_indexed_fields(pruned,
                                         _ALT_ALLELE_INDEXED_FORMAT_FIELDS)
  pruned.alternate_bases[:] = remapper.retained_alt_alleles()
  return pruned
def get_multiallelic_distributions(call_variants_outputs, pruned_alleles):
  """Return 9 values for 3 distributions from given multiallelic CVOs.

  This function is only called for sites with two alt alleles remaining after
  pruning. However, call_variants_outputs contains CVOs from pruned and unpruned
  alleles, so we ignore the CVOs containing alleles that were pruned.

  Args:
    call_variants_outputs: list of CVOs for a multiallelic site with exactly two
      alts after pruning. For such a site, we would expect 3 CVOs (alt1, alt2,
      alt1/2). However, there may be more than 3 CVOs if some alleles were
      pruned at this site.
    pruned_alleles: set of strings corresponding to pruned alleles. Used to
      filter CVOs for pruned alleles.

  Returns:
    final_probs: array of shape (1, 9). The 9 values correspond to three model
      output distributions. The first is from the image containing alt1, the
      second is from the image for alt2, the third is from the image with both
      alt1 and alt2.
  """
  alt_allele_indices_to_probs = {}
  # Find the CVOs with two alts, corresponding to the image with alt1 and alt2.
  for cvo in call_variants_outputs:
    indices = cvo.alt_allele_indices.indices[:]
    curr_alleles = [cvo.variant.alternate_bases[i] for i in indices]
    curr_alleles_pruned = any([a in pruned_alleles for a in curr_alleles])
    # Ignore CVOs containing pruned alleles.
    if len(indices) == 2 and not curr_alleles_pruned:
      first_alt_index = min(indices)
      second_alt_index = max(indices)
      probs = cvo.genotype_probabilities[:]
      alt_allele_indices_to_probs[(first_alt_index, second_alt_index)] = probs
  # Find the single alt CVOs.
  # NOTE(review): first_alt_index/second_alt_index are only bound if an
  # unpruned two-alt CVO was found above. Callers guarantee this per the
  # docstring; otherwise the loop below would raise NameError.
  for cvo in call_variants_outputs:
    if len(cvo.alt_allele_indices.indices[:]) == 1:
      index = cvo.alt_allele_indices.indices[0]
      if index == first_alt_index or index == second_alt_index:
        probs = cvo.genotype_probabilities[:]
        alt_allele_indices_to_probs[index] = probs
  # Exactly three distributions (alt1, alt2, alt1/2) must have been collected.
  assert len(alt_allele_indices_to_probs) == 3
  # Concatenate all probabilities into one array.
  final_probs = np.array([
      alt_allele_indices_to_probs[first_alt_index] +
      alt_allele_indices_to_probs[second_alt_index] +
      alt_allele_indices_to_probs[(first_alt_index, second_alt_index)]
  ])
  return final_probs
def get_multiallelic_model(use_multiallelic_model):
  """Loads the specialized multiallelic model, which is in saved model format.

  Args:
    use_multiallelic_model: if True, load the specialized model used for
      genotype resolution of multiallelic cases with two alts.

  Returns:
    A keras model instance when use_multiallelic_model is True, else None.
  """
  if not use_multiallelic_model:
    return None
  # The saved model directory ships alongside this module.
  model_dir = os.path.join(os.path.dirname(__file__), 'multiallelic_model')
  return tf.keras.models.load_model(model_dir, compile=False)
def normalize_predictions(predictions):
  """Normalizes predictions to sum to one, zeroing soft-filtered alleles.

  Entries equal to the _FILTERED_ALT_PROB sentinel are excluded from the
  normalizing sum and emitted as 0.0. An all-zero input is replaced by a
  uniform distribution before normalizing.
  """
  if sum(predictions) == 0:
    predictions = [1.0] * len(predictions)
  # Sum only the unfiltered probabilities; fall back to 1.0 so we never
  # divide by zero when everything was soft-filtered.
  total = 0.0
  for p in predictions:
    if p != _FILTERED_ALT_PROB:
      total += p
  total = total or 1.0
  return [0.0 if p == _FILTERED_ALT_PROB else p / total for p in predictions]
def merge_predictions(call_variants_outputs,
                      qual_filter=None,
                      multiallelic_model=None,
                      debug_output_all_candidates=None):
  """Merges the predictions from the multi-allelic calls.

  Args:
    call_variants_outputs: list of CallVariantsOutput protos for one site.
    qual_filter: double. Alt alleles of multi-allelic sites whose quality
      falls below this value are pruned.
    multiallelic_model: optional keras model, applied only when exactly two
      alts remain after pruning.
    debug_output_all_candidates: 'INFO' to record all candidate alleles in
      the INFO field, 'ALT' to skip pruning, or None.

  Returns:
    Tuple of (canonical Variant proto, list of genotype probabilities).

  Raises:
    ValueError: if call_variants_outputs is empty or fails the sanity check.
  """
  # See the logic described in the class PileupImageCreator pileup_image.py
  #
  # Because of the logic above, this function expects all cases above to have
  # genotype_predictions that we can combine from.
  if not call_variants_outputs:
    raise ValueError('Expected 1 or more call_variants_outputs.')
  if not is_valid_call_variants_outputs(call_variants_outputs):
    raise ValueError('`call_variants_outputs` did not pass sanity check.')
  first_call, other_calls = call_variants_outputs[0], call_variants_outputs[1:]
  canonical_variant = first_call.variant
  # Bi-allelic site: a single CVO already holds the final distribution.
  if not other_calls:
    canonical_variant = variant_utils.simplify_variant_alleles(
        canonical_variant)
    return canonical_variant, first_call.genotype_probabilities
  alt_alleles_to_remove = get_alt_alleles_to_remove(call_variants_outputs,
                                                    qual_filter)
  # flattened_probs_dict is only used below when the multiallelic model is
  # NOT applied; the two-alt model path uses the raw CVO distributions.
  flattened_probs_dict = convert_call_variants_outputs_to_probs_dict(
      canonical_variant, call_variants_outputs, alt_alleles_to_remove,
      debug_output_all_candidates)
  if debug_output_all_candidates == 'INFO':
    add_string_field(canonical_variant.info, 'CANDIDATES',
                     '|'.join(canonical_variant.alternate_bases))
  if debug_output_all_candidates != 'ALT':
    canonical_variant = prune_alleles(canonical_variant, alt_alleles_to_remove)
  # Run alternate model for multiallelic cases.
  num_alts = len(canonical_variant.alternate_bases)
  if num_alts == 2 and multiallelic_model is not None:
    # We have 3 CVOs for 2 alts. In this case, there are 6 possible genotypes.
    cvo_probs = get_multiallelic_distributions(call_variants_outputs,
                                               alt_alleles_to_remove)
    normalized_predictions = multiallelic_model(cvo_probs).numpy().tolist()[0]
  else:

    def min_alt_filter(probs):
      # Minimum over unfiltered entries; 0 when everything was filtered.
      return min([x for x in probs if x != _FILTERED_ALT_PROB] or [0])

    predictions = [
        min_alt_filter(flattened_probs_dict[(m, n)]) for _, _, m, n in
        variant_utils.genotype_ordering_in_likelihoods(canonical_variant)
    ]
    # All-zero predictions are replaced by a uniform distribution before
    # normalization (normalize_predictions repeats this guard defensively).
    if sum(predictions) == 0:
      predictions = [1.0] * len(predictions)
    normalized_predictions = normalize_predictions(predictions)
  # Note the simplify_variant_alleles call *must* happen after the predictions
  # calculation above. flattened_probs_dict is indexed by alt allele, and
  # simplify can change those alleles so we cannot simplify until afterwards.
  canonical_variant = variant_utils.simplify_variant_alleles(canonical_variant)
  return canonical_variant, normalized_predictions
def write_variants_to_vcf(variant_iterable, output_vcf_path, header):
  """Writes Variant protos to a VCF file.

  Args:
    variant_iterable: iterable. An iterable of sorted Variant protos.
    output_vcf_path: str. Output file in VCF format.
    header: VcfHeader proto. The VCF header to use for writing the variants.
  """
  logging.info('Writing output to VCF file: %s', output_vcf_path)
  with vcf.VcfWriter(
      output_vcf_path, header=header, round_qualities=True) as writer:
    count = 0
    for variant in variant_iterable:
      # With --only_keep_pass, skip records whose FILTER is anything other
      # than the single PASS value.
      if (not FLAGS.only_keep_pass or
          variant.filter == [dv_vcf_constants.DEEP_VARIANT_PASS]):
        count += 1
        writer.write(variant)
        # Periodic progress logging, once per _LOG_EVERY_N written records.
        logging.log_every_n(logging.INFO, '%s variants written.', _LOG_EVERY_N,
                            count)
def _zero_scale_gl(variant):
  """Zero-scales GL to mimic write-then-read.

  When writing variants using VcfWriter, GLs are converted to PLs, an integer
  format scaled so the most likely genotype has value 0. This mimics that
  GL -> PL -> GL round trip by mutating the variant's single call in place.

  Args:
    variant: Variant proto. The variant to scale; must have exactly one call.

  Returns:
    variant: Variant proto. The input variant with its GLs modified.
  """
  call = variant_utils.only_call(variant)
  best = max(call.genotype_likelihood)
  # Shift every likelihood so the maximum becomes zero.
  rescaled = [gl - best for gl in call.genotype_likelihood]
  call.genotype_likelihood[:] = rescaled
  return variant
def _sort_grouped_variants(group):
return sorted(group, key=lambda x: sorted(x.alt_allele_indices.indices))
def _transform_call_variants_output_to_variants(input_sorted_tfrecord_path,
                                                qual_filter,
                                                multi_allelic_qual_filter,
                                                sample_name, group_variants,
                                                use_multiallelic_model,
                                                debug_output_all_candidates):
  """Yields Variant protos in sorted order from CallVariantsOutput protos.

  Variants present in the input TFRecord are converted to Variant protos, with
  the following filters applied: 1) variants are omitted if their quality is
  lower than the `qual_filter` threshold. 2) multi-allelic variants omit
  individual alleles whose qualities are lower than the
  `multi_allelic_qual_filter` threshold.

  Args:
    input_sorted_tfrecord_path: str. TFRecord format file containing sorted
      CallVariantsOutput protos.
    qual_filter: double. The qual value below which to filter variants.
    multi_allelic_qual_filter: double. The qual value below which to filter
      multi-allelic variants.
    sample_name: str. Sample name to write to VCF file.
    group_variants: bool. If true, group variants that have same start and end
      position.
    use_multiallelic_model: if True, use a specialized model for genotype
      resolution of multiallelic cases with two alts.
    debug_output_all_candidates: if 'ALT', output all alleles considered by
      DeepVariant as ALT alleles.

  Yields:
    Variant protos in sorted order representing the CallVariantsOutput calls.
  """
  multiallelic_model = get_multiallelic_model(
      use_multiallelic_model=use_multiallelic_model)
  group_fn = None
  if group_variants:
    group_fn = lambda x: variant_utils.variant_range(x.variant)
  # NOTE: with group_fn=None, itertools.groupby keys on the records
  # themselves, so only consecutive equal protos are grouped together.
  for _, group in itertools.groupby(
      tfrecord.read_tfrecords(
          input_sorted_tfrecord_path, proto=deepvariant_pb2.CallVariantsOutput),
      group_fn):
    outputs = _sort_grouped_variants(group)
    # Merge the (possibly multi-allelic) group into one canonical Variant
    # plus its genotype probability distribution.
    canonical_variant, predictions = merge_predictions(
        outputs,
        multi_allelic_qual_filter,
        multiallelic_model=multiallelic_model,
        debug_output_all_candidates=debug_output_all_candidates)
    # Attach the genotype call, quality and filter fields.
    variant = add_call_to_variant(
        canonical_variant,
        predictions,
        qual_filter=qual_filter,
        sample_name=sample_name)
    yield variant
def _get_contig_based_variant_sort_keyfn(contigs):
"""Returns a callable used to sort variants based on genomic position.
Args:
contigs: list(ContigInfo). The list of contigs in the desired sort order.
Returns:
A callable that takes a single Variant proto as input and returns a value
that sorts based on contig and then start position. Note that if the variant
has a contig not represented in the list of contigs this will raise
IndexError.
"""
contig_index = {contig.name: ix for ix, contig in enumerate(contigs)}
def keyfn(variant):
return contig_index[variant.reference_name], variant.start
return keyfn
def _get_contig_based_lessthan(contigs):
"""Returns a callable that compares variants on genomic position.
The returned function takes two arguments, both of which should be Variant
protos or None. The function returns True if and only if the first Variant is
strictly less than the second, which occurs if the first variant is on a
previous chromosome or is on the same chromosome and its entire span lies
before the start position of the second variant. `None` is treated as a
sentinel value that does not compare less than any valid Variant.
Args:
contigs: list(ContigInfo). The list of contigs in the desired sort order.
Returns:
A callable that takes two Variant protos as input and returns True iff the
first is strictly less than the second. Note that if the variant has a
contig not represented in the list of contigs this will raise IndexError.
"""
contig_index = {contig.name: i for i, contig in enumerate(contigs)}
def lessthanfn(variant1, variant2):
if variant1 is None:
return False
if variant2 is None:
return True
contig1 = contig_index[variant1.reference_name]
contig2 = contig_index[variant2.reference_name]
return (contig1 < contig2 or
(contig1 == contig2 and variant1.end <= variant2.start))
return lessthanfn
def _create_record_from_template(template, start, end, fasta_reader):
"""Returns a copy of the template variant with the new start and end.
Updates to the start position cause a different reference base to be set.
Args:
template: third_party.nucleus.protos.Variant. The template variant whose
non-location and reference base information to use.
start: int. The desired new start location.
end: int. The desired new end location.
fasta_reader: GenomeReferenceFai object. The reader used to determine the
correct start base to use for the updated variant.
Returns:
An updated third_party.nucleus.protos.Variant with the proper start, end,
and reference base set and all other fields inherited from the template.
"""
retval = copy.deepcopy(template)
retval.start = start
retval.end = end
if start != template.start:
retval.reference_bases = fasta_reader.query(
ranges.make_range(retval.reference_name, start, start + 1))
return retval
def _transform_to_gvcf_record(variant):
  """Modifies a variant to include gVCF allele and associated likelihoods.

  Args:
    variant: third_party.nucleus.protos.Variant. The Variant to modify in
      place; must have exactly one call.

  Returns:
    The variant after applying the modification to its alleles and
    allele-related FORMAT fields. Idempotent: a variant already carrying the
    gVCF alt allele is returned unchanged.
  """
  if vcf_constants.GVCF_ALT_ALLELE not in variant.alternate_bases:
    variant.alternate_bases.append(vcf_constants.GVCF_ALT_ALLELE)
    # Add one new GL for het allele/gVCF for each of the other alleles, plus
    # one for the homozygous gVCF allele.
    num_new_gls = len(variant.alternate_bases) + 1
    call = variant_utils.only_call(variant)
    call.genotype_likelihood.extend([_GVCF_ALT_ALLELE_GL] * num_new_gls)
    # Allele-indexed FORMAT fields get a zero entry for the new allele.
    if call.info and 'AD' in call.info:
      call.info['AD'].values.extend([struct_pb2.Value(int_value=0)])
    if call.info and 'VAF' in call.info:
      call.info['VAF'].values.extend([struct_pb2.Value(number_value=0)])
  return variant
def merge_and_write_variants_and_nonvariants(variant_iterable,
                                             nonvariant_iterable, lessthan,
                                             fasta_reader, vcf_writer,
                                             gvcf_writer):
  """Writes records consisting of the merging of variant and non-variant sites.

  The merging strategy used for single-sample records is to emit variants
  without modification. Any non-variant sites that overlap a variant are
  truncated to only report on regions not affected by the variant. Note that
  Variants are represented using zero-based half-open coordinates, so a VCF
  record of `chr1 10 A T` would have `start=9` and `end=10`.

  Args:
    variant_iterable: Iterable of Variant protos. A sorted iterable of the
      variants to merge.
    nonvariant_iterable: Iterable of Variant protos. A sorted iterable of the
      non-variant sites to merge.
    lessthan: Callable. A function that takes two Variant protos as input and
      returns True iff the first argument is located "before" the second and
      the variants do not overlap.
    fasta_reader: GenomeReferenceFai object. The reference genome reader used
      to ensure gVCF records have the correct reference base.
    vcf_writer: VcfWriter. Writes variants to VCF.
    gvcf_writer: VcfWriter. Writes merged variants and nonvariants to gVCF.
  """

  def next_or_none(iterable):
    # Exhausted iterators are represented by the sentinel None.
    try:
      return next(iterable)
    except StopIteration:
      return None

  variant = next_or_none(variant_iterable)
  nonvariant = next_or_none(nonvariant_iterable)
  # Classic two-pointer merge over the sorted streams: emit whichever record
  # comes first, splitting/trimming non-variant regions that overlap a
  # variant.
  while variant is not None or nonvariant is not None:
    if lessthan(variant, nonvariant):
      # The variant is strictly before the next non-variant region. The VCF
      # write is still subject to --only_keep_pass; the gVCF always gets it.
      if (not FLAGS.only_keep_pass or
          variant.filter == [dv_vcf_constants.DEEP_VARIANT_PASS]):
        vcf_writer.write(variant)
        gvcf_variant = _transform_to_gvcf_record(_zero_scale_gl(variant))
        gvcf_writer.write(gvcf_variant)
      variant = next_or_none(variant_iterable)
      continue
    elif lessthan(nonvariant, variant):
      gvcf_writer.write(nonvariant)
      nonvariant = next_or_none(nonvariant_iterable)
      continue
    else:
      # The variant and non-variant are on the same contig and overlap.
      assert max(variant.start, nonvariant.start) < min(
          variant.end, nonvariant.end), '{} and {}'.format(variant, nonvariant)
      if nonvariant.start < variant.start:
        # Write a non-variant region up to the start of the variant.
        v = _create_record_from_template(nonvariant, nonvariant.start,
                                         variant.start, fasta_reader)
        gvcf_writer.write(v)
      if nonvariant.end > variant.end:
        # There is an overhang of the non-variant site after the variant is
        # finished, so update the non-variant to point to that.
        nonvariant = _create_record_from_template(nonvariant, variant.end,
                                                  nonvariant.end, fasta_reader)
      else:
        # This non-variant site is subsumed by a Variant. Ignore it.
        nonvariant = next_or_none(nonvariant_iterable)
def _get_base_path(input_vcf):
"""Returns the base path for the output files.
Args:
input_vcf: string. Path to VCF for which to compute stats.
Returns:
A string with the base path.
"""
if input_vcf.endswith('.vcf'):
return input_vcf[:-4]
elif input_vcf.endswith('.vcf.gz'):
return input_vcf[:-7]
else:
return input_vcf
def _decide_to_use_csi(contigs):
"""Return True if CSI index is to be used over tabix index format.
If the length of any reference chromosomes exceeds 512M
(here we use 5e8 to keep a safety margin), we will choose csi
as the index format. Otherwise we use tbi as default.
Args:
contigs: list of contigs.
Returns:
A boolean variable indicating if the csi format is to be used or not.
"""
max_chrom_length = max([c.n_bases for c in contigs])
return max_chrom_length > 5e8
def build_index(vcf_file, csi=False):
  """A helper function for indexing VCF files.

  Args:
    vcf_file: string. Path to the VCF file to be indexed.
    csi: bool. If true, index using the CSI format; otherwise use tabix (TBI).
  """
  if csi:
    # min_shift=14 matches the conventional CSI default bin size.
    tabix.build_csi_index(vcf_file, min_shift=14)
  else:
    tabix.build_index(vcf_file)
def get_cvo_paths_and_first_record():
  """Returns sharded filenames for and one record from CVO input file.

  Returns:
    Tuple of (list of shard path strings for FLAGS.infile, the first
    CallVariantsOutput proto found in them, or None when the input is empty).
  """
  # Expands FLAGS.infile into its shard paths when it is a sharded spec
  # (per the helper's name) — otherwise it is a single-element list.
  paths = sharded_file_utils.maybe_generate_sharded_filenames(FLAGS.infile)
  record = tf_utils.get_one_example_from_examples_path(
      ','.join(paths), proto=deepvariant_pb2.CallVariantsOutput)
  return paths, record
def get_sample_name():
  """Determines the sample name to be used for the output VCF and gVCF.

  We check the following sources to determine the sample name and use the
  first name available:
  1) CallVariantsOutput
  2) nonvariant site TFRecords
  3) --sample_name flag
  4) default sample name

  Returns:
    sample_name used when writing the output VCF and gVCF.
  """
  _, record = get_cvo_paths_and_first_record()
  # gvcf_record is only bound when the flag is set; the branch below guards
  # on the same flag before reading it, so it is never used unbound.
  if FLAGS.nonvariant_site_tfrecord_path:
    gvcf_record = tf_utils.get_one_example_from_examples_path(
        FLAGS.nonvariant_site_tfrecord_path, proto=variants_pb2.Variant)
  if record is not None:
    # Source 1: the first CallVariantsOutput record.
    sample_name = _extract_single_sample_name(record)
    logging.info('Using sample name from call_variants output. Sample name: %s',
                 sample_name)
    if FLAGS.sample_name:
      logging.info('--sample_name is set but was not used.')
  elif FLAGS.nonvariant_site_tfrecord_path and gvcf_record and gvcf_record.calls:
    # Source 2: the first nonvariant record's call.
    sample_name = gvcf_record.calls[0].call_set_name
    logging.info(
        'call_variants output is empty, so using sample name from TFRecords at '
        '--nonvariant_site_tfrecord_path. Sample name: %s', sample_name)
    if FLAGS.sample_name:
      logging.info('--sample_name is set but was not used.')
  elif FLAGS.sample_name:
    # Source 3: the explicit flag.
    sample_name = FLAGS.sample_name
    logging.info(
        'call_variants output and nonvariant TFRecords are empty. Using sample '
        'name set with --sample_name. Sample name: %s', sample_name)
  else:
    # Source 4: the library-wide default.
    sample_name = dv_constants.DEFAULT_SAMPLE_NAME
    logging.info(
        'Could not determine sample name and --sample_name is unset. Using the '
        'default sample name. Sample name: %s', sample_name)
  return sample_name
def main(argv=()):
  """Converts call_variants outputs into a VCF (and optionally gVCF) file."""
  with errors.clean_commandline_error_exit():
    # Flag validation: no positional arguments, and mutually-dependent flags
    # set consistently.
    if len(argv) > 1:
      errors.log_and_raise(
          'Command line parsing failure: postprocess_variants does not accept '
          'positional arguments but some are present on the command line: '
          '"{}".'.format(str(argv)), errors.CommandLineError)
    del argv  # Unused.
    # gVCF output needs both the nonvariant records and an output path.
    if (not FLAGS.nonvariant_site_tfrecord_path) != (not FLAGS.gvcf_outfile):
      errors.log_and_raise(
          'gVCF creation requires both nonvariant_site_tfrecord_path and '
          'gvcf_outfile flags to be set.', errors.CommandLineError)
    if (FLAGS.use_multiallelic_model and
        FLAGS.debug_output_all_candidates == 'ALT'):
      errors.log_and_raise(
          'debug_output_all_candidates=ALT is incompatible with the '
          'multiallelic model. Use INFO instead.', errors.CommandLineError)
    proto_utils.uses_fast_cpp_protos_or_die()
    logging_level.set_from_flag()
    # The reference genome provides both the contig ordering and the
    # reference bases needed for gVCF records.
    fasta_reader = fasta.IndexedFastaReader(
        FLAGS.ref, cache_size=_FASTA_CACHE_SIZE)
    contigs = fasta_reader.header.contigs
    sample_name = get_sample_name()
    cvo_paths, cvo_record = get_cvo_paths_and_first_record()
    if cvo_record is None:
      logging.info('call_variants_output is empty. Writing out empty VCF.')
      variant_generator = iter([])
    else:
      # Sort the CVOs genomically into a temp file, then stream them back as
      # merged Variant protos.
      temp = tempfile.NamedTemporaryFile()
      start_time = time.time()
      postprocess_variants_lib.process_single_sites_tfrecords(
          contigs, cvo_paths, temp.name)
      logging.info('CVO sorting took %s minutes',
                   (time.time() - start_time) / 60)
      logging.info('Transforming call_variants_output to variants.')
      independent_variants = _transform_call_variants_output_to_variants(
          input_sorted_tfrecord_path=temp.name,
          qual_filter=FLAGS.qual_filter,
          multi_allelic_qual_filter=FLAGS.multi_allelic_qual_filter,
          sample_name=sample_name,
          group_variants=FLAGS.group_variants,
          use_multiallelic_model=FLAGS.use_multiallelic_model,
          debug_output_all_candidates=FLAGS.debug_output_all_candidates)
      variant_generator = haplotypes.maybe_resolve_conflicting_variants(
          independent_variants)
    add_info_candidates = FLAGS.debug_output_all_candidates == 'INFO'
    header = dv_vcf_constants.deepvariant_header(
        contigs=contigs,
        sample_names=[sample_name],
        add_info_candidates=add_info_candidates)
    use_csi = _decide_to_use_csi(contigs)
    start_time = time.time()
    if not FLAGS.nonvariant_site_tfrecord_path:
      # VCF-only output path.
      logging.info('Writing variants to VCF.')
      write_variants_to_vcf(
          variant_iterable=variant_generator,
          output_vcf_path=FLAGS.outfile,
          header=header)
      if FLAGS.outfile.endswith('.gz'):
        build_index(FLAGS.outfile, use_csi)
      logging.info('VCF creation took %s minutes',
                   (time.time() - start_time) / 60)
    else:
      # VCF + gVCF path: merge variants with the nonvariant regions.
      logging.info('Merging and writing variants to VCF and gVCF.')
      lessthanfn = _get_contig_based_lessthan(contigs)
      with vcf.VcfWriter(
          FLAGS.outfile, header=header, round_qualities=True) as vcf_writer, \
          vcf.VcfWriter(
              FLAGS.gvcf_outfile, header=header, round_qualities=True) \
          as gvcf_writer:
        nonvariant_generator = tfrecord.read_shard_sorted_tfrecords(
            FLAGS.nonvariant_site_tfrecord_path,
            key=_get_contig_based_variant_sort_keyfn(contigs),
            proto=variants_pb2.Variant)
        merge_and_write_variants_and_nonvariants(variant_generator,
                                                 nonvariant_generator,
                                                 lessthanfn, fasta_reader,
                                                 vcf_writer, gvcf_writer)
      if FLAGS.outfile.endswith('.gz'):
        build_index(FLAGS.outfile, use_csi)
      if FLAGS.gvcf_outfile.endswith('.gz'):
        build_index(FLAGS.gvcf_outfile, use_csi)
      logging.info('Finished writing VCF and gVCF in %s minutes.',
                   (time.time() - start_time) / 60)
    if FLAGS.vcf_stats_report:
      # Optional stats report generated alongside the VCF.
      outfile_base = _get_base_path(FLAGS.outfile)
      with vcf.VcfReader(FLAGS.outfile) as reader:
        vcf_stats.create_vcf_report(
            variants=reader.iterate(),
            output_basename=outfile_base,
            sample_name=sample_name,
            vcf_reader=reader)
    if cvo_record:
      # The sorted temp file only exists when CVO records were present.
      temp.close()
if __name__ == '__main__':
  # These flags must always be supplied on the command line.
  flags.mark_flags_as_required(['infile', 'outfile', 'ref'])
  tf.compat.v1.app.run()
| google/deepvariant | deepvariant/postprocess_variants.py | Python | bsd-3-clause | 49,805 |
from selenium.common.exceptions import NoSuchElementException
from .base import FunctionalTest, login_test_user_with_browser
class NewVisitorTest(FunctionalTest):
    def test_default_page(self):
        # Jihoon hears that a great board app has been released,
        # and goes to check out the website.
        self.browser.get(self.live_server_url)

        # The page title displays 'Home'.
        self.assertIn('Home', self.browser.title)

        # The logo in the header navbar reads 'K-Board'.
        logo_text = self.browser.find_element_by_class_name('navbar-brand')
        self.assertEqual('K-Board', logo_text.text)

        # The 'Default' board is visible in the navbar.
        navbar_item = self.browser.find_elements_by_class_name('navbar-item')
        self.assertEqual('Default', navbar_item[0].text)

        # A single board is visible in the panel area.
        boards = self.browser.find_elements_by_class_name('panel-post-summary')
        self.assertEqual(len(boards), 1)

        # That board is labeled 'Default'.
        panel_title = boards[0].find_element_by_css_selector('.panel-heading > a')
        self.assertEqual(panel_title.text, 'Default')

        # Jihoon enters the first board, 'Default'.
        self.move_to_default_board()

        # The board has no posts at all.
        tbody = self.browser.find_element_by_tag_name('tbody')
        with self.assertRaises(NoSuchElementException):
            tbody.find_element_by_tag_name('tr')

        # He writes one post.
        self.add_post('Hello', 'Hello guys')

        # Jihoon presses the logo button to return to the board list page
        # and see whether there are other boards.
        home_button = self.browser.find_element_by_class_name('navbar-brand')
        home_button.click()

        # The url is /.
        self.assertRegex(self.browser.current_url, '.+/$')

        # The post he wrote is visible in the Default board panel.
        boards = self.browser.find_elements_by_class_name('panel-post-summary')
        panel_title = boards[0].find_element_by_css_selector('.panel-heading > a')
        panel_posts = boards[0].find_elements_by_css_selector('table tr')
        self.assertEqual(panel_title.text, 'Default')
        self.assertEqual(len(panel_posts), 1)
        self.assertEqual(panel_posts[0].text, 'Hello')

    @login_test_user_with_browser
    def test_write_post_and_confirm_post_view(self):
        self.move_to_default_board()

        # Jihoon presses the write button to create a new post.
        self.click_create_post_button()

        # The browser moves to the post creation page.
        self.assertRegex(self.browser.current_url, '.+/boards/default/posts/new/')

        # The page title and header display '글 쓰기' (write post).
        header_text = self.browser.find_element_by_tag_name('h3').text
        self.assertIn('글 쓰기', self.browser.title)
        self.assertIn('글 쓰기', header_text)

        # The title input box shows the placeholder 'Insert Title'.
        titlebox = self.browser.find_element_by_id('id_post_title')
        self.assertEqual(
            titlebox.get_attribute('placeholder'),
            'Insert Title'
        )

        # He types "Title of This Post" into the title box.
        titlebox.send_keys('Title of This Post')

        contentbox = self.get_contentbox()

        # He types "Content of This Post" into the content box.
        contentbox.send_keys('Content of This Post')

        self.browser.switch_to.default_content()

        # Pressing the submit button at the bottom completes the post and
        # returns to the post list.
        self.click_submit_button()
        self.assertRegex(self.browser.current_url, '.+/boards/default/')

        # The post list page's title and header display 'Default'.
        header_text = self.browser.find_element_by_tag_name('h3').text
        self.assertIn('Default', self.browser.title)
        self.assertIn('Default', header_text)

        # The post list shows "1: Title of This Post".
        self.check_for_row_in_list_table('id_post_list_table', 'Title of This Post')

        # He presses the write button below the post list to write a new post.
        self.click_create_post_button()

        # He types "Title of Second Post" into the title box.
        titlebox = self.browser.find_element_by_id('id_post_title')
        titlebox.send_keys('Title of Second Post')

        # He types "Content of Second Post" into the content box.
        contentbox = self.get_contentbox()
        contentbox.send_keys('Content of Second Post')
        self.browser.switch_to.default_content()

        # Pressing the submit button completes the post and returns to the
        # post list.
        self.click_submit_button()
        self.assertRegex(self.browser.current_url, '.+/boards/default/')

        # The post list shows both post titles.
        self.check_for_row_in_list_table('id_post_list_table', 'Title of Second Post')
        self.check_for_row_in_list_table('id_post_list_table', 'Title of This Post')

        # Jihoon wants to make sure the post was written correctly.
        # He clicks the 'Title of This Post' post.
        table = self.browser.find_element_by_id('id_post_list_table')
        rows = table.find_elements_by_css_selector('tbody > tr > td > a')
        rows[1].click()

        # A new page opens showing the post's details.
        self.assertRegex(self.browser.current_url, '.+/posts/(\d+)/')

        # The post page's title reads 'Title of This Post'.
        self.assertIn('Title of This Post', self.browser.title)

        # The post title displays 'Title of This Post', and
        post_title = self.browser.find_element_by_css_selector('.post-panel .panel-title').text
        self.assertIn('Title of This Post', post_title)

        # the post content displays 'Content of This Post'.
        post_content = self.browser.find_element_by_css_selector('.post-panel .panel-body').text
        self.assertIn('Content of This Post', post_content)

        # An IP address is displayed next to the post title.
        post_ip = self.browser.find_element_by_id('id_post_ip').text
        self.assertRegex(post_ip, 'IP: \d{1,3}\.\d{1,3}\.xxx\.\d{1,3}')

        # Jihoon types 'This is a comment' into the comment field below the
        # post content.
        comment_iframe = self.browser.find_element_by_class_name('comment-iframe')
        self.browser.switch_to.frame(comment_iframe)
        comment = self.browser.find_element_by_id('id_new_comment')
        comment.send_keys('This is a comment')

        # He presses the add-comment button.
        comment_submit = self.browser.find_element_by_id('id_new_comment_submit')
        comment_submit.click()

        # The comment is added and 'This is a comment' is visible.
        comment_list = self.browser.find_element_by_class_name("comment")
        comments = comment_list.find_elements_by_tag_name('p')
        self.assertEqual(comments[0].text, 'This is a comment')

        # The comment shows the time it was written.
        comment_date = comment_list.find_element_by_class_name('comment-date')
        self.assertRegex(comment_date.text, '\d{4}-[01]\d-[0-3]\d [0-2]\d:[0-5]\d:[0-5]\d')

        # He doesn't like the comment and wants to delete it again. He
        # presses the delete button to the right of the comment.
        remove_comment_button = self.browser.find_element_by_class_name("delete-comment")
        remove_comment_button.click()

        # He confirms that no comments remain.
        self.browser.find_elements_by_css_selector(".no-comment")

        # Having confirmed the comment was deleted properly, Jihoon presses
        # the list button below the post to go back to the post list page.
        self.browser.switch_to.default_content()
        create_post_button = self.browser.find_element_by_id('id_back_to_post_list_button')
        create_post_button.click()

        # The post list page appears.
        self.assertRegex(self.browser.current_url, '.+/boards/default/$')

        # To check the cancel feature while writing a new post, Jihoon
        # presses the write button again.
        self.click_create_post_button()

        # When he presses the cancel button,
        self.browser.find_element_by_id('id_cancel_button').click()

        # he returns to the post list page.
        self.assertRegex(self.browser.current_url, '.+/boards/default/$')

    @login_test_user_with_browser
    def test_forbid_comment_input_when_does_not_login(self):
        # Jihoon writes a post while logged in.
        self.move_to_default_board()
        self.add_post('hello', 'content')

        # The post list page is being displayed.
        self.assertRegex(self.browser.current_url, '.+/boards/default/$')

        # He logs out, wanting to comment anonymously.
        self.logout()

        # He enters the board.
        self.move_to_default_board()

        # He enters the post.
        post_list = self.browser.find_elements_by_css_selector('#id_post_list_table > tbody > tr > td > a')
        post_list[0].click()

        # There is no comment input form; a please-log-in-to-comment notice
        # is visible instead.
        comment_iframe = self.browser.find_element_by_class_name('comment-iframe')
        self.browser.switch_to.frame(comment_iframe)
        self.browser.find_element_by_class_name('comment-require-login')
| cjh5414/kboard | kboard/functional_test/test_post_creation_and_management.py | Python | mit | 9,992 |
"""
This module provides objective (loss) functions for deep learning.
All computations are tensor operations.
"""
import numpy as np
import theano
import theano.tensor as T
from lemontree.layers.layer import BaseLayer
class CategoricalCrossentropy(BaseLayer):
    """Categorical cross-entropy loss between predicted probabilities and labels."""

    def __init__(self, stabilize=False, mode='mean'):
        """
        This function initializes the class.

        Parameters
        ----------
        stabilize: bool, default: False
            a bool value to use stabilization or not.
            if True, predictions are clipped to [1e-7, 1 - 1e-7] to prevent
            NaNs from log(0). the clipping slightly breaks the probability
            distribution assumption of sum = 1.
            for most cases, it is OK to use 'False'.
            however, for many-class problems such as imagenet, this option
            may matter.
        mode: string {'mean', 'sum'}, default: 'mean'
            a string to choose how to reduce the loss to a scalar.
            'mean' averages the loss over the (mini) batch.
            'sum' sums the loss over the (mini) batch.

        Returns
        -------
        None.
        """
        # check assert
        assert isinstance(stabilize, bool), '"stabilize" should be a bool value.'
        assert mode in ['mean', 'sum'], '"mode" should be either "mean" or "sum".'
        # set members
        self.stabilize = stabilize
        self.mode = mode

    def get_output(self, input_, label, mask=None):
        """
        This function overrides the parents' one.
        Computes the loss from the model predictions and the real labels,
        using the Theano-implemented categorical_crossentropy directly.

        Parameters
        ----------
        input_: TensorVariable
            an array of (batch size, prediction) probabilities.
            for the cross-entropy task, "input_" is a 2D matrix.
        label: TensorVariable
            an array of (batch size, answer) one-hot targets, or
            (batch size,) integer class labels.
            for classification, the second form is highly recommended.
        mask: TensorVariable, default: None
            an array of (batch size,) containing only 0 and 1.
            losses are summed or averaged only where the mask is 1.

        Returns
        -------
        TensorVariable
            a symbolic scalar tensor variable.
        """
        # Clip once up front instead of repeating the expression in every
        # branch; this was previously duplicated eight times.
        if self.stabilize:
            input_ = T.clip(input_, 1e-7, 1.0 - 1e-7)
        # Per-example cross-entropy; the branches below only choose the
        # reduction to a scalar.
        ce = T.nnet.categorical_crossentropy(input_, label)
        if mask is not None:
            # Zero out the masked entries before reducing; the mean divides
            # by the number of unmasked entries, not the batch size.
            if self.mode == 'mean':
                return T.sum(ce * mask) / T.sum(mask)
            elif self.mode == 'sum':
                return T.sum(ce * mask)
        else:
            if self.mode == 'mean':
                return T.mean(ce)
            elif self.mode == 'sum':
                return T.sum(ce)
        # Unreachable given the assert in __init__, kept as a safety net.
        raise ValueError('Not implemented mode entered. Mode should be in {mean, sum}.')
class CategoricalAccuracy(BaseLayer):
    """
    Top-k classification accuracy over a (mini) batch.
    """

    def __init__(self, top_k=1):
        """
        Configure how many of the most probable classes count as correct.

        Parameters
        ----------
        top_k: int, default: 1
            an answer counts as correct when it appears among the top-k
            most probable predicted classes.

        Returns
        -------
        None.
        """
        assert isinstance(top_k, int) and top_k > 0, '"top_k" should be a positive integer.'
        self.top_k = top_k

    def get_output(self, input_, label, mask=None):
        """
        Build the symbolic scalar accuracy.

        Parameters
        ----------
        input_: TensorVariable
            (batch, classes) matrix of predicted probabilities.
        label: TensorVariable
            (batch, classes) one-hot targets or a (batch,) vector of
            integer class indices (the latter is recommended).
        mask: TensorVariable or None
            optional (batch,) vector containing only 0 and 1; accuracy is
            averaged only over entries whose mask is 1.

        Returns
        -------
        TensorVariable
            a symbolic scalar.
        """
        # First compute a per-example hit indicator, then reduce it.
        if self.top_k == 1:
            predicted = T.argmax(input_, axis=-1)
            if label.ndim == 1:
                hit = T.eq(predicted, label)
            elif label.ndim == 2:
                hit = T.eq(predicted, T.argmax(label, axis=-1))
            else:
                raise ValueError()
        else:
            # TODO: not yet tested
            # sort by values and keep the indices of the top k classes
            top_k_input_ = T.argsort(input_)[:, -self.top_k:]
            if label.ndim == 1:
                hit = T.any(T.eq(top_k_input_, label), axis=-1)
            elif label.ndim == 2:
                hit = T.any(T.eq(top_k_input_, T.argmax(label, axis=-1)), axis=-1)
            else:
                raise ValueError()
        if mask is None:
            return T.mean(hit)
        # masked mean over the batch
        return T.sum(hit * mask) / T.sum(mask)
class BinaryCrossentropy(BaseLayer):
    """
    Binary cross-entropy between predicted probabilities and 0/1 targets,
    reduced to a scalar over the (mini) batch.
    """

    def __init__(self, stabilize=False, mode='mean'):
        """
        Configure the loss reduction.

        Parameters
        ----------
        stabilize: bool, default: False
            if True, predictions are clipped to [1e-7, 1 - 1e-7] before the
            log is taken, preventing NaNs for probabilities at exactly 0 or 1.
        mode: string {'mean', 'sum'}, default: 'mean'
            'mean' averages the per-example losses over the batch,
            'sum' adds them up.

        Returns
        -------
        None.
        """
        assert isinstance(stabilize, bool), '"stabilize" should be a bool value.'
        assert mode in ['mean', 'sum'], '"mode" should be either "mean" or "sum".'
        self.stabilize = stabilize
        self.mode = mode

    def get_output(self, input_, label, mask=None):
        """
        Build the symbolic scalar loss from predictions and targets.

        Parameters
        ----------
        input_: TensorVariable
            an array of (batch size, prediction) probabilities.
        label: TensorVariable
            an array of (batchsize,) whose values are 0 or 1.
        mask: TensorVariable or None
            optional (batch,) vector containing only 0 and 1; the loss is
            summed or averaged only over entries whose mask is 1.

        Returns
        -------
        TensorVariable
            a symbolic scalar.
        """
        probs = T.clip(input_, 1e-7, 1.0 - 1e-7) if self.stabilize else input_
        bce = T.nnet.binary_crossentropy(probs, label)
        if mask is not None:
            # masked reduction: 'mean' normalizes by the number of unmasked rows
            if self.mode == 'mean':
                return T.sum(bce * mask) / T.sum(mask)
            elif self.mode == 'sum':
                return T.sum(bce * mask)
            else:
                raise ValueError('Not implemented mode entered. Mode should be in {mean, sum}.')
        if self.mode == 'mean':
            return T.mean(bce)
        elif self.mode == 'sum':
            return T.sum(bce)
        else:
            raise ValueError('Not implemented mode entered. Mode should be in {mean, sum}.')
class BinaryAccuracy(BaseLayer):
    """
    Binary classification accuracy using a fixed 0.5 decision threshold.
    """

    def get_output(self, input_, label, mask=None):
        """
        This function overrides the parents' one.
        Computes the accuracy of model prediction against the real label.

        Parameters
        ----------
        input_: TensorVariable
            an array of (batch size, prediction) probabilities.
        label: TensorVariable
            targets whose values are 0 or 1.
        mask: TensorVariable, default: None
            an array of (batchsize,) only contains 0 and 1.
            accuracy is averaged only over entries whose mask is 1.
            added with a default of None for backward compatibility and
            for consistency with the other objectives in this module,
            which all support masked reduction.

        Returns
        -------
        TensorVariable
            a symbolic tensor variable which is scalar.
        """
        # TODO: Not tested
        # a prediction strictly greater than 0.5 counts as class 1
        correct = T.eq(T.gt(input_, 0.5), label)
        if mask is None:
            return T.mean(correct)
        # masked mean over the batch, matching the other losses here
        return T.sum(correct * mask) / T.sum(mask)
class SquareLoss(BaseLayer):
    """
    Half squared error (0.5 * (prediction - target)^2) reduced to a scalar
    over the (mini) batch.
    """

    def __init__(self, mode='mean'):
        """
        Configure the loss reduction.

        Parameters
        ----------
        mode: string {'mean', 'sum'}, default: 'mean'
            'mean' averages the losses through the (mini) batch,
            'sum' adds them up.

        Returns
        -------
        None.
        """
        assert mode in ['mean', 'sum'], '"mode" should be either "mean" or "sum".'
        self.mode = mode

    def get_output(self, input_, label, mask=None):
        """
        Build the symbolic scalar loss from predictions and targets.

        Parameters
        ----------
        input_: TensorVariable
            (batch, prediction) matrix.
        label: TensorVariable
            targets of the same shape as input_ (one-hot encoding is
            recommended for classification).
        mask: TensorVariable or None
            optional (batch,) vector containing only 0 and 1; the loss is
            summed or averaged only over entries whose mask is 1.

        Returns
        -------
        TensorVariable
            a symbolic scalar.
        """
        sq = T.square(input_ - label)
        if mask is None:
            if self.mode == 'mean':
                return 0.5 * T.mean(sq)
            elif self.mode == 'sum':
                return 0.5 * T.sum(sq)
            else:
                raise ValueError('Not implemented mode entered. Mode should be in {mean, sum}.')
        # collapse the feature axis first so the (batch,) mask lines up
        per_example = T.sum(sq, axis=-1)
        if self.mode == 'mean':
            return 0.5 * T.sum(per_example * mask) / T.sum(mask)
        elif self.mode == 'sum':
            return 0.5 * T.sum(per_example * mask)
        else:
            raise ValueError('Not implemented mode entered. Mode should be in {mean, sum}.')
class WordPerplexity(BaseLayer):
    """
    Word-level perplexity: 2 ** (- mean log2 probability of the target word).
    """

    def get_output(self, input_, label, mask=None):
        """
        This function overrides the parents' one.
        Computes perplexity from predicted word distributions and targets.

        Parameters
        ----------
        input_: TensorVariable
            an array of (batch size, vocabulary) word probabilities.
        label: TensorVariable
            an array of (batchsize,) integer word indices; currently only
            the integer-label form is supported.
        mask: TensorVariable, default: None
            an array of (batchsize,) only contains 0 and 1.
            the average runs only over entries whose mask is 1.
            the body already handled mask being None, but the parameter
            had no default; None is now the default so the signature is
            consistent with the other objectives in this module.

        Returns
        -------
        TensorVariable
            a symbolic tensor variable which is scalar.
        """
        # log2 probability the model assigns to each target word
        log_probs = T.log2(input_[T.arange(label.shape[0]), label])
        if mask is None:
            return T.pow(2, -T.mean(log_probs))
        # masked mean of the log-probabilities
        return T.pow(2, -T.sum(log_probs * mask) / T.sum(mask))
class KLGaussianNormal(BaseLayer):
    """
    KL divergence between a diagonal Gaussian, given as concatenated
    (mu, logvar), and the standard normal N(0, I), averaged elementwise.
    """

    def __init__(self, input_shape, output_shape):
        """
        Configure the latent dimensionality.
        The input width is double the latent width since the input
        concatenates mu and logvar.

        Parameters
        ----------
        input_shape: tuple
            a tuple of a single value, (2 * latent dim,).
        output_shape: tuple
            a tuple of a single value, (latent dim,).
        """
        super(KLGaussianNormal, self).__init__()
        assert isinstance(input_shape, tuple) and len(input_shape) == 1, '"input_shape" should be a tuple.'
        assert isinstance(output_shape, tuple) and len(output_shape) == 1, '"output_shape" should be a tuple.'
        assert output_shape[0] * 2 == input_shape[0], '"output_shape" is half of "input_shape", since it contains mu and logvar.'
        self.input_shape = input_shape
        self.output_shape = output_shape

    def get_output(self, input_):
        """
        Build the symbolic KL divergence.

        Parameters
        ----------
        input_: TensorVariable
            (batch, 2 * latent dim) with mu in the first half of the
            columns and log-variance in the second half.

        Returns
        -------
        TensorVariable
            a symbolic scalar.
        """
        dim = self.output_shape[0]
        mu = input_[:, :dim]
        logvar = input_[:, dim:]
        # elementwise mean of 0.5 * (mu^2 + var - log var - 1)
        return 0.5 * T.mean(T.square(mu) + T.exp(logvar) - logvar - 1)
class JSTwoGaussian(BaseLayer):
    # NOTE(review): despite the name, the expression below matches the
    # symmetric (Jeffreys) KL divergence between two diagonal Gaussians,
    # not the Jensen-Shannon divergence - confirm intent before renaming.
    def __init__(self, input_shape, output_shape):
        """
        This function initializes the class.
        Each input tensor concatenates mu and logvar, so its width is
        double the latent dimension given by "output_shape".

        Parameters
        ----------
        input_shape: tuple
            a tuple of a single value, i.e., (2 * latent dim,).
        output_shape: tuple
            a tuple of a single value, i.e., (latent dim,).
        """
        super(JSTwoGaussian, self).__init__()
        # check asserts
        assert isinstance(input_shape, tuple) and len(input_shape) == 1, '"input_shape" should be a tuple.'
        assert isinstance(output_shape, tuple) and len(output_shape) == 1, '"output_shape" should be a tuple.'
        assert output_shape[0] * 2 == input_shape[0], '"output_shape" is half of "input_shape", since it contains mu and logvar.'
        # set members
        self.input_shape = input_shape
        self.output_shape = output_shape

    def get_output(self, input1_, input2_):
        """
        This function overrides the parents' one.
        Creates symbolic function to compute the divergence between two
        diagonal Gaussians, each given as a (batch, 2 * latent dim) tensor
        with mu in the first half of the columns and logvar in the second.
        http://stats.stackexchange.com/questions/66271/kullback-leibler-divergence-of-two-normal-distributions

        Parameters
        ----------
        input1_: TensorVariable
        input2_: TensorVariable

        Returns
        -------
        TensorVariable
            a symbolic scalar, averaged over batch and latent dimensions.
        """
        # split each input into its mean and log-variance halves
        mu1 = input1_[:, :self.output_shape[0]]
        logvar1 = input1_[:, self.output_shape[0]:]
        mu2 = input2_[:, :self.output_shape[0]]
        logvar2 = input2_[:, self.output_shape[0]:]
        # Expanding the product gives d^2/v1 + d^2/v2 + v1/v2 + v2/v1 + 2
        # per element (d = mu1 - mu2, v = exp(logvar)); halving the mean and
        # subtracting 2 leaves the per-dimension symmetric KL, averaged.
        return 0.5 * T.mean((T.square(mu1 - mu2) + T.exp(logvar1) + T.exp(logvar2)) * (1 / T.exp(logvar1) + 1 / T.exp(logvar2))) - 2
# TODO: Fix L1, L2 to work!
# NOTE(review): the regularizers below are disabled - kept as a module-level
# string literal (evaluated and discarded at import time), so they have no
# runtime effect until uncommented and fixed.
'''
class L1norm(BaseLayer):

    def get_output(self, params):
        """
        This function overrides the parents' one.
        Computes the loss by summing absolute parameter values.

        Parameters
        ----------
        params: list
            a list of (shared variable) parameters.

        Returns
        -------
        TensorVariable
            a symbolic tensor variable which is scalar.
        """
        # check asserts
        assert isinstance(params, list), '"params" should be a list type.'

        # do
        loss_sum = 0
        for pp in params:
            loss_sum += T.sum(T.abs_(pp))
        return loss_sum


class L2norm(BaseLayer):

    def get_output(self, params):
        """
        This function overrides the parents' one.
        Computes the loss by summing squared parameter values.

        Parameters
        ----------
        params: list
            a list of (shared variable) parameters.

        Returns
        -------
        TensorVariable
            a symbolic tensor variable which is scalar.
        """
        # check asserts
        assert isinstance(params, list), '"params" should be a list type.'

        # do
        loss_sum = 0
        for pp in params:
            loss_sum += T.sum(T.square(pp))
        return loss_sum
'''
# ________________________________________________________________________
#
# Copyright (C) 2014 Andrew Fullford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ________________________________________________________________________
#
import os
import select
import errno
# Service-selection identifiers for the poll wrapper class below.
PL_SELECT = 0
PL_POLL = 1
PL_KQUEUE = 2
# PL_EPOLL = 3	# Possible future implementation

# Event mask bits, mirroring the select.POLL* constants so callers do not
# need select.poll() to exist on this platform.
POLLIN = 1
POLLPRI = 2
POLLOUT = 4
POLLERR = 8
POLLHUP = 16
POLLNVAL = 32
class Error(Exception):
	"""
	This exception is raised for any internally detected problems.
	Using an Exception subclass allows the caller to detect internal
	exceptions as distinct from those raised by the underlying
	services.
	"""
	pass
class poll(object):
	"""
	Presents an interface consistent with select.poll() but uses
	select.kqueue(), select.poll() or select.select() depending on services
	available from the O/S.

	The service is selected automatically and will typically be the best
	choice but it may be overridden with the set_mode() method which must
	be used before the first register() call.  get_available_modes() returns
	the modes possible on this O/S.

	There are a few differences to the select.poll() interface:

	1.  poll.Error exceptions are raised by this module to distinguish them from
	    the underlying select.* object exceptions.  As a special case, any
	    exceptions for EINTR are reraised as OSError(errno=EINTR) so callers do
	    not have to catch the multiple inconsistent forms and the python2/python3
	    variations.  Other than this case, no special attempt is made to make
	    exceptions consistent across the underlying services.

	2.  The events that are available across all modes are POLLIN and POLLOUT.
	    POLLPRI is not available with PL_KQUEUE so if you actually need this,
	    you will probably have to force PL_SELECT mode.  PL_SELECT mode should
	    be available on all systems.

	3.  select.poll() accepts integer file descriptors and objects with a fileno()
	    method that returns an integer file descriptor.  However, the event that
	    fires when an object is used for registration holds the file descriptor
	    returned by the fileno() method rather than the object itself.  On the
	    other hand, select.select() returns the object if that is what was used
	    in the input lists.

	    This module adopts the select behavior regardless of the underlying
	    mode, as it is generally more useful.
	"""
	def __init__(self):
		# Reverse lookups (value -> name) of the module-level PL_* mode
		# constants and POLL* event bits, used for diagnostics.
		self._mode_map = dict((val, nam) for nam, val in globals().items() if nam.startswith('PL_'))
		self._poll_map = dict((val, nam) for nam, val in globals().items() if nam.startswith('POLL'))
		self._poll_keys = list(self._poll_map)
		self._poll_keys.sort()
		self._available_modes = set()
		self._has_registered = False
		# Maps int file descriptor -> the object originally registered, so
		# poll() can report the caller's object rather than the raw fd.
		self._fd_map = {}
		self._mode = None
		# Probe the select module for available services.  The first one
		# found (kqueue, then poll, then select) becomes the default mode.
		if 'kqueue' in select.__dict__ and callable(select.kqueue): # pragma: no cover
			if self._mode is None:
				self._mode = PL_KQUEUE
			self._available_modes.add(PL_KQUEUE)
		if 'poll' in select.__dict__ and callable(select.poll):
			if self._mode is None:
				self._mode = PL_POLL
			self._available_modes.add(PL_POLL)
		if 'select' in select.__dict__ and callable(select.select):
			if self._mode is None: # pragma: no cover
				self._mode = PL_SELECT
			self._available_modes.add(PL_SELECT)
		else: # pragma: no cover
			raise Error("System supports neither select.poll() nor select.select()")

	def __len__(self):
		# Number of currently registered file objects.
		return len(self._fd_map)

	def get_mode(self):
		"""Return the PL_* mode currently in effect."""
		return self._mode

	def set_mode(self, mode):
		"""
		Force a specific service mode (one of the PL_* constants).
		Must be called before the first register().  Returns the
		previously active mode.
		"""
		if self._has_registered:
			raise Error("Mode can't be set once register() has been called")
		if mode in self._available_modes:
			old_mode = self._mode
			self._mode = mode
			return old_mode
		else:
			# NOTE(review): the conditional expression binds to the whole
			# formatted string, so an unknown mode raises Error(mode) with
			# no message text - presumably intended, but worth confirming.
			raise Error("Mode '%s' is not available" % self.get_mode_name(mode) if mode in self._mode_map else mode)

	def get_mode_name(self, mode=None):
		"""Return the symbolic name of 'mode' (defaults to the current mode)."""
		if mode is None:
			mode = self._mode
		if mode in self._mode_map:
			return self._mode_map[mode]
		else:
			return "Mode" + str(mode)

	def get_available_modes(self):
		"""Return the set of PL_* modes usable on this system."""
		return self._available_modes

	def get_available_mode_names(self):
		"""Return the names of the available modes, ordered by mode value."""
		names = []
		modes = list(self._mode_map)
		modes.sort()
		for mode in modes:
			if mode in self._available_modes:
				names.append(self.get_mode_name(mode))
		return names

	def get_event(self, evmask):
		"""Return a comma-separated string naming the POLL* bits set in 'evmask'."""
		s = ''
		for bit in self._poll_keys:
			if evmask & bit:
				if s:
					s += ','
				s += self._poll_map[bit]
		return s

	def register(self, fo, eventmask=(POLLIN | POLLOUT)):
		"""
		Register a file object (or integer file descriptor) for the events
		in 'eventmask'.  Re-registering an object replaces its mask.
		"""
		fd = None
		try:
			# This tests that the fd is an int type
			# In python2, this will also coerce a long
			# to an int.
			#
			fd = int(fo)
		except:
			pass
		if fd is None:
			if hasattr(fo, 'fileno') and callable(fo.fileno):
				fd = fo.fileno()
			else:
				raise Error("File object %r is neither 'int' nor object with fileno() method" % fo)
			if not isinstance(fd, int):
				raise Error("File object %r fileno() method did not return an 'int'" % fo)

		# Trigger an exception if the fd is not an open file.
		#
		os.fstat(fd)

		# The underlying service object is created lazily on the first
		# register() so that set_mode() can still take effect before it.
		if not self._has_registered:
			if self._mode == PL_KQUEUE: # pragma: no cover
				self._kq = select.kqueue()
			elif self._mode == PL_POLL:
				self._poll = select.poll()
			elif self._mode == PL_SELECT:
				self._rfos = set()
				self._wfos = set()
				self._xfos = set()
			self._has_registered = True
		if self._mode == PL_KQUEUE: # pragma: no cover
			if eventmask & POLLPRI:
				# NOTE(review): comma here makes Error receive two args
				# instead of a formatted message - looks like a missing '%'.
				raise Error("POLLPRI is not supported in %s mode", self.get_mode_name(self._mode))
			self.unregister(fo)
			kl = []
			if eventmask & POLLIN:
				kl.append(select.kevent(fo, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_ADD))
			if eventmask & POLLOUT:
				kl.append(select.kevent(fo, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_ADD))
			self._fd_map[fd] = fo
			self._kq.control(kl, 0, 0)
		elif self._mode == PL_POLL:
			self._fd_map[fd] = fo
			return self._poll.register(fo, eventmask)
		elif self._mode == PL_SELECT:
			self.unregister(fo)
			self._fd_map[fd] = fo
			if eventmask & POLLIN:
				self._rfos.add(fo)
			if eventmask & POLLOUT:
				self._wfos.add(fo)
			if eventmask & POLLPRI:
				self._xfos.add(fo)

	def modify(self, fo, eventmask):
		"""Change the eventmask for an already-registered object."""
		if self._mode == PL_KQUEUE:
			self.register(fo, eventmask)
		elif self._mode == PL_POLL:
			return self._poll.modify(fo, eventmask)
		elif self._mode == PL_SELECT:
			self.register(fo, eventmask)

	def unregister(self, fo):
		"""
		Remove an object from the registered set.  The kqueue and select
		paths silently ignore unknown objects; PL_POLL delegates to the
		underlying service and may raise.
		"""
		fd = None
		try:
			fd = int(fo)
		except:
			pass
		if fd is None:
			if hasattr(fo, 'fileno') and callable(fo.fileno):
				fd = fo.fileno()
			else:
				raise Error("File object '%s' is neither 'int' nor object with fileno() method" % fo)
		if fd in self._fd_map:
			del self._fd_map[fd]
		if self._mode == PL_KQUEUE: # pragma: no cover
			# Remove both filters; either may not be present, so errors
			# from the kernel are deliberately ignored.
			ev = select.kevent(fo, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_DELETE)
			try:
				self._kq.control([ev], 0, 0)
			except:
				pass
			ev = select.kevent(fo, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_DELETE)
			try:
				self._kq.control([ev], 0, 0)
			except:
				pass
		elif self._mode == PL_POLL:
			return self._poll.unregister(fo)
		elif self._mode == PL_SELECT:
			self._rfos.discard(fo)
			self._wfos.discard(fo)
			self._xfos.discard(fo)

	def poll(self, timeout=None):
		"""
		Wait for events on the registered objects for at most 'timeout'
		milliseconds (None blocks indefinitely).  Returns a list of
		(object, eventmask) tuples where 'object' is whatever was passed
		to register().
		"""
		if not self._has_registered:
			raise Error("poll() attempt before any objects have been registered")
		try:
			if self._mode == PL_KQUEUE: # pragma: no cover
				if timeout is not None:
					# kqueue timeouts are in seconds rather than milliseconds
					timeout /= 1000.0
				evlist = []
				kelist = self._kq.control(None, 1024, timeout)
				if not kelist:
					return evlist
				for ke in kelist:
					fd = ke.ident
					if fd not in self._fd_map:
						raise Error("Unknown fd '%s' in kevent" % fd)
					if ke.filter == select.KQ_FILTER_READ:
						evlist.append((self._fd_map[fd], POLLIN))
					elif ke.filter == select.KQ_FILTER_WRITE:
						evlist.append((self._fd_map[fd], POLLOUT))
					else:
						raise Error("Unexpected filter 0x%x from kevent for fd %d" % (ke.filter, fd))
				return evlist
			elif self._mode == PL_POLL:
				evlist = []
				pllist = self._poll.poll(timeout)
				for pl in pllist:
					(fd, mask) = pl
					if fd not in self._fd_map: # pragma: no cover
						raise Error("Unknown fd '%s' in select.poll()" % fd)
					evlist.append((self._fd_map[fd], mask))
				return evlist
			elif self._mode == PL_SELECT:
				if timeout is not None:
					# select timeouts are in seconds rather than milliseconds
					timeout /= 1000.0
				rfos, wfos, xfos = select.select(self._rfos, self._wfos, self._xfos, timeout)

				# select.select() already returns the registered object so no need
				# to map through _fd_map.
				#
				evlist = []
				for fo in xfos:
					evlist.append((fo, POLLPRI))
				for fo in rfos:
					evlist.append((fo, POLLIN))
				for fo in wfos:
					evlist.append((fo, POLLOUT))
				return evlist
		except Exception as e:
			# Normalize EINTR across python versions and underlying services
			# so callers only need to catch one OSError form.
			ecode = None
			etext = None
			try:
				ecode = e.errno
				etext = e.strerror
			except:
				pass
			if ecode is None:
				try:
					ecode = e[0]
					etext = e[1]
				except: # pragma: no cover
					pass
			if ecode == errno.EINTR:
				raise OSError(ecode, etext)
			else:
				raise e # pragma: no cover
| akfullfo/pollinator | pollinator/poll.py | Python | apache-2.0 | 12,116 |
# take a bunch of model_0 model_1 etc files and merge them alphabetically
# NOTE: Python 2 script (print statement, execfile); settings.py supplies
# PLDA_CORPUS_DIRECTORY, EXPERIMENT_DIRECTORY, LOCAL_TOPICS and PLDA_CHUNKS.
# first, import settings;
import sys
print sys.argv
if len(sys.argv) > 1:
    # a settings directory was given on the command line
    execfile(sys.argv[1] + "/settings.py")
else:
    from settings import *
# for each file, load the file into one giant list
# call sort on the list
# write this output somewhere else

# word -> space-separated per-topic counts (one output line per word)
model = dict()

##Add the full vocabulary to the dictionary
##fdict = open("./input_data/word_ids.dat","r")
##for line in fdict:
##    pieces = (line.replace('\t',' ')).split(' ',1)
##    key = (pieces[1].strip()).replace('\"','')
##    value = ''
##    for unused in range(GLOBAL_TOPICS):
##        value = value + '0 '
##    value = value.strip() + '\n'
##    model[key] = value
##fdict.close()

# add dummy values for full vocab, and then replace what we actually have
# this way we make sure the resulting file spans everything in the vocab
# in PLDA, this doesn't matter, but it is critical for certain CLDA scripts
fdict = open(PLDA_CORPUS_DIRECTORY + "/vocab.full.dat","r")
for line in fdict:
    key = line.strip()
    value = ''
    # create empty set of values, will be replaced if it actually appears
    for unused in range(LOCAL_TOPICS):
        value = value + '0 '
    value = value.strip() + '\n' # remove trailing space
    model[key] = value
fdict.close()

#Replace words that actually appear
for num in range(PLDA_CHUNKS):
    infile = open(EXPERIMENT_DIRECTORY + "/partial_results/partial-model_"+str(num),"r")
    for line in infile:
        # Split the line into the word and the values, so we can do a lookup
        pieces = (line.replace('\t',' ')).split(' ',1)
        # replace the dummy line in the model with the real values
        model[pieces[0]] = pieces[1]
    infile.close()

outmodel = sorted(model) # gives sorted list of keys
outfile = open(EXPERIMENT_DIRECTORY + "/local_models/full.model","w")
for key in outmodel:
    outfile.write(key + " " + model[key])
outfile.close()
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import util
from sqlalchemy.orm import aliased
from sqlalchemy.orm import backref
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import subqueryload
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.entities import ComparableEntity
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
# Plain mapped-class fixtures; mappings are supplied per-test-class below.
# ComparableEntity provides value-based __eq__ for the result assertions.
class Company(fixtures.ComparableEntity):
    pass


class Person(fixtures.ComparableEntity):
    pass


class Engineer(Person):
    pass


class Manager(Person):
    pass


class Boss(Manager):
    pass


class Machine(fixtures.ComparableEntity):
    pass


class Paperwork(fixtures.ComparableEntity):
    pass
def _aliased_join_warning(arg):
    """Expect the automatic-aliasing warning emitted when joining to
    overlapping-table entity *arg*."""
    message = (
        "An alias is being generated automatically against joined entity "
        "mapped class %s due to overlapping tables" % (arg,)
    )
    return testing.expect_warnings(message)
class SelfReferentialTestJoinedToBase(fixtures.MappedTest):
    """Self-referential relationship from a joined-inheritance subclass
    (Engineer.reports_to) back to the base class (Person)."""

    run_setup_mappers = "once"

    @classmethod
    def define_tables(cls, metadata):
        # "people" is the polymorphic base table
        Table(
            "people",
            metadata,
            Column(
                "person_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(50)),
            Column("type", String(30)),
        )

        # "engineers" extends people and also references it via reports_to_id
        Table(
            "engineers",
            metadata,
            Column(
                "person_id",
                Integer,
                ForeignKey("people.person_id"),
                primary_key=True,
            ),
            Column("primary_language", String(50)),
            Column("reports_to_id", Integer, ForeignKey("people.person_id")),
        )

    @classmethod
    def setup_mappers(cls):
        engineers, people = cls.tables.engineers, cls.tables.people
        cls.mapper_registry.map_imperatively(
            Person,
            people,
            polymorphic_on=people.c.type,
            polymorphic_identity="person",
        )
        # engineers has two FKs to people, so the inherit_condition must be
        # stated explicitly
        cls.mapper_registry.map_imperatively(
            Engineer,
            engineers,
            inherits=Person,
            inherit_condition=engineers.c.person_id == people.c.person_id,
            polymorphic_identity="engineer",
            properties={
                "reports_to": relationship(
                    Person,
                    primaryjoin=(
                        people.c.person_id == engineers.c.reports_to_id
                    ),
                )
            },
        )

    def test_has(self):
        # EXISTS subquery against the relationship to the base class
        p1 = Person(name="dogbert")
        e1 = Engineer(name="dilbert", primary_language="java", reports_to=p1)
        sess = fixture_session()
        sess.add(p1)
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(
            sess.query(Engineer)
            .filter(Engineer.reports_to.has(Person.name == "dogbert"))
            .first(),
            Engineer(name="dilbert"),
        )

    def test_oftype_aliases_in_exists(self):
        # of_type() narrows the EXISTS target to the Engineer subclass
        e1 = Engineer(name="dilbert", primary_language="java")
        e2 = Engineer(name="wally", primary_language="c++", reports_to=e1)
        sess = fixture_session()
        sess.add_all([e1, e2])
        sess.flush()
        eq_(
            sess.query(Engineer)
            .filter(
                Engineer.reports_to.of_type(Engineer).has(
                    Engineer.name == "dilbert"
                )
            )
            .first(),
            e2,
        )

    def test_join(self):
        # join via an aliased(Person) target to disambiguate the base table
        p1 = Person(name="dogbert")
        e1 = Engineer(name="dilbert", primary_language="java", reports_to=p1)
        sess = fixture_session()
        sess.add(p1)
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        pa = aliased(Person)
        eq_(
            sess.query(Engineer)
            .join(pa, "reports_to")
            .filter(pa.name == "dogbert")
            .first(),
            Engineer(name="dilbert"),
        )
class SelfReferentialJ2JTest(fixtures.MappedTest):
    """Self-referential relationship between two joined-inheritance
    subclasses of the same base: Engineer.reports_to -> Manager."""

    run_setup_mappers = "once"

    @classmethod
    def define_tables(cls, metadata):
        # polymorphic base table
        Table(
            "people",
            metadata,
            Column(
                "person_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(50)),
            Column("type", String(30)),
        )

        # engineers reference managers via reports_to_id
        Table(
            "engineers",
            metadata,
            Column(
                "person_id",
                Integer,
                ForeignKey("people.person_id"),
                primary_key=True,
            ),
            Column("primary_language", String(50)),
            Column("reports_to_id", Integer, ForeignKey("managers.person_id")),
        )

        Table(
            "managers",
            metadata,
            Column(
                "person_id",
                Integer,
                ForeignKey("people.person_id"),
                primary_key=True,
            ),
        )

    @classmethod
    def setup_mappers(cls):
        engineers = cls.tables.engineers
        managers = cls.tables.managers
        people = cls.tables.people
        cls.mapper_registry.map_imperatively(
            Person,
            people,
            polymorphic_on=people.c.type,
            polymorphic_identity="person",
        )
        cls.mapper_registry.map_imperatively(
            Manager, managers, inherits=Person, polymorphic_identity="manager"
        )
        # Engineer.reports_to targets the Manager subclass; the backref
        # "engineers" is used by the aliasing tests below
        cls.mapper_registry.map_imperatively(
            Engineer,
            engineers,
            inherits=Person,
            polymorphic_identity="engineer",
            properties={
                "reports_to": relationship(
                    Manager,
                    primaryjoin=(
                        managers.c.person_id == engineers.c.reports_to_id
                    ),
                    backref="engineers",
                )
            },
        )

    def test_has(self):
        # EXISTS subquery against the Manager-targeting relationship
        m1 = Manager(name="dogbert")
        e1 = Engineer(name="dilbert", primary_language="java", reports_to=m1)
        sess = fixture_session()
        sess.add(m1)
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(
            sess.query(Engineer)
            .filter(Engineer.reports_to.has(Manager.name == "dogbert"))
            .first(),
            Engineer(name="dilbert"),
        )

    def test_join(self):
        # join through an aliased(Manager) target
        m1 = Manager(name="dogbert")
        e1 = Engineer(name="dilbert", primary_language="java", reports_to=m1)
        sess = fixture_session()
        sess.add(m1)
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        ma = aliased(Manager)
        eq_(
            sess.query(Engineer)
            .join(ma, "reports_to")
            .filter(ma.name == "dogbert")
            .first(),
            Engineer(name="dilbert"),
        )

    @testing.combinations((True,), (False,), argnames="autoalias")
    def test_filter_aliasing(self, autoalias):
        # runs the same assertions with automatic aliasing (expecting the
        # overlap warning) and with an explicit aliased(Engineer)
        m1 = Manager(name="dogbert")
        m2 = Manager(name="foo")
        e1 = Engineer(name="wally", primary_language="java", reports_to=m1)
        e2 = Engineer(name="dilbert", primary_language="c++", reports_to=m2)
        e3 = Engineer(name="etc", primary_language="c++")

        sess = fixture_session()
        sess.add_all([m1, m2, e1, e2, e3])
        sess.flush()
        sess.expunge_all()

        if autoalias:
            # filter aliasing applied to Engineer doesn't whack Manager
            with _aliased_join_warning("Engineer->engineers"):
                eq_(
                    sess.query(Manager)
                    .join(Manager.engineers)
                    .filter(Manager.name == "dogbert")
                    .all(),
                    [m1],
                )

            with _aliased_join_warning("Engineer->engineers"):
                eq_(
                    sess.query(Manager)
                    .join(Manager.engineers)
                    .filter(Engineer.name == "dilbert")
                    .all(),
                    [m2],
                )

            with _aliased_join_warning("Engineer->engineers"):
                eq_(
                    sess.query(Manager, Engineer)
                    .join(Manager.engineers)
                    .order_by(Manager.name.desc())
                    .all(),
                    [(m2, e2), (m1, e1)],
                )
        else:
            eng = aliased(Engineer, flat=True)
            eq_(
                sess.query(Manager)
                .join(Manager.engineers.of_type(eng))
                .filter(Manager.name == "dogbert")
                .all(),
                [m1],
            )

            eq_(
                sess.query(Manager)
                .join(Manager.engineers.of_type(eng))
                .filter(eng.name == "dilbert")
                .all(),
                [m2],
            )

            eq_(
                sess.query(Manager, eng)
                .join(Manager.engineers.of_type(eng))
                .order_by(Manager.name.desc())
                .all(),
                [(m2, e2), (m1, e1)],
            )

    @testing.combinations((True,), (False,), argnames="autoalias")
    def test_relationship_compare(self, autoalias):
        # comparing the relationship itself (== None / == instance) inside
        # a join, with and without explicit aliasing
        m1 = Manager(name="dogbert")
        m2 = Manager(name="foo")
        e1 = Engineer(name="dilbert", primary_language="java", reports_to=m1)
        e2 = Engineer(name="wally", primary_language="c++", reports_to=m2)
        e3 = Engineer(name="etc", primary_language="c++")

        sess = fixture_session()
        sess.add(m1)
        sess.add(m2)
        sess.add(e1)
        sess.add(e2)
        sess.add(e3)
        sess.flush()
        sess.expunge_all()

        if autoalias:
            with _aliased_join_warning("Engineer->engineers"):
                eq_(
                    sess.query(Manager)
                    .join(Manager.engineers)
                    .filter(Engineer.reports_to == None)  # noqa
                    .all(),
                    [],
                )

            with _aliased_join_warning("Engineer->engineers"):
                eq_(
                    sess.query(Manager)
                    .join(Manager.engineers)
                    .filter(Engineer.reports_to == m1)
                    .all(),
                    [m1],
                )
        else:
            eng = aliased(Engineer, flat=True)
            eq_(
                sess.query(Manager)
                .join(Manager.engineers.of_type(eng))
                .filter(eng.reports_to == None)  # noqa
                .all(),
                [],
            )

            eq_(
                sess.query(Manager)
                .join(Manager.engineers.of_type(eng))
                .filter(eng.reports_to == m1)
                .all(),
                [m1],
            )
class SelfReferentialJ2JSelfTest(fixtures.MappedTest):
    """Joined-inheritance ``Engineer`` subclass with a self-referential
    ``reports_to`` relationship pointing back at ``Engineer`` itself.

    Exercises ``has()``, joins through explicit aliases, and comparison
    operators (``==``, ``!=``) on the self-referential relationship.
    """
    run_setup_mappers = "once"
    @classmethod
    def define_tables(cls, metadata):
        # base table for the Person polymorphic hierarchy
        Table(
            "people",
            metadata,
            Column(
                "person_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(50)),
            Column("type", String(30)),
        )
        # subclass table; reports_to_id references the *same* table's
        # person_id, making the subclass self-referential
        Table(
            "engineers",
            metadata,
            Column(
                "person_id",
                Integer,
                ForeignKey("people.person_id"),
                primary_key=True,
            ),
            Column(
                "reports_to_id", Integer, ForeignKey("engineers.person_id")
            ),
        )
    @classmethod
    def setup_mappers(cls):
        engineers = cls.tables.engineers
        people = cls.tables.people
        cls.mapper_registry.map_imperatively(
            Person,
            people,
            polymorphic_on=people.c.type,
            polymorphic_identity="person",
        )
        cls.mapper_registry.map_imperatively(
            Engineer,
            engineers,
            inherits=Person,
            polymorphic_identity="engineer",
            properties={
                # many-to-one back onto Engineer; explicit primaryjoin and
                # remote_side are required because both sides of the join
                # are columns of the same table
                "reports_to": relationship(
                    Engineer,
                    primaryjoin=(
                        engineers.c.person_id == engineers.c.reports_to_id
                    ),
                    backref="engineers",
                    remote_side=engineers.c.person_id,
                )
            },
        )
    def _two_obj_fixture(self):
        # e2 reports to e1
        e1 = Engineer(name="wally")
        e2 = Engineer(name="dilbert", reports_to=e1)
        sess = fixture_session()
        sess.add_all([e1, e2])
        sess.commit()
        return sess
    def _five_obj_fixture(self):
        # e3 -> e1, e4 -> e2; e5 reports to no one
        sess = fixture_session()
        e1, e2, e3, e4, e5 = [Engineer(name="e%d" % (i + 1)) for i in range(5)]
        e3.reports_to = e1
        e4.reports_to = e2
        sess.add_all([e1, e2, e3, e4, e5])
        sess.commit()
        return sess
    def test_has(self):
        # has() on the self-referential many-to-one
        sess = self._two_obj_fixture()
        eq_(
            sess.query(Engineer)
            .filter(Engineer.reports_to.has(Engineer.name == "wally"))
            .first(),
            Engineer(name="dilbert"),
        )
    def test_join_explicit_alias(self):
        # join the "engineers" backref collection through an explicit alias
        sess = self._five_obj_fixture()
        ea = aliased(Engineer)
        eq_(
            sess.query(Engineer)
            .join(ea, Engineer.engineers)
            .filter(Engineer.name == "e1")
            .all(),
            [Engineer(name="e1")],
        )
    def test_join_aliased_one(self):
        # join the many-to-one side using the string relationship name
        sess = self._two_obj_fixture()
        ea = aliased(Engineer)
        eq_(
            sess.query(Engineer)
            .join(ea, "reports_to")
            .filter(ea.name == "wally")
            .first(),
            Engineer(name="dilbert"),
        )
    def test_join_aliased_two(self):
        # filter on the aliased (joined) entity rather than the lead entity
        sess = self._five_obj_fixture()
        ea = aliased(Engineer)
        eq_(
            sess.query(Engineer)
            .join(ea, Engineer.engineers)
            .filter(ea.name == "e4")
            .all(),
            [Engineer(name="e2")],
        )
    def test_relationship_compare(self):
        # compare the aliased relationship to None / an instance / not-None
        sess = self._five_obj_fixture()
        e1 = sess.query(Engineer).filter_by(name="e1").one()
        e2 = sess.query(Engineer).filter_by(name="e2").one()
        ea = aliased(Engineer)
        eq_(
            sess.query(Engineer)
            .join(ea, Engineer.engineers)
            .filter(ea.reports_to == None)
            .all(),  # noqa
            [],
        )
        eq_(
            sess.query(Engineer)
            .join(ea, Engineer.engineers)
            .filter(ea.reports_to == e1)
            .all(),
            [e1],
        )
        eq_(
            sess.query(Engineer)
            .join(ea, Engineer.engineers)
            .filter(ea.reports_to != None)
            .all(),  # noqa
            [e1, e2],
        )
class M2MFilterTest(fixtures.MappedTest):
    """Many-to-many between ``Organization`` and the joined-inheritance
    ``Engineer`` subclass, exercising ``contains()`` / ``any()`` filters
    both with and without ``of_type()``.
    """
    run_setup_mappers = "once"
    run_inserts = "once"
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "organizations",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("name", String(50)),
        )
        # association table: note it references the *subclass* table
        # (engineers.person_id), not the base people table
        Table(
            "engineers_to_org",
            metadata,
            Column("org_id", Integer, ForeignKey("organizations.id")),
            Column("engineer_id", Integer, ForeignKey("engineers.person_id")),
        )
        Table(
            "people",
            metadata,
            Column(
                "person_id",
                Integer,
                primary_key=True,
                test_needs_autoincrement=True,
            ),
            Column("name", String(50)),
            Column("type", String(30)),
        )
        Table(
            "engineers",
            metadata,
            Column(
                "person_id",
                Integer,
                ForeignKey("people.person_id"),
                primary_key=True,
            ),
            Column("primary_language", String(50)),
        )
    @classmethod
    def setup_mappers(cls):
        organizations = cls.tables.organizations
        people = cls.tables.people
        engineers = cls.tables.engineers
        engineers_to_org = cls.tables.engineers_to_org
        class Organization(cls.Comparable):
            pass
        cls.mapper_registry.map_imperatively(
            Organization,
            organizations,
            properties={
                "engineers": relationship(
                    Engineer,
                    secondary=engineers_to_org,
                    backref="organizations",
                )
            },
        )
        cls.mapper_registry.map_imperatively(
            Person,
            people,
            polymorphic_on=people.c.type,
            polymorphic_identity="person",
        )
        cls.mapper_registry.map_imperatively(
            Engineer,
            engineers,
            inherits=Person,
            polymorphic_identity="engineer",
        )
    @classmethod
    def insert_data(cls, connection):
        # org1 holds e1/e2, org2 holds e3/e4
        Organization = cls.classes.Organization
        e1 = Engineer(name="e1")
        e2 = Engineer(name="e2")
        e3 = Engineer(name="e3")
        e4 = Engineer(name="e4")
        org1 = Organization(name="org1", engineers=[e1, e2])
        org2 = Organization(name="org2", engineers=[e3, e4])
        with sessionmaker(connection).begin() as sess:
            sess.add(org1)
            sess.add(org2)
    def test_not_contains(self):
        Organization = self.classes.Organization
        sess = fixture_session()
        e1 = sess.query(Person).filter(Engineer.name == "e1").one()
        # negated contains() with explicit of_type()
        eq_(
            sess.query(Organization)
            .filter(~Organization.engineers.of_type(Engineer).contains(e1))
            .all(),
            [Organization(name="org2")],
        )
        # plain (non-of_type) form; this case previously had a bug
        eq_(
            sess.query(Organization)
            .filter(~Organization.engineers.contains(e1))
            .all(),
            [Organization(name="org2")],
        )
    def test_any(self):
        sess = fixture_session()
        Organization = self.classes.Organization
        # any() with of_type() ...
        eq_(
            sess.query(Organization)
            .filter(
                Organization.engineers.of_type(Engineer).any(
                    Engineer.name == "e1"
                )
            )
            .all(),
            [Organization(name="org1")],
        )
        # ... and the plain form produce the same result
        eq_(
            sess.query(Organization)
            .filter(Organization.engineers.any(Engineer.name == "e1"))
            .all(),
            [Organization(name="org1")],
        )
class SelfReferentialM2MTest(fixtures.MappedTest, AssertsCompiledSQL):
    """Self-referential many-to-many between two joined-inheritance
    siblings (``Child1`` <-> ``Child2``) through a shared ``secondary``
    table whose both foreign keys point at the common ``parent`` table.

    Checks result correctness plus exact compiled SQL for both the
    auto-aliased and explicitly-aliased join forms.
    """
    __dialect__ = "default"
    @classmethod
    def define_tables(cls, metadata):
        # both columns of the association table reference parent.id;
        # the relationship's primaryjoin/secondaryjoin disambiguate them
        Table(
            "secondary",
            metadata,
            Column(
                "left_id", Integer, ForeignKey("parent.id"), nullable=False
            ),
            Column(
                "right_id", Integer, ForeignKey("parent.id"), nullable=False
            ),
        )
        Table(
            "parent",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("cls", String(50)),
        )
        Table(
            "child1",
            metadata,
            Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
        )
        Table(
            "child2",
            metadata,
            Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
        )
    @classmethod
    def setup_classes(cls):
        class Parent(cls.Basic):
            pass
        class Child1(Parent):
            pass
        class Child2(Parent):
            pass
    @classmethod
    def setup_mappers(cls):
        child1 = cls.tables.child1
        child2 = cls.tables.child2
        Parent = cls.classes.Parent
        parent = cls.tables.parent
        Child1 = cls.classes.Child1
        Child2 = cls.classes.Child2
        secondary = cls.tables.secondary
        cls.mapper_registry.map_imperatively(
            Parent, parent, polymorphic_on=parent.c.cls
        )
        cls.mapper_registry.map_imperatively(
            Child1,
            child1,
            inherits=Parent,
            polymorphic_identity="child1",
            properties={
                # scalar (uselist=False) m2m to Child2; backref gives
                # Child2.right_children as the reverse collection
                "left_child2": relationship(
                    Child2,
                    secondary=secondary,
                    primaryjoin=parent.c.id == secondary.c.right_id,
                    secondaryjoin=parent.c.id == secondary.c.left_id,
                    uselist=False,
                    backref="right_children",
                )
            },
        )
        cls.mapper_registry.map_imperatively(
            Child2, child2, inherits=Parent, polymorphic_identity="child2"
        )
    def test_query_crit(self):
        Child1, Child2 = self.classes.Child1, self.classes.Child2
        sess = fixture_session()
        c11, c12, c13 = Child1(), Child1(), Child1()
        c21, c22, c23 = Child2(), Child2(), Child2()
        c11.left_child2 = c22
        c12.left_child2 = c22
        c13.left_child2 = c23
        sess.add_all([c11, c12, c13, c21, c22, c23])
        sess.flush()
        # auto alias test:
        # test that the join to Child2 doesn't alias Child1 in the select
        stmt = select(Child1).join(Child1.left_child2)
        with _aliased_join_warning("Child2->child2"):
            eq_(
                set(sess.execute(stmt).scalars().unique()),
                set([c11, c12, c13]),
            )
        with _aliased_join_warning("Child2->child2"):
            eq_(
                set(sess.query(Child1, Child2).join(Child1.left_child2)),
                set([(c11, c22), (c12, c22), (c13, c23)]),
            )
        # manual alias test:
        c2 = aliased(Child2)
        stmt = select(Child1).join(Child1.left_child2.of_type(c2))
        eq_(
            set(sess.execute(stmt).scalars().unique()),
            set([c11, c12, c13]),
        )
        eq_(
            set(sess.query(Child1, c2).join(Child1.left_child2.of_type(c2))),
            set([(c11, c22), (c12, c22), (c13, c23)]),
        )
        # test __eq__() on property is annotating correctly
        stmt = (
            select(Child2)
            .join(Child2.right_children)
            .where(Child1.left_child2 == c22)
        )
        with _aliased_join_warning("Child1->child1"):
            eq_(
                set(sess.execute(stmt).scalars().unique()),
                set([c22]),
            )
        # manual aliased version
        c1 = aliased(Child1, flat=True)
        stmt = (
            select(Child2)
            .join(Child2.right_children.of_type(c1))
            .where(c1.left_child2 == c22)
        )
        eq_(
            set(sess.execute(stmt).scalars().unique()),
            set([c22]),
        )
        # test the same again
        # (compiled-SQL form of the auto-aliased query above)
        with _aliased_join_warning("Child1->child1"):
            self.assert_compile(
                sess.query(Child2)
                .join(Child2.right_children)
                .filter(Child1.left_child2 == c22)
                .statement,
                "SELECT child2.id, parent.id AS id_1, parent.cls "
                "FROM secondary AS secondary_1, parent "
                "JOIN child2 ON parent.id = child2.id "
                "JOIN secondary AS secondary_2 ON parent.id = "
                "secondary_2.left_id "
                "JOIN (parent AS parent_1 JOIN child1 AS child1_1 "
                "ON parent_1.id = child1_1.id) ON parent_1.id = "
                "secondary_2.right_id "
                "WHERE parent_1.id = secondary_1.right_id "
                "AND :param_1 = secondary_1.left_id",
            )
        # non aliased version
        self.assert_compile(
            sess.query(Child2)
            .join(Child2.right_children.of_type(c1))
            .filter(c1.left_child2 == c22)
            .statement,
            "SELECT child2.id, parent.id AS id_1, parent.cls "
            "FROM secondary AS secondary_1, parent "
            "JOIN child2 ON parent.id = child2.id "
            "JOIN secondary AS secondary_2 ON parent.id = secondary_2.left_id "
            "JOIN (parent AS parent_1 JOIN child1 AS child1_1 "
            "ON parent_1.id = child1_1.id) ON parent_1.id = "
            "secondary_2.right_id "
            "WHERE parent_1.id = secondary_1.right_id "
            "AND :param_1 = secondary_1.left_id",
        )
    def test_query_crit_core_workaround(self):
        # do a test in the style of orm/test_core_compilation.py
        Child1, Child2 = self.classes.Child1, self.classes.Child2
        secondary = self.tables.secondary
        configure_mappers()
        from sqlalchemy.sql import join
        C1 = aliased(Child1, flat=True)
        # this was "figure out all the things we need to do in Core to make
        # the identical query that the ORM renders.", however as of
        # I765a0b912b3dcd0e995426427d8bb7997cbffd51 this is using the ORM
        # to create the query in any case
        salias = secondary.alias()
        stmt = (
            select(Child2)
            .select_from(
                join(
                    Child2,
                    salias,
                    # .expressions[1] selects the child-table column of the
                    # joined-inheritance composite "id" attribute
                    Child2.id.expressions[1] == salias.c.left_id,
                ).join(C1, salias.c.right_id == C1.id.expressions[1])
            )
            .where(C1.left_child2 == Child2(id=1))
        )
        self.assert_compile(
            stmt.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
            "SELECT child2.id AS child2_id, parent.id AS parent_id, "
            "parent.cls AS parent_cls "
            "FROM secondary AS secondary_1, "
            "parent JOIN child2 ON parent.id = child2.id JOIN secondary AS "
            "secondary_2 ON parent.id = secondary_2.left_id JOIN "
            "(parent AS parent_1 JOIN child1 AS child1_1 "
            "ON parent_1.id = child1_1.id) "
            "ON parent_1.id = secondary_2.right_id WHERE "
            "parent_1.id = secondary_1.right_id AND :param_1 = "
            "secondary_1.left_id",
        )
    def test_eager_join(self):
        Child1, Child2 = self.classes.Child1, self.classes.Child2
        sess = fixture_session()
        c1 = Child1()
        c1.left_child2 = Child2()
        sess.add(c1)
        sess.flush()
        # test that the splicing of the join works here, doesn't break in
        # the middle of "parent join child1"
        q = sess.query(Child1).options(joinedload("left_child2"))
        self.assert_compile(
            q.limit(1).statement,
            "SELECT child1.id, parent.id AS id_1, parent.cls, "
            "child2_1.id AS id_2, parent_1.id AS id_3, parent_1.cls AS cls_1 "
            "FROM parent JOIN child1 ON parent.id = child1.id "
            "LEFT OUTER JOIN (secondary AS secondary_1 "
            "JOIN (parent AS parent_1 JOIN child2 AS child2_1 "
            "ON parent_1.id = child2_1.id) ON parent_1.id = "
            "secondary_1.left_id) ON parent.id = secondary_1.right_id "
            "LIMIT :param_1",
            checkparams={"param_1": 1},
        )
        # another way to check
        eq_(
            sess.scalar(
                select(func.count("*")).select_from(q.limit(1).subquery())
            ),
            1,
        )
        assert q.first() is c1
    def test_subquery_load(self):
        # subqueryload across the self-referential m2m loads cleanly
        Child1, Child2 = self.classes.Child1, self.classes.Child2
        sess = fixture_session()
        c1 = Child1()
        c1.left_child2 = Child2()
        sess.add(c1)
        sess.flush()
        sess.expunge_all()
        query_ = sess.query(Child1).options(subqueryload("left_child2"))
        for row in query_.all():
            assert row.left_child2
class EagerToSubclassTest(fixtures.MappedTest):
    """Test eager loads to subclass mappers"""
    run_setup_classes = "once"
    run_setup_mappers = "once"
    run_inserts = "once"
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "parent",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(10)),
        )
        Table(
            "base",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("type", String(10)),
            Column("related_id", Integer, ForeignKey("related.id")),
        )
        # joined-inheritance subclass table; parent_id links the subclass
        # (not the base) to Parent.children
        Table(
            "sub",
            metadata,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
            Column("data", String(10)),
            Column(
                "parent_id", Integer, ForeignKey("parent.id"), nullable=False
            ),
        )
        Table(
            "related",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("data", String(10)),
        )
    @classmethod
    def setup_classes(cls):
        class Parent(cls.Comparable):
            pass
        class Base(cls.Comparable):
            pass
        class Sub(Base):
            pass
        class Related(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        sub = cls.tables.sub
        Sub = cls.classes.Sub
        base = cls.tables.base
        Base = cls.classes.Base
        parent = cls.tables.parent
        Parent = cls.classes.Parent
        related = cls.tables.related
        Related = cls.classes.Related
        cls.mapper_registry.map_imperatively(
            Parent,
            parent,
            # Parent.children targets the Sub subclass directly
            properties={"children": relationship(Sub, order_by=sub.c.data)},
        )
        cls.mapper_registry.map_imperatively(
            Base,
            base,
            polymorphic_on=base.c.type,
            polymorphic_identity="b",
            properties={"related": relationship(Related)},
        )
        cls.mapper_registry.map_imperatively(
            Sub, sub, inherits=Base, polymorphic_identity="s"
        )
        cls.mapper_registry.map_imperatively(Related, related)
    @classmethod
    def insert_data(cls, connection):
        # p1/p2 are intentionally module-level globals: the tests below
        # compare loaded results against these exact fixtures
        global p1, p2
        Parent = cls.classes.Parent
        Sub = cls.classes.Sub
        Related = cls.classes.Related
        sess = Session(connection)
        r1, r2 = Related(data="r1"), Related(data="r2")
        s1 = Sub(data="s1", related=r1)
        s2 = Sub(data="s2", related=r2)
        s3 = Sub(data="s3")
        s4 = Sub(data="s4", related=r2)
        s5 = Sub(data="s5")
        p1 = Parent(data="p1", children=[s1, s2, s3])
        p2 = Parent(data="p2", children=[s4, s5])
        sess.add(p1)
        sess.add(p2)
        sess.commit()
    def test_joinedload(self):
        # joinedload to the subclass collection: single SELECT
        Parent = self.classes.Parent
        sess = fixture_session()
        def go():
            eq_(
                sess.query(Parent).options(joinedload(Parent.children)).all(),
                [p1, p2],
            )
        self.assert_sql_count(testing.db, go, 1)
    def test_contains_eager(self):
        # explicit join + contains_eager: also a single SELECT
        Parent = self.classes.Parent
        Sub = self.classes.Sub
        sess = fixture_session()
        def go():
            eq_(
                sess.query(Parent)
                .join(Parent.children)
                .options(contains_eager(Parent.children))
                .order_by(Parent.data, Sub.data)
                .all(),
                [p1, p2],
            )
        self.assert_sql_count(testing.db, go, 1)
    def test_subq_through_related(self):
        # chained subqueryload: Parent -> children -> related, 3 SELECTs
        Parent = self.classes.Parent
        Base = self.classes.Base
        sess = fixture_session()
        def go():
            eq_(
                sess.query(Parent)
                .options(
                    subqueryload(Parent.children).subqueryload(Base.related)
                )
                .order_by(Parent.data)
                .all(),
                [p1, p2],
            )
        self.assert_sql_count(testing.db, go, 3)
    def test_subq_through_related_aliased(self):
        # same chain but starting from an aliased Parent entity
        Parent = self.classes.Parent
        Base = self.classes.Base
        pa = aliased(Parent)
        sess = fixture_session()
        def go():
            eq_(
                sess.query(pa)
                .options(subqueryload(pa.children).subqueryload(Base.related))
                .order_by(pa.data)
                .all(),
                [p1, p2],
            )
        self.assert_sql_count(testing.db, go, 3)
class SubClassEagerToSubClassTest(fixtures.MappedTest):
    """Test joinedloads from subclass to subclass mappers"""
    run_setup_classes = "once"
    run_setup_mappers = "once"
    run_inserts = "once"
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        # Parent/Subparent joined-inheritance pair ...
        Table(
            "parent",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("type", String(10)),
        )
        Table(
            "subparent",
            metadata,
            Column("id", Integer, ForeignKey("parent.id"), primary_key=True),
            Column("data", String(10)),
        )
        # ... and Base/Sub joined-inheritance pair; sub.subparent_id links
        # subclass to subclass
        Table(
            "base",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("type", String(10)),
        )
        Table(
            "sub",
            metadata,
            Column("id", Integer, ForeignKey("base.id"), primary_key=True),
            Column("data", String(10)),
            Column(
                "subparent_id",
                Integer,
                ForeignKey("subparent.id"),
                nullable=False,
            ),
        )
    @classmethod
    def setup_classes(cls):
        class Parent(cls.Comparable):
            pass
        class Subparent(Parent):
            pass
        class Base(cls.Comparable):
            pass
        class Sub(Base):
            pass
    @classmethod
    def setup_mappers(cls):
        sub = cls.tables.sub
        Sub = cls.classes.Sub
        base = cls.tables.base
        Base = cls.classes.Base
        parent = cls.tables.parent
        Parent = cls.classes.Parent
        subparent = cls.tables.subparent
        Subparent = cls.classes.Subparent
        cls.mapper_registry.map_imperatively(
            Parent,
            parent,
            polymorphic_on=parent.c.type,
            polymorphic_identity="b",
        )
        cls.mapper_registry.map_imperatively(
            Subparent,
            subparent,
            inherits=Parent,
            polymorphic_identity="s",
            # the subclass-to-subclass relationship under test
            properties={"children": relationship(Sub, order_by=base.c.id)},
        )
        cls.mapper_registry.map_imperatively(
            Base, base, polymorphic_on=base.c.type, polymorphic_identity="b"
        )
        cls.mapper_registry.map_imperatively(
            Sub, sub, inherits=Base, polymorphic_identity="s"
        )
    @classmethod
    def insert_data(cls, connection):
        # p1/p2 are intentionally module-level globals compared against in
        # the tests below
        global p1, p2
        Sub, Subparent = cls.classes.Sub, cls.classes.Subparent
        with sessionmaker(connection).begin() as sess:
            p1 = Subparent(
                data="p1",
                children=[Sub(data="s1"), Sub(data="s2"), Sub(data="s3")],
            )
            p2 = Subparent(
                data="p2", children=[Sub(data="s4"), Sub(data="s5")]
            )
            sess.add(p1)
            sess.add(p2)
    def test_joinedload(self):
        # attribute-based then string-based joinedload: 1 SELECT each
        Subparent = self.classes.Subparent
        sess = fixture_session()
        def go():
            eq_(
                sess.query(Subparent)
                .options(joinedload(Subparent.children))
                .all(),
                [p1, p2],
            )
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()
        def go():
            eq_(
                sess.query(Subparent).options(joinedload("children")).all(),
                [p1, p2],
            )
        self.assert_sql_count(testing.db, go, 1)
    def test_contains_eager(self):
        # explicit join + contains_eager, both spellings: 1 SELECT each
        Subparent = self.classes.Subparent
        sess = fixture_session()
        def go():
            eq_(
                sess.query(Subparent)
                .join(Subparent.children)
                .options(contains_eager(Subparent.children))
                .all(),
                [p1, p2],
            )
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()
        def go():
            eq_(
                sess.query(Subparent)
                .join(Subparent.children)
                .options(contains_eager("children"))
                .all(),
                [p1, p2],
            )
        self.assert_sql_count(testing.db, go, 1)
    def test_subqueryload(self):
        # subqueryload issues the lead SELECT plus one per relationship: 2
        Subparent = self.classes.Subparent
        sess = fixture_session()
        def go():
            eq_(
                sess.query(Subparent)
                .options(subqueryload(Subparent.children))
                .all(),
                [p1, p2],
            )
        self.assert_sql_count(testing.db, go, 2)
        sess.expunge_all()
        def go():
            eq_(
                sess.query(Subparent).options(subqueryload("children")).all(),
                [p1, p2],
            )
        self.assert_sql_count(testing.db, go, 2)
class SameNamedPropTwoPolymorphicSubClassesTest(fixtures.MappedTest):
    """test pathing when two subclasses contain a different property
    for the same name, and polymorphic loading is used.
    #2614
    """
    run_setup_classes = "once"
    run_setup_mappers = "once"
    run_inserts = "once"
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        # A is the polymorphic base; B and C are joined-inh subclasses,
        # each with its own m2m association table (btod / ctod) to D
        Table(
            "a",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("type", String(10)),
        )
        Table(
            "b",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
        )
        Table(
            "btod",
            metadata,
            Column("bid", Integer, ForeignKey("b.id"), nullable=False),
            Column("did", Integer, ForeignKey("d.id"), nullable=False),
        )
        Table(
            "c",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
        )
        Table(
            "ctod",
            metadata,
            Column("cid", Integer, ForeignKey("c.id"), nullable=False),
            Column("did", Integer, ForeignKey("d.id"), nullable=False),
        )
        Table(
            "d",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
        )
    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass
        class B(A):
            pass
        class C(A):
            pass
        class D(cls.Comparable):
            pass
    @classmethod
    def setup_mappers(cls):
        A = cls.classes.A
        B = cls.classes.B
        C = cls.classes.C
        D = cls.classes.D
        cls.mapper_registry.map_imperatively(
            A, cls.tables.a, polymorphic_on=cls.tables.a.c.type
        )
        # B and C each define a *distinct* relationship that shares the
        # name "related" -- the condition under test
        cls.mapper_registry.map_imperatively(
            B,
            cls.tables.b,
            inherits=A,
            polymorphic_identity="b",
            properties={"related": relationship(D, secondary=cls.tables.btod)},
        )
        cls.mapper_registry.map_imperatively(
            C,
            cls.tables.c,
            inherits=A,
            polymorphic_identity="c",
            properties={"related": relationship(D, secondary=cls.tables.ctod)},
        )
        cls.mapper_registry.map_imperatively(D, cls.tables.d)
    @classmethod
    def insert_data(cls, connection):
        # one B and one C, both related to the same single D row
        B = cls.classes.B
        C = cls.classes.C
        D = cls.classes.D
        session = Session(connection)
        d = D()
        session.add_all([B(related=[d]), C(related=[d])])
        session.commit()
    def test_free_w_poly_subquery(self):
        # free-standing with_polymorphic() + subqueryload on both "related"
        A = self.classes.A
        B = self.classes.B
        C = self.classes.C
        D = self.classes.D
        session = fixture_session()
        d = session.query(D).one()
        a_poly = with_polymorphic(A, [B, C])
        def go():
            for a in session.query(a_poly).options(
                subqueryload(a_poly.B.related), subqueryload(a_poly.C.related)
            ):
                eq_(a.related, [d])
        self.assert_sql_count(testing.db, go, 3)
    def test_fixed_w_poly_subquery(self):
        A = self.classes.A
        B = self.classes.B
        C = self.classes.C
        D = self.classes.D
        session = fixture_session()
        d = session.query(D).one()
        def go():
            # NOTE: subqueryload is broken for this case, first found
            # when cartesian product detection was added.
            for a in (
                session.query(A)
                .with_polymorphic([B, C])
                .options(selectinload(B.related), selectinload(C.related))
            ):
                eq_(a.related, [d])
        self.assert_sql_count(testing.db, go, 3)
    def test_free_w_poly_joined(self):
        # joinedload version of the free with_polymorphic(): 1 SELECT
        A = self.classes.A
        B = self.classes.B
        C = self.classes.C
        D = self.classes.D
        session = fixture_session()
        d = session.query(D).one()
        a_poly = with_polymorphic(A, [B, C])
        def go():
            for a in session.query(a_poly).options(
                joinedload(a_poly.B.related), joinedload(a_poly.C.related)
            ):
                eq_(a.related, [d])
        self.assert_sql_count(testing.db, go, 1)
    def test_fixed_w_poly_joined(self):
        # joinedload with query-level with_polymorphic(): 1 SELECT
        A = self.classes.A
        B = self.classes.B
        C = self.classes.C
        D = self.classes.D
        session = fixture_session()
        d = session.query(D).one()
        def go():
            for a in (
                session.query(A)
                .with_polymorphic([B, C])
                .options(joinedload(B.related), joinedload(C.related))
            ):
                eq_(a.related, [d])
        self.assert_sql_count(testing.db, go, 1)
class SubClassToSubClassFromParentTest(fixtures.MappedTest):
    """test #2617"""
    run_setup_classes = "once"
    run_setup_mappers = "once"
    run_inserts = "once"
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "z",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
        )
        Table(
            "a",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("type", String(10)),
            Column("z_id", Integer, ForeignKey("z.id")),
        )
        Table(
            "b",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
        )
        # d is both a subclass table of a and the target of B.related
        Table(
            "d",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
            Column("b_id", Integer, ForeignKey("b.id")),
        )
    @classmethod
    def setup_classes(cls):
        class Z(cls.Comparable):
            pass
        class A(cls.Comparable):
            pass
        class B(A):
            pass
        class D(A):
            pass
    @classmethod
    def setup_mappers(cls):
        Z = cls.classes.Z
        A = cls.classes.A
        B = cls.classes.B
        D = cls.classes.D
        cls.mapper_registry.map_imperatively(Z, cls.tables.z)
        # base mapper loads polymorphically over all subclass tables
        # ("*") and subquery-eager-loads "zs"
        cls.mapper_registry.map_imperatively(
            A,
            cls.tables.a,
            polymorphic_on=cls.tables.a.c.type,
            with_polymorphic="*",
            properties={"zs": relationship(Z, lazy="subquery")},
        )
        cls.mapper_registry.map_imperatively(
            B,
            cls.tables.b,
            inherits=A,
            polymorphic_identity="b",
            properties={
                # sibling-subclass relationship, also subquery-eager
                "related": relationship(
                    D,
                    lazy="subquery",
                    primaryjoin=cls.tables.d.c.b_id == cls.tables.b.c.id,
                )
            },
        )
        cls.mapper_registry.map_imperatively(
            D, cls.tables.d, inherits=A, polymorphic_identity="d"
        )
    @classmethod
    def insert_data(cls, connection):
        # single B row; its "related" collection is empty
        B = cls.classes.B
        session = Session(connection)
        session.add(B())
        session.commit()
    def test_2617(self):
        # querying the base class triggers both subquery eager loads:
        # lead SELECT + one per lazy="subquery" relationship = 3
        A = self.classes.A
        session = fixture_session()
        def go():
            a1 = session.query(A).first()
            eq_(a1.related, [])
        self.assert_sql_count(testing.db, go, 3)
class SubClassToSubClassMultiTest(AssertsCompiledSQL, fixtures.MappedTest):
"""
Two different joined-inh subclasses, led by a
parent, with two distinct endpoints:
parent -> subcl1 -> subcl2 -> (ep1, ep2)
the join to ep2 indicates we need to join
from the middle of the joinpoint, skipping ep1
"""
run_create_tables = None
run_deletes = None
__dialect__ = "default"
@classmethod
def define_tables(cls, metadata):
Table(
"parent",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"base1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"sub1",
metadata,
Column("id", Integer, ForeignKey("base1.id"), primary_key=True),
Column("parent_id", ForeignKey("parent.id")),
Column("subdata", String(30)),
)
Table(
"base2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("base1_id", ForeignKey("base1.id")),
Column("data", String(30)),
)
Table(
"sub2",
metadata,
Column("id", Integer, ForeignKey("base2.id"), primary_key=True),
Column("subdata", String(30)),
)
Table(
"ep1",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("base2_id", Integer, ForeignKey("base2.id")),
Column("data", String(30)),
)
Table(
"ep2",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("base2_id", Integer, ForeignKey("base2.id")),
Column("data", String(30)),
)
@classmethod
def setup_classes(cls):
class Parent(cls.Comparable):
pass
class Base1(cls.Comparable):
pass
class Sub1(Base1):
pass
class Base2(cls.Comparable):
pass
class Sub2(Base2):
pass
class EP1(cls.Comparable):
pass
class EP2(cls.Comparable):
pass
@classmethod
def _classes(cls):
return (
cls.classes.Parent,
cls.classes.Base1,
cls.classes.Base2,
cls.classes.Sub1,
cls.classes.Sub2,
cls.classes.EP1,
cls.classes.EP2,
)
@classmethod
def setup_mappers(cls):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = cls._classes()
cls.mapper_registry.map_imperatively(
Parent, cls.tables.parent, properties={"sub1": relationship(Sub1)}
)
cls.mapper_registry.map_imperatively(
Base1, cls.tables.base1, properties={"sub2": relationship(Sub2)}
)
cls.mapper_registry.map_imperatively(
Sub1, cls.tables.sub1, inherits=Base1
)
cls.mapper_registry.map_imperatively(
Base2,
cls.tables.base2,
properties={"ep1": relationship(EP1), "ep2": relationship(EP2)},
)
cls.mapper_registry.map_imperatively(
Sub2, cls.tables.sub2, inherits=Base2
)
cls.mapper_registry.map_imperatively(EP1, cls.tables.ep1)
cls.mapper_registry.map_imperatively(EP2, cls.tables.ep2)
def test_one(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = fixture_session()
self.assert_compile(
s.query(Parent)
.join(Parent.sub1, Sub1.sub2)
.join(Sub2.ep1)
.join(Sub2.ep2),
"SELECT parent.id AS parent_id, parent.data AS parent_data "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 JOIN sub2 "
"ON base2.id = sub2.id) "
"ON base1.id = base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id",
)
def test_two(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s2a = aliased(Sub2, flat=True)
s = fixture_session()
self.assert_compile(
s.query(Parent).join(Parent.sub1).join(s2a, Sub1.sub2),
"SELECT parent.id AS parent_id, parent.data AS parent_data "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 AS base2_1 JOIN sub2 AS sub2_1 "
"ON base2_1.id = sub2_1.id) "
"ON base1.id = base2_1.base1_id",
)
def test_three(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = fixture_session()
self.assert_compile(
s.query(Base1).join(Base1.sub2).join(Sub2.ep1).join(Sub2.ep2),
"SELECT base1.id AS base1_id, base1.data AS base1_data "
"FROM base1 JOIN (base2 JOIN sub2 "
"ON base2.id = sub2.id) ON base1.id = "
"base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id",
)
def test_four(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = fixture_session()
self.assert_compile(
s.query(Sub2)
.join(Base1, Base1.id == Sub2.base1_id)
.join(Sub2.ep1)
.join(Sub2.ep2),
"SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM base2 JOIN sub2 ON base2.id = sub2.id "
"JOIN base1 ON base1.id = base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id",
)
def test_five(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = fixture_session()
self.assert_compile(
s.query(Sub2)
.join(Sub1, Sub1.id == Sub2.base1_id)
.join(Sub2.ep1)
.join(Sub2.ep2),
"SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM base2 JOIN sub2 ON base2.id = sub2.id "
"JOIN "
"(base1 JOIN sub1 ON base1.id = sub1.id) "
"ON sub1.id = base2.base1_id "
"JOIN ep1 ON base2.id = ep1.base2_id "
"JOIN ep2 ON base2.id = ep2.base2_id",
)
def test_six_legacy(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = fixture_session()
# as of from_self() changing in
# I3abfb45dd6e50f84f29d39434caa0b550ce27864,
# this query is coming out instead which is equivalent, but not
# totally sure where this happens
with testing.expect_deprecated(r"The Query.from_self\(\) method"):
self.assert_compile(
s.query(Sub2).from_self().join(Sub2.ep1).join(Sub2.ep2),
"SELECT anon_1.sub2_id AS anon_1_sub2_id, "
"anon_1.base2_base1_id AS anon_1_base2_base1_id, "
"anon_1.base2_data AS anon_1_base2_data, "
"anon_1.sub2_subdata AS anon_1_sub2_subdata "
"FROM (SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM base2 JOIN sub2 ON base2.id = sub2.id) AS anon_1 "
"JOIN ep1 ON anon_1.sub2_id = ep1.base2_id "
"JOIN ep2 ON anon_1.sub2_id = ep2.base2_id",
)
def test_six(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
# as of from_self() changing in
# I3abfb45dd6e50f84f29d39434caa0b550ce27864,
# this query is coming out instead which is equivalent, but not
# totally sure where this happens
stmt = select(Sub2)
subq = aliased(
Sub2,
stmt.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).subquery(),
)
stmt = (
select(subq)
.join(subq.ep1)
.join(Sub2.ep2)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
)
self.assert_compile(
stmt,
"SELECT anon_1.sub2_id AS anon_1_sub2_id, "
"anon_1.base2_base1_id AS anon_1_base2_base1_id, "
"anon_1.base2_data AS anon_1_base2_data, "
"anon_1.sub2_subdata AS anon_1_sub2_subdata "
"FROM (SELECT sub2.id AS sub2_id, base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM base2 JOIN sub2 ON base2.id = sub2.id) AS anon_1 "
"JOIN ep1 ON anon_1.sub2_id = ep1.base2_id "
"JOIN ep2 ON anon_1.sub2_id = ep2.base2_id",
)
def test_seven_legacy(self):
Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
s = fixture_session()
# as of from_self() changing in
# I3abfb45dd6e50f84f29d39434caa0b550ce27864,
# this query is coming out instead which is equivalent, but not
# totally sure where this happens
with testing.expect_deprecated(r"The Query.from_self\(\) method"):
self.assert_compile(
# adding Sub2 to the entities list helps it,
# otherwise the joins for Sub2.ep1/ep2 don't have columns
# to latch onto. Can't really make it better than this
s.query(Parent, Sub2)
.join(Parent.sub1)
.join(Sub1.sub2)
.from_self()
.join(Sub2.ep1)
.join(Sub2.ep2),
"SELECT anon_1.parent_id AS anon_1_parent_id, "
"anon_1.parent_data AS anon_1_parent_data, "
"anon_1.sub2_id AS anon_1_sub2_id, "
"anon_1.base2_base1_id AS anon_1_base2_base1_id, "
"anon_1.base2_data AS anon_1_base2_data, "
"anon_1.sub2_subdata AS anon_1_sub2_subdata "
"FROM (SELECT parent.id AS parent_id, "
"parent.data AS parent_data, "
"sub2.id AS sub2_id, "
"base2.id AS base2_id, "
"base2.base1_id AS base2_base1_id, "
"base2.data AS base2_data, "
"sub2.subdata AS sub2_subdata "
"FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
"ON parent.id = sub1.parent_id JOIN "
"(base2 JOIN sub2 ON base2.id = sub2.id) "
"ON base1.id = base2.base1_id) AS anon_1 "
"JOIN ep1 ON anon_1.sub2_id = ep1.base2_id "
"JOIN ep2 ON anon_1.sub2_id = ep2.base2_id",
)
    def test_seven(self):
        """1.4-style equivalent of test_seven_legacy: an explicit labeled
        subquery with aliased() entities replaces Query.from_self(),
        producing the same rendered SQL without the deprecation.
        """
        Parent, Base1, Base2, Sub1, Sub2, EP1, EP2 = self._classes()
        # as of from_self() changing in
        # I3abfb45dd6e50f84f29d39434caa0b550ce27864,
        # this query is coming out instead which is equivalent, but not
        # totally sure where this happens
        subq = (
            select(Parent, Sub2)
            .join(Parent.sub1)
            .join(Sub1.sub2)
            .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
            .subquery()
        )
        # another 1.4 supercharged select() statement ;)
        palias = aliased(Parent, subq)
        sub2alias = aliased(Sub2, subq)
        stmt = (
            select(palias, sub2alias)
            .join(sub2alias.ep1)
            .join(sub2alias.ep2)
            .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
        )
        self.assert_compile(
            # adding Sub2 to the entities list helps it,
            # otherwise the joins for Sub2.ep1/ep2 don't have columns
            # to latch onto. Can't really make it better than this
            stmt,
            "SELECT anon_1.parent_id AS anon_1_parent_id, "
            "anon_1.parent_data AS anon_1_parent_data, "
            "anon_1.sub2_id AS anon_1_sub2_id, "
            "anon_1.base2_base1_id AS anon_1_base2_base1_id, "
            "anon_1.base2_data AS anon_1_base2_data, "
            "anon_1.sub2_subdata AS anon_1_sub2_subdata "
            "FROM (SELECT parent.id AS parent_id, parent.data AS parent_data, "
            "sub2.id AS sub2_id, "
            "base2.id AS base2_id, "
            "base2.base1_id AS base2_base1_id, "
            "base2.data AS base2_data, "
            "sub2.subdata AS sub2_subdata "
            "FROM parent JOIN (base1 JOIN sub1 ON base1.id = sub1.id) "
            "ON parent.id = sub1.parent_id JOIN "
            "(base2 JOIN sub2 ON base2.id = sub2.id) "
            "ON base1.id = base2.base1_id) AS anon_1 "
            "JOIN ep1 ON anon_1.sub2_id = ep1.base2_id "
            "JOIN ep2 ON anon_1.sub2_id = ep2.base2_id",
        )
class JoinedloadWPolyOfTypeContinued(
    fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
    """test for #5082"""
    @classmethod
    def setup_classes(cls):
        """User 1-* Foo (joined-inh, discriminator 'type'); SubFoo adds a
        sub_bar relationship, while base Foo has a bar relationship; both
        Bar and SubBar point at Fred.
        """
        Base = cls.DeclarativeBasic
        class User(Base):
            __tablename__ = "users"
            id = Column(Integer, primary_key=True)
            foos = relationship("Foo", back_populates="owner")
        class Foo(Base):
            __tablename__ = "foos"
            __mapper_args__ = {"polymorphic_on": "type"}
            id = Column(Integer, primary_key=True)
            type = Column(String(10), nullable=False)
            owner_id = Column(Integer, ForeignKey("users.id"))
            owner = relationship("User", back_populates="foos")
            bar_id = Column(ForeignKey("bars.id"))
            bar = relationship("Bar")
        class SubFoo(Foo):
            __tablename__ = "foos_sub"
            __mapper_args__ = {"polymorphic_identity": "SUB"}
            id = Column(Integer, ForeignKey("foos.id"), primary_key=True)
            baz = Column(Integer)
            sub_bar_id = Column(Integer, ForeignKey("sub_bars.id"))
            sub_bar = relationship("SubBar")
        class Bar(Base):
            __tablename__ = "bars"
            id = Column(Integer, primary_key=True)
            fred_id = Column(Integer, ForeignKey("freds.id"), nullable=False)
            fred = relationship("Fred")
        class SubBar(Base):
            __tablename__ = "sub_bars"
            id = Column(Integer, primary_key=True)
            fred_id = Column(Integer, ForeignKey("freds.id"), nullable=False)
            fred = relationship("Fred")
        class Fred(Base):
            __tablename__ = "freds"
            id = Column(Integer, primary_key=True)
    @classmethod
    def insert_data(cls, connection):
        """Persist one User owning one SubFoo wired to Bar/SubBar/Fred."""
        User, Fred, SubBar, Bar, SubFoo = cls.classes(
            "User", "Fred", "SubBar", "Bar", "SubFoo"
        )
        user = User(id=1)
        fred = Fred(id=1)
        bar = Bar(fred=fred)
        sub_bar = SubBar(fred=fred)
        rectangle = SubFoo(owner=user, baz=10, bar=bar, sub_bar=sub_bar)
        s = Session(connection)
        s.add_all([user, fred, bar, sub_bar, rectangle])
        s.commit()
    def test_joined_load_lastlink_subclass(self):
        """joinedload chain terminating on a subclass-only relationship
        (SubFoo.sub_bar -> SubBar.fred) renders one statement and loads
        everything in a single round trip.
        """
        Foo, User, SubBar = self.classes("Foo", "User", "SubBar")
        s = fixture_session()
        foo_polymorphic = with_polymorphic(Foo, "*", aliased=True)
        foo_load = joinedload(User.foos.of_type(foo_polymorphic))
        query = s.query(User).options(
            foo_load.joinedload(foo_polymorphic.SubFoo.sub_bar).joinedload(
                SubBar.fred
            )
        )
        self.assert_compile(
            query,
            "SELECT users.id AS users_id, anon_1.foos_id AS anon_1_foos_id, "
            "anon_1.foos_type AS anon_1_foos_type, anon_1.foos_owner_id "
            "AS anon_1_foos_owner_id, "
            "anon_1.foos_bar_id AS anon_1_foos_bar_id, "
            "freds_1.id AS freds_1_id, sub_bars_1.id "
            "AS sub_bars_1_id, sub_bars_1.fred_id AS sub_bars_1_fred_id, "
            "anon_1.foos_sub_id AS anon_1_foos_sub_id, "
            "anon_1.foos_sub_baz AS anon_1_foos_sub_baz, "
            "anon_1.foos_sub_sub_bar_id AS anon_1_foos_sub_sub_bar_id "
            "FROM users LEFT OUTER JOIN "
            "(SELECT foos.id AS foos_id, foos.type AS foos_type, "
            "foos.owner_id AS foos_owner_id, foos.bar_id AS foos_bar_id, "
            "foos_sub.id AS foos_sub_id, "
            "foos_sub.baz AS foos_sub_baz, "
            "foos_sub.sub_bar_id AS foos_sub_sub_bar_id "
            "FROM foos LEFT OUTER JOIN foos_sub ON foos.id = foos_sub.id) "
            "AS anon_1 ON users.id = anon_1.foos_owner_id "
            "LEFT OUTER JOIN sub_bars AS sub_bars_1 "
            "ON sub_bars_1.id = anon_1.foos_sub_sub_bar_id "
            "LEFT OUTER JOIN freds AS freds_1 "
            "ON freds_1.id = sub_bars_1.fred_id",
        )
        def go():
            # everything below should already be loaded by the single query
            user = query.one()
            user.foos[0].sub_bar
            user.foos[0].sub_bar.fred
        self.assert_sql_count(testing.db, go, 1)
    def test_joined_load_lastlink_baseclass(self):
        """Same as above but chaining through the base-class relationship
        (Foo.bar -> Bar.fred); still a single query.
        """
        Foo, User, Bar = self.classes("Foo", "User", "Bar")
        s = fixture_session()
        foo_polymorphic = with_polymorphic(Foo, "*", aliased=True)
        foo_load = joinedload(User.foos.of_type(foo_polymorphic))
        query = s.query(User).options(
            foo_load.joinedload(foo_polymorphic.bar).joinedload(Bar.fred)
        )
        self.assert_compile(
            query,
            "SELECT users.id AS users_id, freds_1.id AS freds_1_id, "
            "bars_1.id AS bars_1_id, "
            "bars_1.fred_id AS bars_1_fred_id, "
            "anon_1.foos_id AS anon_1_foos_id, "
            "anon_1.foos_type AS anon_1_foos_type, anon_1.foos_owner_id AS "
            "anon_1_foos_owner_id, anon_1.foos_bar_id AS anon_1_foos_bar_id, "
            "anon_1.foos_sub_id AS anon_1_foos_sub_id, anon_1.foos_sub_baz AS "
            "anon_1_foos_sub_baz, "
            "anon_1.foos_sub_sub_bar_id AS anon_1_foos_sub_sub_bar_id "
            "FROM users LEFT OUTER JOIN (SELECT foos.id AS foos_id, "
            "foos.type AS foos_type, "
            "foos.owner_id AS foos_owner_id, foos.bar_id AS foos_bar_id, "
            "foos_sub.id AS "
            "foos_sub_id, foos_sub.baz AS foos_sub_baz, "
            "foos_sub.sub_bar_id AS "
            "foos_sub_sub_bar_id FROM foos "
            "LEFT OUTER JOIN foos_sub ON foos.id = "
            "foos_sub.id) AS anon_1 ON users.id = anon_1.foos_owner_id "
            "LEFT OUTER JOIN bars "
            "AS bars_1 ON bars_1.id = anon_1.foos_bar_id "
            "LEFT OUTER JOIN freds AS freds_1 ON freds_1.id = bars_1.fred_id",
        )
        def go():
            # everything below should already be loaded by the single query
            user = query.one()
            user.foos[0].bar
            user.foos[0].bar.fred
        self.assert_sql_count(testing.db, go, 1)
class ContainsEagerMultipleOfType(
    fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
    """test for #5107"""
    __dialect__ = "default"
    @classmethod
    def setup_classes(cls):
        """Self-referential setup: B is a single-table subclass of A
        (no __tablename__ of its own) pointing back at A; X is a plain
        table related to A.
        """
        Base = cls.DeclarativeBasic
        class X(Base):
            __tablename__ = "x"
            id = Column(Integer, primary_key=True)
            a_id = Column(Integer, ForeignKey("a.id"))
            a = relationship("A", back_populates="x")
        class A(Base):
            __tablename__ = "a"
            id = Column(Integer, primary_key=True)
            b = relationship("B", back_populates="a")
            kind = Column(String(30))
            x = relationship("X", back_populates="a")
            __mapper_args__ = {
                "polymorphic_identity": "a",
                "polymorphic_on": kind,
                "with_polymorphic": "*",
            }
        class B(A):
            a_id = Column(Integer, ForeignKey("a.id"))
            a = relationship(
                "A", back_populates="b", uselist=False, remote_side=A.id
            )
            __mapper_args__ = {"polymorphic_identity": "b"}
    def test_contains_eager_multi_alias(self):
        """contains_eager() chained across two of_type() aliases uses the
        explicit outerjoins and adds no extra eager-load joins of its own.
        """
        X, B, A = self.classes("X", "B", "A")
        s = fixture_session()
        a_b_alias = aliased(B, name="a_b")
        b_x_alias = aliased(X, name="b_x")
        q = (
            s.query(A)
            .outerjoin(A.b.of_type(a_b_alias))
            .outerjoin(a_b_alias.x.of_type(b_x_alias))
            .options(
                contains_eager(A.b.of_type(a_b_alias)).contains_eager(
                    a_b_alias.x.of_type(b_x_alias)
                )
            )
        )
        self.assert_compile(
            q,
            "SELECT b_x.id AS b_x_id, b_x.a_id AS b_x_a_id, a_b.id AS a_b_id, "
            "a_b.kind AS a_b_kind, a_b.a_id AS a_b_a_id, a.id AS a_id_1, "
            "a.kind AS a_kind, a.a_id AS a_a_id FROM a "
            "LEFT OUTER JOIN a AS a_b ON a.id = a_b.a_id AND a_b.kind IN "
            "([POSTCOMPILE_kind_1]) LEFT OUTER JOIN x AS b_x "
            "ON a_b.id = b_x.a_id",
        )
class JoinedloadSinglePolysubSingle(
    fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
    """exercise issue #3611, using the test from dupe issue 3614"""
    run_define_tables = None
    __dialect__ = "default"
    @classmethod
    def setup_classes(cls):
        """Thing -> Admin (joined-inh subclass of UserRole) -> User, with
        eager (lazy=False) relationships at both hops.
        """
        Base = cls.DeclarativeBasic
        class User(Base):
            __tablename__ = "users"
            id = Column(Integer, primary_key=True)
        class UserRole(Base):
            __tablename__ = "user_roles"
            id = Column(Integer, primary_key=True)
            row_type = Column(String(50), nullable=False)
            __mapper_args__ = {"polymorphic_on": row_type}
            user_id = Column(Integer, ForeignKey("users.id"), nullable=False)
            user = relationship("User", lazy=False)
        class Admin(UserRole):
            __tablename__ = "admins"
            __mapper_args__ = {"polymorphic_identity": "admin"}
            id = Column(Integer, ForeignKey("user_roles.id"), primary_key=True)
        class Thing(Base):
            __tablename__ = "things"
            id = Column(Integer, primary_key=True)
            admin_id = Column(Integer, ForeignKey("admins.id"))
            admin = relationship("Admin", lazy=False)
    def test_query(self):
        """The chained eager loads render nested outer joins against the
        joined-inheritance pair without duplicate aliasing.
        """
        Thing = self.classes.Thing
        sess = fixture_session()
        self.assert_compile(
            sess.query(Thing),
            "SELECT things.id AS things_id, "
            "things.admin_id AS things_admin_id, "
            "users_1.id AS users_1_id, admins_1.id AS admins_1_id, "
            "user_roles_1.id AS user_roles_1_id, "
            "user_roles_1.row_type AS user_roles_1_row_type, "
            "user_roles_1.user_id AS user_roles_1_user_id FROM things "
            "LEFT OUTER JOIN (user_roles AS user_roles_1 JOIN admins "
            "AS admins_1 ON user_roles_1.id = admins_1.id) ON "
            "admins_1.id = things.admin_id "
            "LEFT OUTER JOIN users AS "
            "users_1 ON users_1.id = user_roles_1.user_id",
        )
class JoinedloadOverWPolyAliased(
    fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
    """exercise issues in #3593 and #3611"""
    run_setup_mappers = "each"
    run_setup_classes = "each"
    run_define_tables = "each"
    __dialect__ = "default"
    @classmethod
    def setup_classes(cls):
        """Two single-table polymorphic hierarchies (Owner/SubOwner,
        Parent/Sub1) plus a Link association table; relationships are
        attached per-test by the _fixture_* methods.
        """
        Base = cls.DeclarativeBasic
        class Owner(Base):
            __tablename__ = "owner"
            id = Column(Integer, primary_key=True)
            type = Column(String(20))
            __mapper_args__ = {
                "polymorphic_on": type,
                "with_polymorphic": ("*", None),
            }
        class SubOwner(Owner):
            __mapper_args__ = {"polymorphic_identity": "so"}
        class Parent(Base):
            __tablename__ = "parent"
            id = Column(Integer, primary_key=True)
            type = Column(String(20))
            __mapper_args__ = {
                "polymorphic_on": type,
                "polymorphic_identity": "parent",
                "with_polymorphic": ("*", None),
            }
        class Sub1(Parent):
            __mapper_args__ = {"polymorphic_identity": "s1"}
        class Link(Base):
            __tablename__ = "link"
            parent_id = Column(
                Integer, ForeignKey("parent.id"), primary_key=True
            )
            child_id = Column(
                Integer, ForeignKey("parent.id"), primary_key=True
            )
    def _fixture_from_base(self):
        """Wire Link.child and Parent.links on the base class; return
        Parent as the query entity.
        """
        Parent = self.classes.Parent
        Link = self.classes.Link
        Link.child = relationship(
            Parent, primaryjoin=Link.child_id == Parent.id
        )
        Parent.links = relationship(
            Link, primaryjoin=Parent.id == Link.parent_id
        )
        return Parent
    def _fixture_from_subclass(self):
        """Wire Link.child on Parent but .links on the Sub1 subclass;
        return Sub1 as the query entity.
        """
        Sub1 = self.classes.Sub1
        Link = self.classes.Link
        Parent = self.classes.Parent
        Link.child = relationship(
            Parent, primaryjoin=Link.child_id == Parent.id
        )
        Sub1.links = relationship(Link, primaryjoin=Sub1.id == Link.parent_id)
        return Sub1
    def _fixture_to_subclass_to_base(self):
        """Path Link -> Sub1 -> Owner; Link.child targets the subclass."""
        Owner = self.classes.Owner
        Parent = self.classes.Parent
        Sub1 = self.classes.Sub1
        Link = self.classes.Link
        # Link -> Sub1 -> Owner
        Link.child = relationship(Sub1, primaryjoin=Link.child_id == Sub1.id)
        Parent.owner_id = Column(ForeignKey("owner.id"))
        Parent.owner = relationship(Owner)
        return Parent
    def _fixture_to_base_to_base(self):
        """Path Link -> Parent -> Owner; Link.child targets the base."""
        Owner = self.classes.Owner
        Parent = self.classes.Parent
        Link = self.classes.Link
        # Link -> Parent -> Owner
        Link.child = relationship(
            Parent, primaryjoin=Link.child_id == Parent.id
        )
        Parent.owner_id = Column(ForeignKey("owner.id"))
        Parent.owner = relationship(Owner)
        return Parent
    def test_from_base(self):
        self._test_poly_single_poly(self._fixture_from_base)
    def test_from_sub(self):
        self._test_poly_single_poly(self._fixture_from_subclass)
    def test_to_sub_to_base(self):
        self._test_single_poly_poly(self._fixture_to_subclass_to_base)
    def test_to_base_to_base(self):
        self._test_single_poly_poly(self._fixture_to_base_to_base)
    def _test_poly_single_poly(self, fn):
        """Shared assertion for joinedload chains poly -> Link -> poly;
        a subclass entity adds only the discriminator WHERE clause.
        """
        cls = fn()
        Link = self.classes.Link
        session = fixture_session()
        q = session.query(cls).options(
            joinedload(cls.links).joinedload(Link.child).joinedload(cls.links)
        )
        if cls is self.classes.Sub1:
            extra = " WHERE parent.type IN ([POSTCOMPILE_type_1])"
        else:
            extra = ""
        self.assert_compile(
            q,
            "SELECT parent.id AS parent_id, parent.type AS parent_type, "
            "link_1.parent_id AS link_1_parent_id, "
            "link_1.child_id AS link_1_child_id, "
            "parent_1.id AS parent_1_id, parent_1.type AS parent_1_type, "
            "link_2.parent_id AS link_2_parent_id, "
            "link_2.child_id AS link_2_child_id "
            "FROM parent "
            "LEFT OUTER JOIN link AS link_1 ON parent.id = link_1.parent_id "
            "LEFT OUTER JOIN parent "
            "AS parent_1 ON link_1.child_id = parent_1.id "
            "LEFT OUTER JOIN link AS link_2 "
            "ON parent_1.id = link_2.parent_id" + extra,
        )
    def _test_single_poly_poly(self, fn):
        """Shared assertion for joinedload chains Link -> poly -> poly;
        a subclass target adds the discriminator to the join's ON clause.
        """
        parent_cls = fn()
        Link = self.classes.Link
        session = fixture_session()
        q = session.query(Link).options(
            joinedload(Link.child).joinedload(parent_cls.owner)
        )
        if Link.child.property.mapper.class_ is self.classes.Sub1:
            extra = "AND parent_1.type IN ([POSTCOMPILE_type_1]) "
        else:
            extra = ""
        self.assert_compile(
            q,
            "SELECT link.parent_id AS link_parent_id, "
            "link.child_id AS link_child_id, parent_1.id AS parent_1_id, "
            "parent_1.type AS parent_1_type, "
            "parent_1.owner_id AS parent_1_owner_id, "
            "owner_1.id AS owner_1_id, owner_1.type AS owner_1_type "
            "FROM link LEFT OUTER JOIN parent AS parent_1 "
            "ON link.child_id = parent_1.id "
            + extra
            + "LEFT OUTER JOIN owner AS owner_1 "
            "ON owner_1.id = parent_1.owner_id",
        )
    def test_local_wpoly(self):
        """joinedload path through a local with_polymorphic entity's Sub1
        attribute compiles without duplicated joins.
        """
        Sub1 = self._fixture_from_subclass()
        Parent = self.classes.Parent
        Link = self.classes.Link
        poly = with_polymorphic(Parent, [Sub1])
        session = fixture_session()
        q = session.query(poly).options(
            joinedload(poly.Sub1.links)
            .joinedload(Link.child.of_type(Sub1))
            .joinedload(Sub1.links)
        )
        self.assert_compile(
            q,
            "SELECT parent.id AS parent_id, parent.type AS parent_type, "
            "link_1.parent_id AS link_1_parent_id, "
            "link_1.child_id AS link_1_child_id, "
            "parent_1.id AS parent_1_id, parent_1.type AS parent_1_type, "
            "link_2.parent_id AS link_2_parent_id, "
            "link_2.child_id AS link_2_child_id FROM parent "
            "LEFT OUTER JOIN link AS link_1 ON parent.id = link_1.parent_id "
            "LEFT OUTER JOIN parent AS parent_1 "
            "ON link_1.child_id = parent_1.id "
            "LEFT OUTER JOIN link AS link_2 ON parent_1.id = link_2.parent_id",
        )
    def test_local_wpoly_innerjoins(self):
        # test for issue #3988
        # innerjoin=True is requested but must degrade to LEFT OUTER JOIN
        # (same SQL as test_local_wpoly) so base-class rows aren't dropped
        Sub1 = self._fixture_from_subclass()
        Parent = self.classes.Parent
        Link = self.classes.Link
        poly = with_polymorphic(Parent, [Sub1])
        session = fixture_session()
        q = session.query(poly).options(
            joinedload(poly.Sub1.links, innerjoin=True)
            .joinedload(Link.child.of_type(Sub1), innerjoin=True)
            .joinedload(Sub1.links, innerjoin=True)
        )
        self.assert_compile(
            q,
            "SELECT parent.id AS parent_id, parent.type AS parent_type, "
            "link_1.parent_id AS link_1_parent_id, "
            "link_1.child_id AS link_1_child_id, "
            "parent_1.id AS parent_1_id, parent_1.type AS parent_1_type, "
            "link_2.parent_id AS link_2_parent_id, "
            "link_2.child_id AS link_2_child_id FROM parent "
            "LEFT OUTER JOIN link AS link_1 ON parent.id = link_1.parent_id "
            "LEFT OUTER JOIN parent AS parent_1 "
            "ON link_1.child_id = parent_1.id "
            "LEFT OUTER JOIN link AS link_2 ON parent_1.id = link_2.parent_id",
        )
    def test_local_wpoly_innerjoins_roundtrip(self):
        # test for issue #3988
        Sub1 = self._fixture_from_subclass()
        Parent = self.classes.Parent
        Link = self.classes.Link
        session = fixture_session()
        session.add_all([Parent(), Parent()])
        # represents "Parent" and "Sub1" rows
        poly = with_polymorphic(Parent, [Sub1])
        # innerjoin for Sub1 only, but this needs
        # to be cancelled because the Parent rows
        # would be omitted
        q = session.query(poly).options(
            joinedload(poly.Sub1.links, innerjoin=True).joinedload(
                Link.child.of_type(Sub1), innerjoin=True
            )
        )
        eq_(len(q.all()), 2)
class JoinAcrossJoinedInhMultiPath(
    fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
    """test long join paths with a joined-inh in the middle, where we go multiple
    times across the same joined-inh to the same target but with other classes
    in the middle.  E.g. test [ticket:2908]
    """
    run_setup_mappers = "once"
    __dialect__ = "default"
    @classmethod
    def setup_classes(cls):
        """Root and Intermediate both reference Sub1 (joined-inh subclass
        of Parent); Sub1 in turn references Target, so two distinct paths
        reach Target through the same joined-inheritance pair.
        """
        Base = cls.DeclarativeBasic
        class Root(Base):
            __tablename__ = "root"
            id = Column(Integer, primary_key=True)
            sub1_id = Column(Integer, ForeignKey("sub1.id"))
            intermediate = relationship("Intermediate")
            sub1 = relationship("Sub1")
        class Intermediate(Base):
            __tablename__ = "intermediate"
            id = Column(Integer, primary_key=True)
            sub1_id = Column(Integer, ForeignKey("sub1.id"))
            root_id = Column(Integer, ForeignKey("root.id"))
            sub1 = relationship("Sub1")
        class Parent(Base):
            __tablename__ = "parent"
            id = Column(Integer, primary_key=True)
        class Sub1(Parent):
            __tablename__ = "sub1"
            id = Column(Integer, ForeignKey("parent.id"), primary_key=True)
            target = relationship("Target")
        class Target(Base):
            __tablename__ = "target"
            id = Column(Integer, primary_key=True)
            sub1_id = Column(Integer, ForeignKey("sub1.id"))
    def test_join(self):
        """Default (non-flat) aliases of Sub1 render as anonymous
        subqueries of the parent/sub1 join, one per path.
        """
        Root, Intermediate, Sub1, Target = (
            self.classes.Root,
            self.classes.Intermediate,
            self.classes.Sub1,
            self.classes.Target,
        )
        s1_alias = aliased(Sub1)
        s2_alias = aliased(Sub1)
        t1_alias = aliased(Target)
        t2_alias = aliased(Target)
        sess = fixture_session()
        q = (
            sess.query(Root)
            .join(s1_alias, Root.sub1)
            .join(t1_alias, s1_alias.target)
            .join(Root.intermediate)
            .join(s2_alias, Intermediate.sub1)
            .join(t2_alias, s2_alias.target)
        )
        self.assert_compile(
            q,
            "SELECT root.id AS root_id, root.sub1_id AS root_sub1_id "
            "FROM root "
            "JOIN (SELECT parent.id AS parent_id, sub1.id AS sub1_id "
            "FROM parent JOIN sub1 ON parent.id = sub1.id) AS anon_1 "
            "ON anon_1.sub1_id = root.sub1_id "
            "JOIN target AS target_1 ON anon_1.sub1_id = target_1.sub1_id "
            "JOIN intermediate ON root.id = intermediate.root_id "
            "JOIN (SELECT parent.id AS parent_id, sub1.id AS sub1_id "
            "FROM parent JOIN sub1 ON parent.id = sub1.id) AS anon_2 "
            "ON anon_2.sub1_id = intermediate.sub1_id "
            "JOIN target AS target_2 ON anon_2.sub1_id = target_2.sub1_id",
        )
    def test_join_flat(self):
        """flat=True aliases render as aliased table pairs joined inline
        rather than anonymous subqueries.
        """
        Root, Intermediate, Sub1, Target = (
            self.classes.Root,
            self.classes.Intermediate,
            self.classes.Sub1,
            self.classes.Target,
        )
        s1_alias = aliased(Sub1, flat=True)
        s2_alias = aliased(Sub1, flat=True)
        t1_alias = aliased(Target)
        t2_alias = aliased(Target)
        sess = fixture_session()
        q = (
            sess.query(Root)
            .join(s1_alias, Root.sub1)
            .join(t1_alias, s1_alias.target)
            .join(Root.intermediate)
            .join(s2_alias, Intermediate.sub1)
            .join(t2_alias, s2_alias.target)
        )
        self.assert_compile(
            q,
            "SELECT root.id AS root_id, root.sub1_id AS root_sub1_id "
            "FROM root "
            "JOIN (parent AS parent_1 JOIN sub1 AS sub1_1 "
            "ON parent_1.id = sub1_1.id) "
            "ON sub1_1.id = root.sub1_id "
            "JOIN target AS target_1 ON sub1_1.id = target_1.sub1_id "
            "JOIN intermediate ON root.id = intermediate.root_id "
            "JOIN (parent AS parent_2 JOIN sub1 AS sub1_2 "
            "ON parent_2.id = sub1_2.id) "
            "ON sub1_2.id = intermediate.sub1_id "
            "JOIN target AS target_2 ON sub1_2.id = target_2.sub1_id",
        )
    def test_joinedload(self):
        """joinedload over both paths aliases each traversal of the
        joined-inh pair independently (parent_1/sub1_1 vs parent_2/sub1_2).
        """
        Root, Intermediate, Sub1 = (
            self.classes.Root,
            self.classes.Intermediate,
            self.classes.Sub1,
        )
        sess = fixture_session()
        q = sess.query(Root).options(
            joinedload(Root.sub1).joinedload(Sub1.target),
            joinedload(Root.intermediate)
            .joinedload(Intermediate.sub1)
            .joinedload(Sub1.target),
        )
        self.assert_compile(
            q,
            "SELECT root.id AS root_id, root.sub1_id AS root_sub1_id, "
            "target_1.id AS target_1_id, "
            "target_1.sub1_id AS target_1_sub1_id, "
            "sub1_1.id AS sub1_1_id, parent_1.id AS parent_1_id, "
            "intermediate_1.id AS intermediate_1_id, "
            "intermediate_1.sub1_id AS intermediate_1_sub1_id, "
            "intermediate_1.root_id AS intermediate_1_root_id, "
            "target_2.id AS target_2_id, "
            "target_2.sub1_id AS target_2_sub1_id, "
            "sub1_2.id AS sub1_2_id, parent_2.id AS parent_2_id "
            "FROM root "
            "LEFT OUTER JOIN intermediate AS intermediate_1 "
            "ON root.id = intermediate_1.root_id "
            "LEFT OUTER JOIN (parent AS parent_1 JOIN sub1 AS sub1_1 "
            "ON parent_1.id = sub1_1.id) "
            "ON sub1_1.id = intermediate_1.sub1_id "
            "LEFT OUTER JOIN target AS target_1 "
            "ON sub1_1.id = target_1.sub1_id "
            "LEFT OUTER JOIN (parent AS parent_2 JOIN sub1 AS sub1_2 "
            "ON parent_2.id = sub1_2.id) ON sub1_2.id = root.sub1_id "
            "LEFT OUTER JOIN target AS target_2 "
            "ON sub1_2.id = target_2.sub1_id",
        )
class MultipleAdaptUsesEntityOverTableTest(
    AssertsCompiledSQL, fixtures.MappedTest
):
    """Joined-inheritance A with subclasses B, C, D; verifies that joining
    two subclasses adapts columns to the per-entity aliased base table
    rather than the raw table.
    """
    __dialect__ = "default"
    run_create_tables = None
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "a",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("name", String),
        )
        Table(
            "b",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
        )
        Table(
            "c",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
            Column("bid", Integer, ForeignKey("b.id")),
        )
        Table(
            "d",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
            Column("cid", Integer, ForeignKey("c.id")),
        )
    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass
        class B(A):
            pass
        class C(A):
            pass
        class D(A):
            pass
    @classmethod
    def setup_mappers(cls):
        A, B, C, D = cls.classes.A, cls.classes.B, cls.classes.C, cls.classes.D
        a, b, c, d = cls.tables.a, cls.tables.b, cls.tables.c, cls.tables.d
        cls.mapper_registry.map_imperatively(A, a)
        cls.mapper_registry.map_imperatively(B, b, inherits=A)
        cls.mapper_registry.map_imperatively(C, c, inherits=A)
        cls.mapper_registry.map_imperatively(D, d, inherits=A)
    def _two_join_fixture(self):
        """Query selecting name from B, C and D with explicit joins
        B -> C -> D; shared by both tests below.
        """
        B, C, D = (self.classes.B, self.classes.C, self.classes.D)
        s = fixture_session()
        return (
            s.query(B.name, C.name, D.name)
            .select_from(B)
            .join(C, C.bid == B.id)
            .join(D, D.cid == C.id)
        )
    def test_two_joins_adaption(self):
        """Inspect compile state internals: each overlapping subclass join
        is wrapped in an (aliased a JOIN sub) pair, and the selected name
        columns adapt to the corresponding aliased "a" table.
        """
        a, c, d = self.tables.a, self.tables.c, self.tables.d
        with _aliased_join_warning("C->c"), _aliased_join_warning("D->d"):
            q = self._two_join_fixture()._compile_state()
        btoc = q.from_clauses[0].left
        ac_adapted = btoc.right.element.left
        c_adapted = btoc.right.element.right
        is_(ac_adapted.element, a)
        is_(c_adapted.element, c)
        ctod = q.from_clauses[0].right
        ad_adapted = ctod.element.left
        d_adapted = ctod.element.right
        is_(ad_adapted.element, a)
        is_(d_adapted.element, d)
        bname, cname, dname = q._entities
        adapter = q._get_current_adapter()
        b_name_adapted = adapter(bname.column, False)
        c_name_adapted = adapter(cname.column, False)
        d_name_adapted = adapter(dname.column, False)
        assert bool(b_name_adapted == a.c.name)
        assert bool(c_name_adapted == ac_adapted.c.name)
        assert bool(d_name_adapted == ad_adapted.c.name)
    def test_two_joins_sql(self):
        """Same fixture, asserted at the rendered-SQL level."""
        q = self._two_join_fixture()
        with _aliased_join_warning("C->c"), _aliased_join_warning("D->d"):
            self.assert_compile(
                q,
                "SELECT a.name AS a_name, a_1.name AS a_1_name, "
                "a_2.name AS a_2_name "
                "FROM a JOIN b ON a.id = b.id JOIN "
                "(a AS a_1 JOIN c AS c_1 ON a_1.id = c_1.id) "
                "ON c_1.bid = b.id "
                "JOIN (a AS a_2 JOIN d AS d_1 ON a_2.id = d_1.id) "
                "ON d_1.cid = c_1.id",
            )
class SameNameOnJoined(fixtures.MappedTest):
    """Base and joined-inh subclass each map their own 'bs' relationship
    of the same name; persistence and cascade delete must work for both.
    """
    run_setup_mappers = "once"
    run_inserts = None
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        Table(
            "a",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("t", String(5)),
        )
        Table(
            "a_sub",
            metadata,
            Column("id", Integer, ForeignKey("a.id"), primary_key=True),
        )
        Table(
            "b",
            metadata,
            Column(
                "id", Integer, primary_key=True, test_needs_autoincrement=True
            ),
            Column("a_id", Integer, ForeignKey("a.id")),
        )
    @classmethod
    def setup_mappers(cls):
        class A(cls.Comparable):
            pass
        class ASub(A):
            pass
        class B(cls.Comparable):
            pass
        # note: both A and ASub define a relationship named "bs"
        cls.mapper_registry.map_imperatively(
            A,
            cls.tables.a,
            polymorphic_on=cls.tables.a.c.t,
            polymorphic_identity="a",
            properties={"bs": relationship(B, cascade="all, delete-orphan")},
        )
        cls.mapper_registry.map_imperatively(
            ASub,
            cls.tables.a_sub,
            inherits=A,
            polymorphic_identity="asub",
            properties={"bs": relationship(B, cascade="all, delete-orphan")},
        )
        cls.mapper_registry.map_imperatively(B, cls.tables.b)
    def test_persist(self):
        """Round trip: insert A and ASub each with three Bs, then delete
        the parents and verify the cascade removes all Bs.
        """
        A, ASub, B = self.classes("A", "ASub", "B")
        s = Session(testing.db)
        s.add_all([A(bs=[B(), B(), B()]), ASub(bs=[B(), B(), B()])])
        s.commit()
        eq_(s.query(B).count(), 6)
        for a in s.query(A):
            eq_(len(a.bs), 3)
            s.delete(a)
        s.commit()
        eq_(s.query(B).count(), 0)
class BetweenSubclassJoinWExtraJoinedLoad(
    fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL
):
    """test for [ticket:3884]"""
    run_define_tables = None
    __dialect__ = "default"
    @classmethod
    def setup_classes(cls):
        """Engineer and Manager are joined-inh subclasses of Person;
        Engineer references Manager directly, and LastSeen eagerly
        backrefs onto Person (lazy=False), so a subclass-to-subclass join
        also drags in the eager LastSeen join for each side.
        """
        Base = cls.DeclarativeBasic
        class Person(Base):
            __tablename__ = "people"
            id = Column(Integer, primary_key=True)
            discriminator = Column("type", String(50))
            __mapper_args__ = {"polymorphic_on": discriminator}
        class Manager(Person):
            __tablename__ = "managers"
            __mapper_args__ = {"polymorphic_identity": "manager"}
            id = Column(Integer, ForeignKey("people.id"), primary_key=True)
        class Engineer(Person):
            __tablename__ = "engineers"
            __mapper_args__ = {"polymorphic_identity": "engineer"}
            id = Column(Integer, ForeignKey("people.id"), primary_key=True)
            primary_language = Column(String(50))
            manager_id = Column(Integer, ForeignKey("managers.id"))
            manager = relationship(
                Manager, primaryjoin=(Manager.id == manager_id)
            )
        class LastSeen(Base):
            __tablename__ = "seen"
            id = Column(Integer, ForeignKey("people.id"), primary_key=True)
            timestamp = Column(Integer)
            taggable = relationship(
                Person,
                primaryjoin=(Person.id == id),
                backref=backref("last_seen", lazy=False),
            )
    @testing.combinations((True,), (False,), argnames="autoalias")
    def test_query_auto(self, autoalias):
        """Both the automatic aliasing path (with its warning) and an
        explicit flat alias of Manager must render the same SQL.
        """
        Engineer, Manager = self.classes("Engineer", "Manager")
        sess = fixture_session()
        if autoalias:
            # eager join is both from Enginer->LastSeen as well as
            # Manager->LastSeen. In the case of Manager->LastSeen,
            # Manager is internally aliased, and comes to JoinedEagerLoader
            # with no "parent" entity but an adapter.
            q = sess.query(Engineer, Manager).join(Engineer.manager)
        else:
            m1 = aliased(Manager, flat=True)
            q = sess.query(Engineer, m1).join(Engineer.manager.of_type(m1))
        with _aliased_join_warning(
            "Manager->managers"
        ) if autoalias else util.nullcontext():
            self.assert_compile(
                q,
                "SELECT people.type AS people_type, engineers.id AS "
                "engineers_id, "
                "people.id AS people_id, "
                "engineers.primary_language AS engineers_primary_language, "
                "engineers.manager_id AS engineers_manager_id, "
                "people_1.type AS people_1_type, "
                "managers_1.id AS managers_1_id, "
                "people_1.id AS people_1_id, seen_1.id AS seen_1_id, "
                "seen_1.timestamp AS seen_1_timestamp, "
                "seen_2.id AS seen_2_id, "
                "seen_2.timestamp AS seen_2_timestamp "
                "FROM people JOIN engineers ON people.id = engineers.id "
                "JOIN (people AS people_1 JOIN managers AS managers_1 "
                "ON people_1.id = managers_1.id) "
                "ON managers_1.id = engineers.manager_id LEFT OUTER JOIN "
                "seen AS seen_1 ON people.id = seen_1.id LEFT OUTER JOIN "
                "seen AS seen_2 ON people_1.id = seen_2.id",
            )
class M2ODontLoadSiblingTest(fixtures.DeclarativeMappedTest):
    """test for #5210"""
    @classmethod
    def setup_classes(cls):
        """Other has two many-to-ones over the same FK: 'parent' to the
        polymorphic base and a viewonly 'child2' restricted to the Child2
        subclass; the stored row is a Child1, a sibling of Child2.
        """
        Base = cls.DeclarativeBasic
        class Parent(Base, ComparableEntity):
            __tablename__ = "parents"
            id = Column(Integer, primary_key=True)
            child_type = Column(String(50), nullable=False)
            __mapper_args__ = {
                "polymorphic_on": child_type,
            }
        class Child1(Parent):
            __tablename__ = "children_1"
            id = Column(Integer, ForeignKey(Parent.id), primary_key=True)
            __mapper_args__ = {
                "polymorphic_identity": "child1",
            }
        class Child2(Parent):
            __tablename__ = "children_2"
            id = Column(Integer, ForeignKey(Parent.id), primary_key=True)
            __mapper_args__ = {
                "polymorphic_identity": "child2",
            }
        class Other(Base):
            __tablename__ = "others"
            id = Column(Integer, primary_key=True)
            parent_id = Column(Integer, ForeignKey(Parent.id))
            parent = relationship(Parent)
            child2 = relationship(Child2, viewonly=True)
    @classmethod
    def insert_data(cls, connection):
        Other, Child1 = cls.classes("Other", "Child1")
        s = Session(connection)
        obj = Other(parent=Child1())
        s.add(obj)
        s.commit()
    def test_load_m2o_emit_query(self):
        """Lazy-loading child2 with a query must yield None, not the
        sibling Child1 row sharing the same FK value.
        """
        Other, Child1 = self.classes("Other", "Child1")
        s = fixture_session()
        obj = s.query(Other).first()
        is_(obj.child2, None)
        eq_(obj.parent, Child1())
    def test_load_m2o_use_get(self):
        """Same check when the target is already in the identity map and
        the loader takes the get() path.
        """
        Other, Child1 = self.classes("Other", "Child1")
        s = fixture_session()
        obj = s.query(Other).first()
        c1 = s.query(Child1).first()
        is_(obj.child2, None)
        is_(obj.parent, c1)
| monetate/sqlalchemy | test/orm/inheritance/test_relationship.py | Python | mit | 95,137 |
"""The tests for the notify demo platform."""
import unittest
import blumate.components.notify as notify
from blumate.components.notify import demo
from tests.common import get_test_home_assistant
class TestNotifyDemo(unittest.TestCase):
    """Test the demo notify platform."""
    def setUp(self):  # pylint: disable=invalid-name
        """Set up the demo notify platform and capture notify events."""
        self.hass = get_test_home_assistant()
        self.assertTrue(notify.setup(self.hass, {
            'notify': {
                'platform': 'demo'
            }
        }))
        # events fired on the demo notify bus, appended in order
        self.events = []
        def record_event(event):
            """Record event to send notification."""
            self.events.append(event)
        self.hass.bus.listen(demo.EVENT_NOTIFY, record_event)
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        # was: '""""Stop down everything...' -- stray 4th quote made the
        # docstring begin with a literal '"' character
        self.hass.stop()
    def test_sending_none_message(self):
        """Test send with None as message; no event should be fired."""
        notify.send_message(self.hass, None)
        self.hass.pool.block_till_done()
        self.assertTrue(len(self.events) == 0)
    def test_sending_templated_message(self):
        """Send a templated message and verify rendered message/title."""
        self.hass.states.set('sensor.temperature', 10)
        notify.send_message(self.hass, '{{ states.sensor.temperature.state }}',
                            '{{ states.sensor.temperature.name }}')
        self.hass.pool.block_till_done()
        last_event = self.events[-1]
        self.assertEqual(last_event.data[notify.ATTR_TITLE], 'temperature')
        self.assertEqual(last_event.data[notify.ATTR_MESSAGE], '10')
| bdfoster/blumate | tests/components/notify/test_demo.py | Python | mit | 1,653 |
# -*- coding: utf-8 -*-
"""
TeleMir developpement version with fake acquisition device
lancer dans un terminal :
python examples/test_osc_receive.py
"""
from pyacq import StreamHandler, FakeMultiSignals
from pyacq.gui import Oscilloscope, Oscilloscope_f, TimeFreq, TimeFreq2
from TeleMir.gui import Topoplot, KurtosisGraphics, freqBandsGraphics, spaceShipLauncher, Topoplot_imp
from TeleMir.gui import ScanningOscilloscope,SpectrumGraphics
from TeleMir.analyses import TransmitFeatures
#from TeleMir.example import test_osc_receive
import msgpack
#~ import gevent
#~ import zmq.green as zmq
from PyQt4 import QtCore,QtGui
#from multiprocessing import Process
import zmq
import msgpack
import time
import numpy as np
import os
def teleMir_CB():
    """Launch the TeleMir demo.

    Plays back pre-recorded EEG and gyroscope data through fake
    acquisition devices, starts the feature-extraction output stream,
    opens the visualization widgets and runs the Qt event loop; the
    devices are stopped and released when the GUI exits.

    NOTE(review): the input file paths are hard-coded to a developer
    machine and should be made configurable.
    """
    streamhandler = StreamHandler()

    # Pre-recorded data played back by the fake devices.
    filename = '/home/ran/Projets/pyacq_emotiv_recording/alex/Emotiv Systems Pty Ltd #SN201105160008860.raw'
    filenameXY = '/home/ran/Projets/EEG_recordings/anneLise/Emotiv Systems Pty Ltd #SN200709276578912.raw'
    precomputed = np.fromfile(filename, dtype=np.float32).reshape(-1, 14).transpose()
    precomputedXY = np.fromfile(filenameXY, dtype=np.float32).reshape(-1, 2).transpose()

    # Configure and start the fake EEG signal device (14 channels @ 128 Hz).
    dev = FakeMultiSignals(streamhandler=streamhandler)
    dev.configure(
        nb_channel=14,
        sampling_rate=128.,
        buffer_length=30.,
        packet_size=1,
        precomputed=precomputed,
    )
    dev.initialize()
    dev.start()

    # Configure and start the fake gyroscope X/Y device (2 channels).
    devXY = FakeMultiSignals(streamhandler=streamhandler)
    devXY.configure(
        nb_channel=2,
        sampling_rate=128.,
        buffer_length=30.,
        packet_size=1,
        precomputed=precomputedXY,
    )
    devXY.initialize()
    devXY.start()

    # Output stream carrying the extracted features.
    fout = TransmitFeatures(streamhandler=streamhandler)
    fout.configure(
        nb_channel=14,
        nb_feature=6,
        nb_pts=128,
        sampling_rate=10.,
        buffer_length=10.,
        packet_size=1,
    )
    fout.initialize(stream_in=dev.streams[0], stream_xy=devXY.streams[0])
    fout.start()

    # Colormap for the feature / time-frequency displays:
    # 'jet' = red, 'hot' = green/yellow, 'summer' = blue.
    color = 'hot'

    app = QtGui.QApplication([])

    # Impedance display (driven by the EEG stream).
    w_imp = Topoplot_imp(stream=dev.streams[0], type_Topo='imp')
    w_imp.show()

    # Frequency-band display.
    w_sp_bd = freqBandsGraphics(stream=dev.streams[0], interval_length=3.,
                                channels=[12])
    w_sp_bd.run()

    # Raw signal oscilloscope.
    w_oscilo = Oscilloscope(stream=dev.streams[0])
    w_oscilo.show()
    w_oscilo.set_params(xsize=10, mode='scroll')
    w_oscilo.auto_gain_and_offset(mode=2)
    w_oscilo.gain_zoom(100)
    select_chan = np.ones(14, dtype=bool)
    w_oscilo.automatic_color(cmap_name='jet', selected=select_chan)

    # Extracted-feature oscilloscope.
    w_feat1 = Oscilloscope_f(stream=fout.streams[0])
    w_feat1.show()
    w_feat1.set_params(colormap=color)
    w_feat1.showFullScreen()
    w_feat1.set_params(xsize=10, mode='scroll')

    # Topography display.
    w_topo = Topoplot(stream=dev.streams[0], type_Topo='topo')
    w_topo.show()

    # Time-frequency displays.
    w_Tf = TimeFreq(stream=dev.streams[0])
    w_Tf.show()
    w_Tf.set_params(xsize=10)
    w_Tf.change_param_tfr(f_stop=45, f0=1)
    w_Tf.set_params(colormap=color)

    w_Tf2 = TimeFreq2(stream=dev.streams[0])
    w_Tf2.show()
    w_Tf2.set_params(xsize=10)
    w_Tf2.change_param_tfr(f_stop=45, f0=1)
    w_Tf2.set_params(colormap=color)

    # Game display.
    w1 = spaceShipLauncher(dev.streams[0])
    w1.run()
    w1.showFullScreen()

    app.exec_()

    # Stop and release the devices.  The impedance device ('devImp') is
    # disabled, so it must NOT be stopped here: the previous code called
    # devImp.stop()/devImp.close() and always raised a NameError on
    # shutdown because devImp was never created.
    fout.stop()
    fout.close()
    print('ici')
    dev.stop()
    dev.close()
    print('ici')
    devXY.stop()
    devXY.close()
    print('ici')
if __name__ == '__main__':
    # Script entry point: launch the TeleMir demo GUI.
    teleMir_CB()
| Hemisphere-Project/Telemir-DatabitMe | Telemir-EEG/TeleMir_171013/Fake_TeleMir_CB.py | Python | gpl-2.0 | 6,881 |
from typing import List
from typing import Optional
from sqlalchemy import Boolean
from sqlalchemy import ForeignKey
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import declared_attr
from sqlalchemy.orm import registry as declarative_registry
from sqlalchemy.orm import registry
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
try:
    # dataclasses is in the standard library on Python 3.7+; when it is
    # absent the test classes below are skipped via their
    # __requires__ = ("dataclasses",) declarations.
    import dataclasses
except ImportError:
    pass
class DataclassesTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
    """Map plain (SQLAlchemy-unaware) dataclasses imperatively.

    The dataclasses are defined with no mapping constructs and are mapped
    afterwards with ``map_imperatively``; the tests verify that the
    classes still behave as dataclasses and round-trip through the ORM.
    """

    __requires__ = ("dataclasses",)

    @classmethod
    def define_tables(cls, metadata):
        Table(
            "accounts",
            metadata,
            Column("account_id", Integer, primary_key=True),
            Column("widget_count", Integer, nullable=False),
        )
        Table(
            "widgets",
            metadata,
            Column("widget_id", Integer, primary_key=True),
            Column(
                "account_id",
                Integer,
                ForeignKey("accounts.account_id"),
                nullable=False,
            ),
            Column("type", String(30), nullable=False),
            Column("name", String(30), nullable=False),
            Column("magic", Boolean),
        )

    @classmethod
    def setup_classes(cls):
        @dataclasses.dataclass
        class Widget:
            name: Optional[str] = None

        @dataclasses.dataclass
        class SpecialWidget(Widget):
            magic: bool = False

        @dataclasses.dataclass
        class Account:
            account_id: int
            widgets: List[Widget] = dataclasses.field(default_factory=list)
            # derived value, kept in sync by __post_init__ / add_widget
            widget_count: int = dataclasses.field(init=False)

            def __post_init__(self):
                self.widget_count = len(self.widgets)

            def add_widget(self, widget: Widget):
                self.widgets.append(widget)
                self.widget_count += 1

        cls.classes.Account = Account
        cls.classes.Widget = Widget
        cls.classes.SpecialWidget = SpecialWidget

    @classmethod
    def setup_mappers(cls):
        accounts = cls.tables.accounts
        widgets = cls.tables.widgets

        Account = cls.classes.Account
        Widget = cls.classes.Widget
        SpecialWidget = cls.classes.SpecialWidget

        # Widget / SpecialWidget share the "widgets" table
        # (single-table inheritance discriminated by the "type" column)
        cls.mapper_registry.map_imperatively(
            Widget,
            widgets,
            polymorphic_on=widgets.c.type,
            polymorphic_identity="normal",
        )
        cls.mapper_registry.map_imperatively(
            SpecialWidget,
            widgets,
            inherits=Widget,
            polymorphic_identity="special",
        )
        cls.mapper_registry.map_imperatively(
            Account, accounts, properties={"widgets": relationship(Widget)}
        )

    def check_account_dataclass(self, obj):
        # the mapped class/instance must still be a dataclass with the
        # originally declared fields, in order
        assert dataclasses.is_dataclass(obj)
        account_id, widgets, widget_count = dataclasses.fields(obj)
        eq_(account_id.name, "account_id")
        eq_(widget_count.name, "widget_count")
        eq_(widgets.name, "widgets")

    def check_widget_dataclass(self, obj):
        assert dataclasses.is_dataclass(obj)
        (name,) = dataclasses.fields(obj)
        eq_(name.name, "name")

    def check_special_widget_dataclass(self, obj):
        assert dataclasses.is_dataclass(obj)
        name, magic = dataclasses.fields(obj)
        eq_(name.name, "name")
        eq_(magic.name, "magic")

    def data_fixture(self):
        """Build one Account holding a Widget and a SpecialWidget."""
        Account = self.classes.Account
        Widget = self.classes.Widget
        SpecialWidget = self.classes.SpecialWidget

        return Account(
            account_id=42,
            widgets=[Widget("Foo"), SpecialWidget("Bar", magic=True)],
        )

    def check_data_fixture(self, account):
        """Assert *account* matches what data_fixture() produces."""
        Widget = self.classes.Widget
        SpecialWidget = self.classes.SpecialWidget

        self.check_account_dataclass(account)
        eq_(account.account_id, 42)
        eq_(account.widget_count, 2)
        eq_(len(account.widgets), 2)

        foo, bar = account.widgets
        self.check_widget_dataclass(foo)
        assert isinstance(foo, Widget)
        eq_(foo.name, "Foo")

        self.check_special_widget_dataclass(bar)
        assert isinstance(bar, SpecialWidget)
        eq_(bar.name, "Bar")
        eq_(bar.magic, True)

    def test_classes_are_still_dataclasses(self):
        self.check_account_dataclass(self.classes.Account)
        self.check_widget_dataclass(self.classes.Widget)
        self.check_special_widget_dataclass(self.classes.SpecialWidget)

    def test_construction(self):
        SpecialWidget = self.classes.SpecialWidget

        account = self.data_fixture()
        self.check_data_fixture(account)

        # defaults declared on the dataclass still apply
        widget = SpecialWidget()
        eq_(widget.name, None)
        eq_(widget.magic, False)

    def test_equality(self):
        Widget = self.classes.Widget
        SpecialWidget = self.classes.SpecialWidget

        eq_(Widget("Foo"), Widget("Foo"))
        assert Widget("Foo") != Widget("Bar")
        # dataclass-generated __eq__ is type-sensitive
        assert Widget("Foo") != SpecialWidget("Foo")

    def test_asdict_and_astuple_widget(self):
        Widget = self.classes.Widget

        widget = Widget("Foo")
        eq_(dataclasses.asdict(widget), {"name": "Foo"})
        eq_(dataclasses.astuple(widget), ("Foo",))

    def test_asdict_and_astuple_special_widget(self):
        SpecialWidget = self.classes.SpecialWidget

        widget = SpecialWidget("Bar", magic=True)
        eq_(dataclasses.asdict(widget), {"name": "Bar", "magic": True})
        eq_(dataclasses.astuple(widget), ("Bar", True))

    def test_round_trip(self):
        Account = self.classes.Account
        account = self.data_fixture()

        with fixture_session() as session:
            session.add(account)
            session.commit()

        with fixture_session() as session:
            a = session.get(Account, 42)
            self.check_data_fixture(a)

    def test_appending_to_relationship(self):
        Account = self.classes.Account
        Widget = self.classes.Widget
        account = self.data_fixture()

        with Session(testing.db) as session, session.begin():
            session.add(account)
            account.add_widget(Widget("Xyzzy"))

        with Session(testing.db) as session:
            a = session.get(Account, 42)
            eq_(a.widget_count, 3)
            eq_(len(a.widgets), 3)

    def test_filtering_on_relationship(self):
        Account = self.classes.Account
        Widget = self.classes.Widget
        account = self.data_fixture()

        with Session(testing.db) as session:
            session.add(account)
            session.commit()

        with Session(testing.db) as session:
            a = (
                session.query(Account)
                .join(Account.widgets)
                .filter(Widget.name == "Foo")
                .one()
            )
            self.check_data_fixture(a)
class PlainDeclarativeDataclassesTest(DataclassesTest):
    """Run the DataclassesTest suite with declarative ``__table__``
    mapping of the dataclasses instead of imperative mapping."""

    __requires__ = ("dataclasses",)

    run_setup_classes = "each"
    run_setup_mappers = "each"

    @classmethod
    def setup_classes(cls):
        accounts = cls.tables.accounts
        widgets = cls.tables.widgets

        declarative = declarative_registry().mapped

        @declarative
        @dataclasses.dataclass
        class Widget:
            __table__ = widgets

            name: Optional[str] = None

            __mapper_args__ = dict(
                polymorphic_on=widgets.c.type,
                polymorphic_identity="normal",
            )

        @declarative
        @dataclasses.dataclass
        class SpecialWidget(Widget):
            magic: bool = False

            __mapper_args__ = dict(
                polymorphic_identity="special",
            )

        @declarative
        @dataclasses.dataclass
        class Account:
            __table__ = accounts

            account_id: int
            widgets: List[Widget] = dataclasses.field(default_factory=list)
            widget_count: int = dataclasses.field(init=False)

            # class-level assignment replaces the dataclass attribute
            # above with the mapped relationship
            widgets = relationship("Widget")

            def __post_init__(self):
                self.widget_count = len(self.widgets)

            def add_widget(self, widget: Widget):
                self.widgets.append(widget)
                self.widget_count += 1

        cls.classes.Account = Account
        cls.classes.Widget = Widget
        cls.classes.SpecialWidget = SpecialWidget

    @classmethod
    def setup_mappers(cls):
        # mapping happens declaratively in setup_classes
        pass
class FieldEmbeddedDeclarativeDataclassesTest(
    fixtures.DeclarativeMappedTest, DataclassesTest
):
    """Declarative mapping where Column / relationship constructs are
    embedded in ``dataclasses.field(metadata={"sa": ...})`` entries,
    keyed by ``__sa_dataclass_metadata_key__``."""

    __requires__ = ("dataclasses",)

    @classmethod
    def setup_classes(cls):
        declarative = cls.DeclarativeBasic.registry.mapped

        @declarative
        @dataclasses.dataclass
        class Widget:
            __tablename__ = "widgets"

            __sa_dataclass_metadata_key__ = "sa"

            widget_id = Column(Integer, primary_key=True)
            account_id = Column(
                Integer,
                ForeignKey("accounts.account_id"),
                nullable=False,
            )
            type = Column(String(30), nullable=False)

            name: Optional[str] = dataclasses.field(
                default=None,
                metadata={"sa": Column(String(30), nullable=False)},
            )

            __mapper_args__ = dict(
                polymorphic_on="type",
                polymorphic_identity="normal",
            )

        @declarative
        @dataclasses.dataclass
        class SpecialWidget(Widget):
            __sa_dataclass_metadata_key__ = "sa"

            magic: bool = dataclasses.field(
                default=False, metadata={"sa": Column(Boolean)}
            )

            __mapper_args__ = dict(
                polymorphic_identity="special",
            )

        @declarative
        @dataclasses.dataclass
        class Account:
            __tablename__ = "accounts"

            __sa_dataclass_metadata_key__ = "sa"

            account_id: int = dataclasses.field(
                metadata={"sa": Column(Integer, primary_key=True)},
            )
            widgets: List[Widget] = dataclasses.field(
                default_factory=list, metadata={"sa": relationship("Widget")}
            )

            widget_count: int = dataclasses.field(
                init=False,
                metadata={
                    "sa": Column("widget_count", Integer, nullable=False)
                },
            )

            def __post_init__(self):
                self.widget_count = len(self.widgets)

            def add_widget(self, widget: Widget):
                self.widgets.append(widget)
                self.widget_count += 1

        cls.classes.Account = Account
        cls.classes.Widget = Widget
        cls.classes.SpecialWidget = SpecialWidget

    @classmethod
    def setup_mappers(cls):
        # mapping is fully declarative; nothing to do here
        pass

    @classmethod
    def define_tables(cls, metadata):
        # tables are produced by the declarative classes in setup_classes
        pass

    def test_asdict_and_astuple_widget(self):
        Widget = self.classes.Widget

        widget = Widget("Foo")
        eq_(dataclasses.asdict(widget), {"name": "Foo"})
        eq_(dataclasses.astuple(widget), ("Foo",))

    def test_asdict_and_astuple_special_widget(self):
        SpecialWidget = self.classes.SpecialWidget

        widget = SpecialWidget("Bar", magic=True)
        eq_(dataclasses.asdict(widget), {"name": "Bar", "magic": True})
        eq_(dataclasses.astuple(widget), ("Bar", True))
class FieldEmbeddedWMixinTest(FieldEmbeddedDeclarativeDataclassesTest):
    """Field-embedded mapping where some attributes come from unmapped
    dataclass base classes (mixins)."""

    __requires__ = ("dataclasses",)

    @classmethod
    def setup_classes(cls):
        declarative = cls.DeclarativeBasic.registry.mapped

        @dataclasses.dataclass
        class SurrogateWidgetPK:
            __sa_dataclass_metadata_key__ = "sa"

            widget_id: int = dataclasses.field(
                init=False,
                metadata={"sa": Column(Integer, primary_key=True)},
            )

        @declarative
        @dataclasses.dataclass
        class Widget(SurrogateWidgetPK):
            __tablename__ = "widgets"

            __sa_dataclass_metadata_key__ = "sa"

            account_id = Column(
                Integer,
                ForeignKey("accounts.account_id"),
                nullable=False,
            )
            type = Column(String(30), nullable=False)

            name: Optional[str] = dataclasses.field(
                default=None,
                metadata={"sa": Column(String(30), nullable=False)},
            )

            __mapper_args__ = dict(
                polymorphic_on="type",
                polymorphic_identity="normal",
            )

        @declarative
        @dataclasses.dataclass
        class SpecialWidget(Widget):
            __sa_dataclass_metadata_key__ = "sa"

            magic: bool = dataclasses.field(
                default=False, metadata={"sa": Column(Boolean)}
            )

            __mapper_args__ = dict(
                polymorphic_identity="special",
            )

        @dataclasses.dataclass
        class SurrogateAccountPK:
            __sa_dataclass_metadata_key__ = "sa"

            # plain class attribute (not a dataclass field); its column
            # name signals it should not end up mapped — the account_id
            # field on Account below redefines the attribute
            account_id = Column(
                "we_dont_want_to_use_this", Integer, primary_key=True
            )

        @declarative
        @dataclasses.dataclass
        class Account(SurrogateAccountPK):
            __tablename__ = "accounts"

            __sa_dataclass_metadata_key__ = "sa"

            account_id: int = dataclasses.field(
                metadata={"sa": Column(Integer, primary_key=True)},
            )
            widgets: List[Widget] = dataclasses.field(
                default_factory=list, metadata={"sa": relationship("Widget")}
            )

            widget_count: int = dataclasses.field(
                init=False,
                metadata={
                    "sa": Column("widget_count", Integer, nullable=False)
                },
            )

            def __post_init__(self):
                self.widget_count = len(self.widgets)

            def add_widget(self, widget: Widget):
                self.widgets.append(widget)
                self.widget_count += 1

        cls.classes.Account = Account
        cls.classes.Widget = Widget
        cls.classes.SpecialWidget = SpecialWidget

    def check_widget_dataclass(self, obj):
        # the mixin's widget_id field precedes the subclass's fields
        assert dataclasses.is_dataclass(obj)
        (
            id_,
            name,
        ) = dataclasses.fields(obj)
        eq_(name.name, "name")
        eq_(id_.name, "widget_id")

    def check_special_widget_dataclass(self, obj):
        assert dataclasses.is_dataclass(obj)
        id_, name, magic = dataclasses.fields(obj)
        eq_(id_.name, "widget_id")
        eq_(name.name, "name")
        eq_(magic.name, "magic")

    def test_asdict_and_astuple_widget(self):
        Widget = self.classes.Widget

        widget = Widget("Foo")
        eq_(dataclasses.asdict(widget), {"name": "Foo", "widget_id": None})
        eq_(
            dataclasses.astuple(widget),
            (
                None,
                "Foo",
            ),
        )

    def test_asdict_and_astuple_special_widget(self):
        SpecialWidget = self.classes.SpecialWidget

        widget = SpecialWidget("Bar", magic=True)
        eq_(
            dataclasses.asdict(widget),
            {"name": "Bar", "magic": True, "widget_id": None},
        )
        eq_(dataclasses.astuple(widget), (None, "Bar", True))
class FieldEmbeddedMixinWLambdaTest(fixtures.DeclarativeMappedTest):
    """Field-embedded mapping where mixin fields supply their Column /
    relationship deferred behind a lambda in the "sa" metadata entry."""

    __requires__ = ("dataclasses",)

    @classmethod
    def setup_classes(cls):
        declarative = cls.DeclarativeBasic.registry.mapped

        @dataclasses.dataclass
        class WidgetDC:
            __sa_dataclass_metadata_key__ = "sa"

            widget_id: int = dataclasses.field(
                init=False,
                metadata={"sa": Column(Integer, primary_key=True)},
            )

            # fk on mixin
            account_id: int = dataclasses.field(
                init=False,
                metadata={
                    "sa": lambda: Column(
                        Integer,
                        ForeignKey("accounts.account_id"),
                        nullable=False,
                    )
                },
            )

            has_a_default: str = dataclasses.field(
                default="some default",
                metadata={"sa": lambda: Column(String(50))},
            )

        @declarative
        @dataclasses.dataclass
        class Widget(WidgetDC):
            __tablename__ = "widgets"

            __sa_dataclass_metadata_key__ = "sa"

            type = Column(String(30), nullable=False)

            name: Optional[str] = dataclasses.field(
                default=None,
                metadata={"sa": Column(String(30), nullable=False)},
            )

            __mapper_args__ = dict(
                polymorphic_on="type",
                polymorphic_identity="normal",
            )

        @declarative
        @dataclasses.dataclass
        class SpecialWidget(Widget):
            __tablename__ = "special_widgets"

            __sa_dataclass_metadata_key__ = "sa"

            special_widget_id: int = dataclasses.field(
                init=False,
                metadata={
                    "sa": Column(
                        ForeignKey("widgets.widget_id"), primary_key=True
                    )
                },
            )

            magic: bool = dataclasses.field(
                default=False, metadata={"sa": Column(Boolean)}
            )

            __mapper_args__ = dict(
                polymorphic_identity="special",
            )

        @dataclasses.dataclass
        class AccountDC:
            __sa_dataclass_metadata_key__ = "sa"

            # relationship on mixin
            widgets: List[Widget] = dataclasses.field(
                default_factory=list,
                metadata={"sa": lambda: relationship("Widget")},
            )

            account_id: int = dataclasses.field(
                init=False,
                metadata={"sa": Column(Integer, primary_key=True)},
            )

            widget_count: int = dataclasses.field(
                init=False,
                metadata={
                    "sa": Column("widget_count", Integer, nullable=False)
                },
            )

        @declarative
        class Account(AccountDC):
            __tablename__ = "accounts"

            __sa_dataclass_metadata_key__ = "sa"

            def __post_init__(self):
                self.widget_count = len(self.widgets)

            def add_widget(self, widget: Widget):
                self.widgets.append(widget)
                self.widget_count += 1

        @declarative
        @dataclasses.dataclass
        class User:
            __tablename__ = "user"

            __sa_dataclass_metadata_key__ = "sa"

            user_id: int = dataclasses.field(
                init=False,
                metadata={"sa": Column(Integer, primary_key=True)},
            )

            # fk w declared attr on mapped class
            account_id: int = dataclasses.field(
                init=False,
                metadata={
                    "sa": lambda: Column(
                        Integer,
                        ForeignKey("accounts.account_id"),
                        nullable=False,
                    )
                },
            )

        cls.classes["Account"] = Account
        cls.classes["Widget"] = Widget
        cls.classes["User"] = User
        cls.classes["SpecialWidget"] = SpecialWidget

    def test_setup(self):
        Account, Widget, User, SpecialWidget = self.classes(
            "Account", "Widget", "User", "SpecialWidget"
        )
        # mixin-provided fk and relationship landed on the right classes
        assert "account_id" in Widget.__table__.c
        assert list(Widget.__table__.c.account_id.foreign_keys)[0].references(
            Account.__table__
        )

        assert inspect(Account).relationships.widgets.mapper is inspect(Widget)

        # columns from the mixin appear once, on the base of the joined
        # hierarchy only
        assert "account_id" not in SpecialWidget.__table__.c
        assert "has_a_default" in Widget.__table__.c
        assert "has_a_default" not in SpecialWidget.__table__.c

        assert "account_id" in User.__table__.c
        assert list(User.__table__.c.account_id.foreign_keys)[0].references(
            Account.__table__
        )

    def test_asdict_and_astuple_special_widget(self):
        SpecialWidget = self.classes.SpecialWidget

        widget = SpecialWidget(magic=True)
        eq_(
            dataclasses.asdict(widget),
            {
                "widget_id": None,
                "account_id": None,
                "has_a_default": "some default",
                "name": None,
                "special_widget_id": None,
                "magic": True,
            },
        )
        eq_(
            dataclasses.astuple(widget),
            (None, None, "some default", None, None, True),
        )
class FieldEmbeddedMixinWDeclaredAttrTest(FieldEmbeddedMixinWLambdaTest):
    """Same suite as FieldEmbeddedMixinWLambdaTest, but the deferred
    Column / relationship lambdas are wrapped in declared_attr."""

    __requires__ = ("dataclasses",)

    @classmethod
    def setup_classes(cls):
        declarative = cls.DeclarativeBasic.registry.mapped

        @dataclasses.dataclass
        class WidgetDC:
            __sa_dataclass_metadata_key__ = "sa"

            widget_id: int = dataclasses.field(
                init=False,
                metadata={"sa": Column(Integer, primary_key=True)},
            )

            # fk on mixin
            account_id: int = dataclasses.field(
                init=False,
                metadata={
                    "sa": declared_attr(
                        lambda: Column(
                            Integer,
                            ForeignKey("accounts.account_id"),
                            nullable=False,
                        )
                    )
                },
            )

            has_a_default: str = dataclasses.field(
                default="some default",
                metadata={"sa": declared_attr(lambda: Column(String(50)))},
            )

        @declarative
        @dataclasses.dataclass
        class Widget(WidgetDC):
            __tablename__ = "widgets"

            __sa_dataclass_metadata_key__ = "sa"

            type = Column(String(30), nullable=False)

            name: Optional[str] = dataclasses.field(
                default=None,
                metadata={"sa": Column(String(30), nullable=False)},
            )

            __mapper_args__ = dict(
                polymorphic_on="type",
                polymorphic_identity="normal",
            )

        @declarative
        @dataclasses.dataclass
        class SpecialWidget(Widget):
            __tablename__ = "special_widgets"

            __sa_dataclass_metadata_key__ = "sa"

            special_widget_id: int = dataclasses.field(
                init=False,
                metadata={
                    "sa": Column(
                        ForeignKey("widgets.widget_id"), primary_key=True
                    )
                },
            )

            magic: bool = dataclasses.field(
                default=False, metadata={"sa": Column(Boolean)}
            )

            __mapper_args__ = dict(
                polymorphic_identity="special",
            )

        @dataclasses.dataclass
        class AccountDC:
            __sa_dataclass_metadata_key__ = "sa"

            # relationship on mixin
            widgets: List[Widget] = dataclasses.field(
                default_factory=list,
                metadata={"sa": declared_attr(lambda: relationship("Widget"))},
            )

            account_id: int = dataclasses.field(
                init=False,
                metadata={"sa": Column(Integer, primary_key=True)},
            )

            widget_count: int = dataclasses.field(
                init=False,
                metadata={
                    "sa": Column("widget_count", Integer, nullable=False)
                },
            )

        @declarative
        class Account(AccountDC):
            __tablename__ = "accounts"

            __sa_dataclass_metadata_key__ = "sa"

            def __post_init__(self):
                self.widget_count = len(self.widgets)

            def add_widget(self, widget: Widget):
                self.widgets.append(widget)
                self.widget_count += 1

        @declarative
        @dataclasses.dataclass
        class User:
            __tablename__ = "user"

            __sa_dataclass_metadata_key__ = "sa"

            user_id: int = dataclasses.field(
                init=False,
                metadata={"sa": Column(Integer, primary_key=True)},
            )

            # fk w declared attr on mapped class
            account_id: int = dataclasses.field(
                init=False,
                metadata={
                    "sa": declared_attr(
                        lambda: Column(
                            Integer,
                            ForeignKey("accounts.account_id"),
                            nullable=False,
                        )
                    )
                },
            )

        cls.classes["Account"] = Account
        cls.classes["Widget"] = Widget
        cls.classes["User"] = User
        cls.classes["SpecialWidget"] = SpecialWidget
class PropagationFromMixinTest(fixtures.TestBase):
    """Propagation of __tablename__ / __table_args__ and a column from a
    dataclass mixin into single- and joined-table hierarchies."""

    __requires__ = ("dataclasses",)

    def test_propagate_w_plain_mixin_col(self, run_test):
        @dataclasses.dataclass
        class CommonMixin:
            __sa_dataclass_metadata_key__ = "sa"

            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            __table_args__ = {"mysql_engine": "InnoDB"}
            timestamp = Column(Integer)

        run_test(CommonMixin)

    def test_propagate_w_field_mixin_col(self, run_test):
        @dataclasses.dataclass
        class CommonMixin:
            __sa_dataclass_metadata_key__ = "sa"

            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            __table_args__ = {"mysql_engine": "InnoDB"}

            timestamp: int = dataclasses.field(
                init=False,
                metadata={"sa": Column(Integer, nullable=False)},
            )

        run_test(CommonMixin)

    def test_propagate_w_field_mixin_col_and_default(self, run_test):
        @dataclasses.dataclass
        class CommonMixin:
            __sa_dataclass_metadata_key__ = "sa"

            @declared_attr
            def __tablename__(cls):
                return cls.__name__.lower()

            __table_args__ = {"mysql_engine": "InnoDB"}

            timestamp: int = dataclasses.field(
                init=False,
                default=12,
                metadata={"sa": Column(Integer, nullable=False)},
            )

        run_test(CommonMixin)

    @testing.fixture()
    def run_test(self):
        # fixture returning the shared test body; mappers are cleared on
        # fixture teardown
        def go(CommonMixin):
            declarative = registry().mapped

            @declarative
            @dataclasses.dataclass
            class BaseType(CommonMixin):
                discriminator = Column("type", String(50))
                __mapper_args__ = dict(polymorphic_on=discriminator)
                id = Column(Integer, primary_key=True)
                value = Column(Integer())

            @declarative
            @dataclasses.dataclass
            class Single(BaseType):
                __tablename__ = None
                __mapper_args__ = dict(polymorphic_identity="type1")

            @declarative
            @dataclasses.dataclass
            class Joined(BaseType):
                __mapper_args__ = dict(polymorphic_identity="type2")
                id = Column(
                    Integer, ForeignKey("basetype.id"), primary_key=True
                )

            eq_(BaseType.__table__.name, "basetype")
            eq_(
                list(BaseType.__table__.c.keys()),
                ["timestamp", "type", "id", "value"],
            )
            eq_(BaseType.__table__.kwargs, {"mysql_engine": "InnoDB"})
            # Single shares BaseType's table; Joined gets its own
            assert Single.__table__ is BaseType.__table__
            eq_(Joined.__table__.name, "joined")
            eq_(list(Joined.__table__.c.keys()), ["id"])
            eq_(Joined.__table__.kwargs, {"mysql_engine": "InnoDB"})

        yield go
        clear_mappers()
class PropagationFromAbstractTest(fixtures.TestBase):
    """Propagation of __table_args__ and columns from an unmapped
    dataclass base class into mapped subclasses."""

    __requires__ = ("dataclasses",)

    def test_propagate_w_plain_mixin_col(self, run_test):
        @dataclasses.dataclass
        class BaseType:
            __sa_dataclass_metadata_key__ = "sa"

            __table_args__ = {"mysql_engine": "InnoDB"}
            discriminator: str = Column("type", String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id: int = Column(Integer, primary_key=True)
            value: int = Column(Integer())
            timestamp: int = Column(Integer)

        run_test(BaseType)

    def test_propagate_w_field_mixin_col(self, run_test):
        @dataclasses.dataclass
        class BaseType:
            __sa_dataclass_metadata_key__ = "sa"

            __table_args__ = {"mysql_engine": "InnoDB"}
            discriminator: str = Column("type", String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id: int = Column(Integer, primary_key=True)
            value: int = Column(Integer())

            timestamp: int = dataclasses.field(
                init=False,
                metadata={"sa": Column(Integer, nullable=False)},
            )

        run_test(BaseType)

    def test_propagate_w_field_mixin_col_and_default(self, run_test):
        @dataclasses.dataclass
        class BaseType:
            __sa_dataclass_metadata_key__ = "sa"

            __table_args__ = {"mysql_engine": "InnoDB"}
            discriminator: str = Column("type", String(50))
            __mapper_args__ = dict(polymorphic_on=discriminator)
            id: int = Column(Integer, primary_key=True)
            value: int = Column(Integer())

            timestamp: int = dataclasses.field(
                init=False,
                default=None,
                metadata={"sa": Column(Integer, nullable=False)},
            )

        run_test(BaseType)

    @testing.fixture()
    def run_test(self):
        # fixture returning the shared test body; mappers are cleared on
        # fixture teardown
        def go(BaseType):
            declarative = registry().mapped

            @declarative
            @dataclasses.dataclass
            class Single(BaseType):
                __tablename__ = "single"
                __mapper_args__ = dict(polymorphic_identity="type1")

            @declarative
            @dataclasses.dataclass
            class Joined(Single):
                __tablename__ = "joined"
                __mapper_args__ = dict(polymorphic_identity="type2")
                id = Column(Integer, ForeignKey("single.id"), primary_key=True)

            eq_(Single.__table__.name, "single")
            eq_(
                list(Single.__table__.c.keys()),
                ["type", "id", "value", "timestamp"],
            )
            eq_(Single.__table__.kwargs, {"mysql_engine": "InnoDB"})

            eq_(Joined.__table__.name, "joined")
            eq_(list(Joined.__table__.c.keys()), ["id"])
            eq_(Joined.__table__.kwargs, {"mysql_engine": "InnoDB"})

        yield go
        clear_mappers()
| zzzeek/sqlalchemy | test/orm/test_dataclasses_py3k.py | Python | mit | 31,652 |
"""
Author: Remi Lafage <remi.lafage@onera.fr>
This package is distributed under New BSD license.
Mixture of Experts
"""
# TODO : support for best number of clusters
# TODO : implement verbosity 'print_global'
# TODO : documentation
import numpy as np
import warnings
# Compatibility shim: scikit-learn < 0.20.0 exposed the Gaussian mixture
# model as GMM; newer versions use GaussianMixture.  OLD_SKLEARN records
# which API is in use.
OLD_SKLEARN = False
try:  # scikit-learn < 0.20.0
    from sklearn.mixture import GMM as GaussianMixture

    OLD_SKLEARN = True
except ImportError:  # modern scikit-learn; bare "except:" would also have
    # swallowed SystemExit/KeyboardInterrupt and unrelated errors
    from sklearn.mixture import GaussianMixture
from scipy.stats import multivariate_normal
from smt.utils.options_dictionary import OptionsDictionary
from smt.applications.application import SurrogateBasedApplication
from smt.utils.misc import compute_rms_error
from smt.surrogate_models.surrogate_model import SurrogateModel
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Surrogate-model names that may act as experts in the mixture; filtered
# against the actually available surrogate types in MOE.AVAILABLE_EXPERTS.
MOE_EXPERT_NAMES = [
    "KRG",
    "KPLS",
    "KPLSK",
    "LS",
    "QP",
    "RBF",
    "IDW",
    "RMTB",
    "RMTC",
]
class MOESurrogateModel(SurrogateModel):
    """Wrapper class exposing MOE features as a SurrogateModel subclass."""

    name = "MOE"

    def _initialize(self):
        """Declare options by mirroring those of a fresh MOE instance."""
        super(MOESurrogateModel, self)._initialize()
        # Copy over options from MOE object
        self.moe = moe = MOE()
        for key, data in moe.options._declared_entries.items():
            self.options._declared_entries[key] = data
            value = moe.options[key]
            if value is not None:
                self.options[key] = value

    def _setup(self):
        """Push this wrapper's option values down into the wrapped MOE."""
        for key in self.moe.options._declared_entries:
            if key in self.options:
                self.moe.options[key] = self.options[key]
        # self.supports['derivatives'] = self.options['derivatives_support'] # Interface not yet implemented
        self.supports["variances"] = self.options["variances_support"]

    def train(self):
        """Train the wrapped mixture; when no training points were set
        explicitly, fall back to the 'xt'/'yt' options."""
        if len(self.training_points) == 0:
            xt = self.options["xt"]
            yt = self.options["yt"]
            self.set_training_values(xt, yt)
        super(MOESurrogateModel, self).train()

    def _train(self):
        self._setup()
        # forward each group of training points to the wrapped MOE
        for name in self.training_points:
            xt, yt = self.training_points[name][0]
            self.moe.set_training_values(xt, yt, name=name)
        self.moe.train()

    def _predict_values(self, x: np.ndarray) -> np.ndarray:
        # delegate value prediction to the mixture
        return self.moe.predict_values(x)

    def _predict_variances(self, x: np.ndarray) -> np.ndarray:
        # delegate variance prediction to the mixture
        return self.moe.predict_variances(x)
class MOE(SurrogateBasedApplication):
    """Mixture of Experts application: clusters the training data with a
    Gaussian mixture model and fits one surrogate expert per cluster."""

    # Names of experts available to be part of the mixture
    AVAILABLE_EXPERTS = [
        name
        for name in MOE_EXPERT_NAMES
        if name in SurrogateBasedApplication._surrogate_type
    ]
def _initialize(self):
    """Declare the MOE options and reset the internal state."""
    super(MOE, self)._initialize()
    declare = self.options.declare

    declare("xt", None, types=np.ndarray, desc="Training inputs")
    declare("yt", None, types=np.ndarray, desc="Training outputs")
    declare(
        "ct",
        None,
        types=np.ndarray,
        desc="Training derivative outputs used for clustering",
    )

    declare("xtest", None, types=np.ndarray, desc="Test inputs")
    declare("ytest", None, types=np.ndarray, desc="Test outputs")
    declare(
        "ctest",
        None,
        types=np.ndarray,
        desc="Derivatives test outputs used for clustering",
    )

    declare("n_clusters", 2, types=int, desc="Number of clusters")
    declare(
        "smooth_recombination",
        True,
        types=bool,
        desc="Continuous cluster transition",
    )
    declare(
        "heaviside_optimization",
        False,
        types=bool,
        desc="Optimize Heaviside scaling factor when smooth recombination is used",
    )
    declare(
        "derivatives_support",
        False,
        types=bool,
        desc="Use only experts that support derivatives prediction",
    )
    declare(
        "variances_support",
        False,
        types=bool,
        desc="Use only experts that support variance prediction",
    )
    declare(
        "allow",
        [],
        desc="Names of allowed experts to be possibly part of the mixture. "
        "Empty list corresponds to all surrogates allowed.",
    )
    declare(
        "deny",
        [],
        desc="Names of forbidden experts",
    )

    # internal state, (re)populated by train()
    self.x = None
    self.y = None
    self.c = None
    self.n_clusters = None
    self.smooth_recombination = None
    self.heaviside_optimization = None
    self.heaviside_factor = 1.0
    # dictionary {name: class} of possible experts wrt to options
    self._enabled_expert_types = self._get_enabled_expert_types()
    # list of experts after MOE training
    self._experts = []
    self.xt = None
    self.yt = None
@property
def enabled_experts(self):
    """Names of the experts currently enabled, given the MOE options."""
    expert_types = self._get_enabled_expert_types()
    self._enabled_expert_types = expert_types
    return [name for name in expert_types]
def set_training_values(self, xt, yt, name=None):
    """Store training data (values) for a later call to train().

    Parameters
    ----------
    xt : np.ndarray[nt, nx] or np.ndarray[nt]
        The input values for the nt training points.
    yt : np.ndarray[nt, ny] or np.ndarray[nt]
        The output values for the nt training points.
    name : str or None
        An optional label for the group of training points being set.
        This is only used in special situations (e.g., multi-fidelity
        applications).
    """
    self.xt, self.yt = xt, yt
def train(self):
    """Supports for surrogate model API.

    Build and train the mixture of experts surrogate:

    1. gather training data (from ``set_training_values`` or options),
    2. cluster the samples with a Gaussian mixture on [x, criterion],
    3. select and train the best expert per cluster,
    4. optionally optimize the Heaviside smoothing factor,
    5. refit the chosen experts on the full data set when part of the
       data was held out for validation.
    """
    if self.xt is not None and self.yt is not None:
        # set_training_values has been called
        self.x = x = self.xt
        self.y = y = self.yt
    else:
        self.x = x = self.options["xt"]
        self.y = y = self.options["yt"]
    self.c = c = self.options["ct"]
    if self.c is None:
        # BUG FIX: the previous `if not self.c` raises "truth value of an
        # array is ambiguous" when a multi-element ndarray is supplied.
        # An explicit None check implements the intended default: use the
        # outputs themselves as the clustering criterion.
        self.c = c = y
    self.n_clusters = self.options["n_clusters"]
    self.smooth_recombination = self.options["smooth_recombination"]
    # Heaviside optimization only makes sense with smooth recombination.
    self.heaviside_optimization = (
        self.options["smooth_recombination"]
        and self.options["heaviside_optimization"]
    )
    self.heaviside_factor = 1.0
    self._check_inputs()
    self._enabled_expert_types = self._get_enabled_expert_types()
    self._experts = []
    # Set test values and trained values
    xtest = self.options["xtest"]
    ytest = self.options["ytest"]
    ctest = self.options["ctest"]
    if ctest is None:
        # BUG FIX: same ndarray-truthiness problem as for `self.c` above.
        ctest = ytest
    values = np.c_[x, y, c]
    self.test_data_present = xtest is not None and ytest is not None
    if self.test_data_present:
        self._test_values = np.c_[xtest, ytest, ctest]
        self._training_values = values
    else:
        # No explicit test set: hold out every 10th sample for validation.
        self._test_values, self._training_values = self._extract_part(values, 10)
    self.ndim = nx = x.shape[1]
    xt = self._training_values[:, 0:nx]
    yt = self._training_values[:, nx : nx + 1]
    ct = self._training_values[:, nx + 1 :]
    # Clustering
    self.cluster = GaussianMixture(
        n_components=self.n_clusters, covariance_type="full", n_init=20
    )
    self.cluster.fit(np.c_[xt, ct])
    if not self.cluster.converged_:
        raise Exception("Clustering not converged")
    # Choice of the experts and training
    self._fit()
    xtest = self._test_values[:, 0:nx]
    ytest = self._test_values[:, nx : nx + 1]
    # Heaviside factor
    if self.heaviside_optimization and self.n_clusters > 1:
        self.heaviside_factor = self._find_best_heaviside_factor(xtest, ytest)
        print("Best Heaviside factor = {}".format(self.heaviside_factor))
    self.distribs = self._create_clusters_distributions(self.heaviside_factor)
    if not self.test_data_present:
        # if we have used part of data to validate, fit on overall data
        self._training_values = values
        self._fit(new_model=False)
def predict_values(self, x):
    """Predict the output values at a set of points.

    Dispatches to the smooth (probability-weighted) or hard
    (winner-takes-all) recombination depending on the training options.

    Parameters
    ----------
    x : np.ndarray[nt, nx] or np.ndarray[nt]
        Input values for the prediction points.

    Returns
    -------
    y : np.ndarray[nt, ny]
        Output values at the prediction points.
    """
    if self.smooth_recombination:
        return self._predict_smooth_output(x)
    return self._predict_hard_output(x)
def predict_variances(self, x):
    """Predict the output variances at a set of points.

    Only available when the mixture was built with
    ``variances_support=True`` so that every expert supports variance
    prediction.

    Parameters
    ----------
    x : np.ndarray[nt, nx] or np.ndarray[nt]
        Input values for the prediction points.

    Returns
    -------
    y : np.ndarray[nt, ny]
        Output variances at the prediction points.

    Raises
    ------
    RuntimeError
        If the experts were not selected with variance support in mind.
    """
    if not self.options["variances_support"]:
        raise RuntimeError(
            "Experts not selected taking variance support into account: use variances_support=True "
            "when creating MOE"
        )
    if self.smooth_recombination:
        return self._predict_smooth_output(x, output_variances=True)
    return self._predict_hard_output(x, output_variances=True)
def _check_inputs(self):
"""
Check the input data given by the client is correct.
raise Value error with relevant message
"""
if self.x is None or self.y is None:
raise ValueError("check x and y values")
if self.x.shape[0] != self.y.shape[0]:
raise ValueError(
"The number of input points %d doesn t match with the number of output points %d."
% (self.x.shape[0], self.y.shape[0])
)
if self.y.shape[0] != self.c.shape[0]:
raise ValueError(
"The number of output points %d doesn t match with the number of criterion weights %d."
% (self.y.shape[0], self.c.shape[0])
)
# choice of number of cluster
max_n_clusters = int(len(self.x) / 10) + 1
if self.n_clusters > max_n_clusters:
print("Number of clusters should be inferior to {0}".format(max_n_clusters))
raise ValueError(
"The number of clusters is too high considering the number of points"
)
def _get_enabled_expert_types(self):
    """Select relevant surrogate models (experts) regarding MOE feature options.

    Returns
    -------
    dict
        Mapping of expert name to surrogate class for every expert that
        satisfies the derivatives/variances support flags and the
        allow/deny lists.

    Raises
    ------
    ValueError
        If the filters leave no candidate expert.
    """
    # Instantiate one prototype per known expert so that its `supports`
    # declarations can be inspected.
    prototypes = {
        name: smclass()
        for name, smclass in self._surrogate_type.items()
        if name in MOE_EXPERT_NAMES
    }
    if self.options["derivatives_support"]:
        prototypes = {
            name: proto
            for name, proto in prototypes.items()
            if proto.supports["derivatives"]
        }
    if self.options["variances_support"]:
        prototypes = {
            name: proto
            for name, proto in prototypes.items()
            if proto.supports["variances"]
        }
    if self.options["allow"]:
        prototypes = {
            name: proto
            for name, proto in prototypes.items()
            if name in self.options["allow"]
        }
    if self.options["deny"]:
        prototypes = {
            name: proto
            for name, proto in prototypes.items()
            if name not in self.options["deny"]
        }
    if not prototypes:
        # BUG FIX: the exception was constructed but never raised, so an
        # empty expert list slipped through silently.
        raise ValueError(
            "List of possible experts is empty: check support, allow and deny options wrt"
        )
    return {name: self._surrogate_type[name] for name in prototypes}
def _fit(self, new_model=True):
    """Find the best model for each cluster (clustering already done) and
    train it if new_model is True, otherwise retrain the already-chosen
    experts on the current training values.

    Arguments
    ---------
    - new_model : bool (optional)
        Set True to search for the best local model per cluster; False to
        only retrain the existing experts (used after the held-out data is
        merged back into ``self._training_values``).
    """
    self.distribs = self._create_clusters_distributions(self.heaviside_factor)
    # Column layout of *_values is [x (nx cols) | y (1 col) | c (rest)],
    # as assembled with np.c_[x, y, c] by the caller.
    nx = self.ndim
    xt = self._training_values[:, 0:nx]
    yt = self._training_values[:, nx : nx + 1]
    ct = self._training_values[:, nx + 1 :]
    xtest = self._test_values[:, 0:nx]
    ytest = self._test_values[:, nx : nx + 1]
    ctest = self._test_values[:, nx + 1 :]
    # sort trained_values for each cluster
    cluster_classifier = self.cluster.predict(np.c_[xt, ct])
    clustered_values = self._cluster_values(np.c_[xt, yt], cluster_classifier)
    # sort test_values for each cluster only used in case of new model
    if new_model:
        test_cluster_classifier = self.cluster.predict(np.c_[xtest, ctest])
        clustered_test_values = self._cluster_values(
            np.c_[xtest, ytest], test_cluster_classifier
        )
    # find model for each cluster
    for i in range(self.n_clusters):
        if new_model:
            # Train every enabled expert on this cluster and keep the best;
            # experts are appended in cluster order.
            model = self._find_best_model(
                clustered_values[i], clustered_test_values[i]
            )
            self._experts.append(model)
        else:
            # retrain the experts
            # used when self._training_values changed with expert best models already found
            training_values = np.array(clustered_values[i])
            xtrain = training_values[:, 0 : self.ndim]
            ytrain = training_values[:, self.ndim]
            self._experts[i].set_training_values(xtrain, ytrain)
            self._experts[i].train()
def _predict_hard_output(self, x, output_variances=False):
"""
This method predicts the output of a x samples for a
discontinuous recombination.
Arguments
---------
- x : array_like
x samples
Return
------
- predicted_values : array_like
predicted output
"""
predicted_values = []
probs = self._proba_cluster(x)
sort_cluster = np.apply_along_axis(np.argmax, 1, probs)
for i in range(len(sort_cluster)):
model = self._experts[sort_cluster[i]]
if output_variances:
predicted_values.append(model.predict_variances(np.atleast_2d(x[i]))[0])
else:
predicted_values.append(model.predict_values(np.atleast_2d(x[i]))[0])
predicted_values = np.array(predicted_values)
return predicted_values
def _predict_smooth_output(self, x, distribs=None, output_variances=False):
"""
This method predicts the output of x with a smooth recombination.
Arguments:
----------
- x: np.ndarray
x samples
- distribs: distribution list (optional)
array of membership distributions (use self ones if None)
Returns
-------
- predicted_values : array_like
predicted output
"""
predicted_values = []
if distribs is None:
distribs = self.distribs
sort_proba = self._proba_cluster(x, distribs)
for i in range(len(sort_proba)):
recombined_value = 0
for j in range(len(self._experts)):
if output_variances:
expert_value = (
self._experts[j].predict_variances(np.atleast_2d(x[i]))[0]
* sort_proba[i][j] ** 2
)
else:
expert_value = (
self._experts[j].predict_values(np.atleast_2d(x[i]))[0]
* sort_proba[i][j]
)
recombined_value += expert_value
predicted_values.append(recombined_value)
predicted_values = np.array(predicted_values)
return predicted_values
@staticmethod
def _extract_part(values, quantile):
"""
Divide the values list in quantile parts to return one part
of (num/quantile) values out of num values.
Arguments
----------
- values : np.ndarray[num, -1]
the values list to extract from
- quantile : int
the quantile
Returns
-------
- extracted, remaining : np.ndarray, np.ndarray
the extracted values part, the remaining values
"""
num = values.shape[0]
indices = np.arange(0, num, quantile) # uniformly distributed
mask = np.zeros(num, dtype=bool)
mask[indices] = True
return values[mask], values[~mask]
def _find_best_model(self, clustered_values, clustered_test_values):
"""
Find the best model which minimizes the errors.
Arguments :
------------
- clustered_values: array_like
training samples [[X1,X2, ..., Xn, Y], ... ]
Returns :
---------
- model : surrogate model
best trained surrogate model
"""
dim = self.ndim
scores = {}
sms = {}
training_values = np.array(clustered_values)
test_values = np.array(clustered_test_values)
for name, sm_class in self._enabled_expert_types.items():
kwargs = {}
if name in ["RMTB", "RMTC"]:
# Note: RMTS checks for xlimits,
# we take limits on all x (not just the trained_values ones) as
# the surrogate is finally re-trained on the whole x set.
xlimits = np.zeros((dim, 2))
for i in range(dim):
xlimits[i][0] = np.amin(self.x[:, i])
xlimits[i][1] = np.amax(self.x[:, i])
kwargs = {"xlimits": xlimits}
sm = sm_class(**kwargs)
sm.options["print_global"] = False
sm.set_training_values(training_values[:, 0:dim], training_values[:, dim])
sm.train()
expected = test_values[:, dim]
actual = sm.predict_values(test_values[:, 0:dim]).reshape(-1)
l_two = np.linalg.norm(expected - actual, 2)
# l_two_rel = l_two / np.linalg.norm(expected, 2)
# mse = (l_two**2) / len(expected)
# rmse = mse ** 0.5
scores[sm.name] = l_two
print(sm.name, l_two)
sms[sm.name] = sm
best_name = None
best_score = None
for name, rmse in scores.items():
if best_score is None or rmse < best_score:
best_name, best_score = name, rmse
print("Best expert = {}".format(best_name))
return sms[best_name]
def _find_best_heaviside_factor(self, x, y):
"""
Find the best heaviside factor to smooth approximated values.
Arguments
---------
- x: array_like
input training samples
- y: array_like
output training samples
Returns
-------
hfactor : float
best heaviside factor wrt given samples
"""
heaviside_factor = 1.0
if self.n_clusters > 1:
hfactors = np.linspace(0.1, 2.1, num=21)
errors = []
for hfactor in hfactors:
distribs = self._create_clusters_distributions(hfactor)
ypred = self._predict_smooth_output(x, distribs)
err_rel = np.linalg.norm(y - ypred, 2) / np.linalg.norm(y, 2)
errors.append(err_rel)
if max(errors) < 1e-6:
heaviside_factor = 1.0
else:
min_error_index = errors.index(min(errors))
heaviside_factor = hfactors[min_error_index]
return heaviside_factor
"""
Functions related to clustering
"""
def _create_clusters_distributions(self, heaviside_factor=1.0):
    """Create an array of frozen multivariate normal distributions.

    Arguments
    ---------
    - heaviside_factor: float
        Heaviside factor used to scale covariance matrices

    Returns:
    --------
    - distribs: array_like
        Array of frozen multivariate normal distributions
        with clusters means and covariances (restricted to the input
        dimensions)
    """
    dim = self.ndim
    means = self.cluster.means_
    # Older scikit-learn versions expose covariances as `covars_`.
    if OLD_SKLEARN:
        covariances = heaviside_factor * self.cluster.covars_
    else:
        covariances = heaviside_factor * self.cluster.covariances_
    distribs = []
    for k in range(self.n_clusters):
        distribs.append(
            multivariate_normal(
                means[k][0:dim], covariances[k][0:dim, 0:dim], allow_singular=True
            )
        )
    return distribs
def _cluster_values(self, values, classifier):
"""
Classify values regarding the given classifier info.
Arguments
---------
- values: array_like
values to cluster
- classifier: array_like
Cluster corresponding to each point of value in the same order
Returns
-------
- clustered: array_like
Samples sort by cluster
Example:
---------
values:
[[ 1.67016597e-01 5.42927264e-01 9.25779645e+00]
[ 5.20618344e-01 9.88223010e-01 1.51596837e+02]
[ 6.09979830e-02 2.66824984e-01 1.17890707e+02]
[ 9.62783472e-01 7.36979149e-01 7.37641826e+01]
[ 3.01194132e-01 8.58084068e-02 4.88696602e+01]
[ 6.40398203e-01 6.91090937e-01 8.91963162e+01]
[ 7.90710374e-01 1.40464471e-01 1.89390766e+01]
[ 4.64498124e-01 3.61009635e-01 1.04779656e+01]]
cluster_classifier:
[1 0 0 2 1 2 1 1]
clustered:
[[array([ 0.52061834, 0.98822301, 151.59683723]),
array([ 6.09979830e-02, 2.66824984e-01, 1.17890707e+02])]
[array([ 0.1670166 , 0.54292726, 9.25779645]),
array([ 0.30119413, 0.08580841, 48.86966023]),
array([ 0.79071037, 0.14046447, 18.93907662]),
array([ 0.46449812, 0.36100964, 10.47796563])]
[array([ 0.96278347, 0.73697915, 73.76418261]),
array([ 0.6403982 , 0.69109094, 89.19631619])]]
"""
num = len(classifier)
assert values.shape[0] == num
clusters = [[] for n in range(self.n_clusters)]
for i in range(num):
clusters[classifier[i]].append(values[i])
return clusters
def _proba_cluster_one_sample(self, x, distribs):
"""
Compute membership probabilities to each cluster for one sample.
Arguments
---------
- x: array_like
a sample for which probabilities must be calculated
- distribs: multivariate_normal objects list
array of normal distributions
Returns
-------
- prob: array_like
x membership probability for each cluster
"""
weights = np.array(self.cluster.weights_)
rvs = np.array([distribs[k].pdf(x) for k in range(len(weights))])
probs = weights * rvs
rad = np.sum(probs)
if rad > 0:
probs = probs / rad
return probs
def _proba_cluster(self, x, distribs=None):
"""
Calculate membership probabilities to each cluster for each sample
Arguments
---------
- x: array_like
samples where probabilities must be calculated
- distribs : multivariate_normal objects list (optional)
array of membership distributions. If None, use self ones.
Returns
-------
- probs: array_like
x membership probabilities to each cluster.
Examples :
----------
x:
[[ 0. 0.]
[ 0. 1.]
[ 1. 0.]
[ 1. 1.]]
prob:
[[ 1.49050563e-02 9.85094944e-01]
[ 9.90381299e-01 9.61870088e-03]
[ 9.99208990e-01 7.91009759e-04]
[ 1.48949963e-03 9.98510500e-01]]
"""
if distribs is None:
distribs = self.distribs
if self.n_clusters == 1:
probs = np.ones((x.shape[0], 1))
else:
probs = np.array(
[self._proba_cluster_one_sample(x[i], distribs) for i in range(len(x))]
)
return probs
| SMTorg/smt | smt/applications/moe.py | Python | bsd-3-clause | 25,286 |
"""FLOW-BUS protocol for Bronkhorst instruments.
The protocol is described in the instruction manual number 9.17.027.
The implementation uses the `Construct library <http://construct.readthedocs.org/en/latest/>`__.
"""
from binascii import unhexlify
import unittest
from construct import *
# Common two-byte frame header: the message length (the Reader/Writer
# classes below set it to the byte count excluding the length byte itself)
# followed by the node address.
header = Struct("header",
    Byte("length"),
    Byte("node")
)
# FLOW-BUS command byte; only `read` and `write` (arrow-marked) are used by
# the Reader/Writer helpers in this module.
command = Enum(Byte("command"),
    status = 0,
    write = 1, # <--
    write_no_status = 2,
    write_with_source = 3,
    read = 4, # <--
    send_repeat = 5,
    stop_process = 6,
    start_process = 7,
    claim_process = 8,
    unclaim_process = 9
)
def Data(label):
    """Return a Construct Struct for a process/parameter byte pair.

    The process byte holds a chained flag (MSB) plus a 7-bit process
    number; the parameter byte holds a 2-bit type field and a 5-bit
    parameter number, with its chained flag fixed to False.
    """
    return Struct(label,
        BitStruct("process",
            Flag("chained"),
            BitField("number", 7)),
        BitStruct("parameter",
            Const(Flag("chained"), False),
            Enum(BitField("type", 2),
                c = 0x00 >> 5, # 0: char
                i = 0x20 >> 5, # 1: int
                f = 0x40 >> 5, # 2: float
                l = 0x40 >> 5, # 2: long shares the float type code
                s = 0x60 >> 5), # 3: string
            BitField("number", 5)),
    )
# Complete "read parameter" request: header, command byte constrained to
# `read`, index and data descriptors, and -- for string parameters only --
# a trailing string-length byte fixed to 0.
read_command = Struct("request",
    Embed(header),
    OneOf(command, ['read']),
    Data("index"),
    Data("data"),
    If(lambda ctx: ctx.data.parameter.type == "s",
        Const(UBInt8("string_length"), 0)),
    Terminator
)
def write_command(type_, secured):
    """Return a Construct Struct for a FLOW-BUS write message.

    Parameters
    ----------
    type_ : str
        Parameter type code: "c" (char), "i" (int), "f" (float),
        "l" (long) or "s" (string); selects the value field encoding.
    secured : bool
        When True, bracket the payload with the fixed secured-write
        byte sequences.
    """
    return Struct("send",
        Embed(header),
        OneOf(command, ['write', 'write_no_status']),
        # Fixed prefix bytes of a secured write.
        Const(String(None, 3), "\x80\x0a\x40") if secured else Pass,
        Data("index"),
        dict(
            c=UBInt8("value"),
            i=UBInt16("value"),
            f=BFloat32("value"),
            l=UBInt32("value"),
            s=Embed(Struct(None, UBInt8("string_length"),
                IfThenElse("value",
                    # BUG FIX: was `ctx["string_length"] is 0`, which only
                    # worked via CPython's small-int caching; compare by
                    # value instead of identity.
                    lambda ctx: ctx["string_length"] == 0,
                    CString(None),
                    # read string_length bytes (PascalString)
                    MetaField(None, lambda ctx: ctx["string_length"]))))
        )[type_],
        # Fixed suffix bytes of a secured write.
        Const(String(None, 3), "\x00\x0a\x52") if secured else Pass,
        Terminator
    )
# Error message emitted by the bus interface: header followed by a single
# error byte identifying a low-level communication failure.
error_message = Struct("FLOW-BUS error",
    Embed(header),
    Enum(Byte("error"),
        colon_missing = 1,
        first_byte = 2,
        message_length = 3,
        receiver = 4,
        communication_error = 5,
        sender_timeout = 8,
        answer_timeout = 9,
    )
)
# Status answer to a command: header, echoed command byte, status code and
# the index of the byte the status refers to.  The `Status.parse` helper
# below extracts just the status field.
status_message = Struct("FLOW-BUS status",
    Embed(header),
    command,
    Enum(Byte("status"),
        no_error = 0x00,
        process_claimed = 0x01,
        command_error = 0x02,
        process_error = 0x03,
        parameter_error = 0x04,
        param_type_error = 0x05,
        param_value_error = 0x06,
        network_not_active = 0x07,
        timeout_start_char = 0x08,
        timeout_serial_line = 0x09,
        hardware_mem_error = 0x0a,
        node_number_error = 0x0b,
        general_com_error = 0x0c,
        read_only_param = 0x0d,
        PC_com_error = 0x0e,
        no_RS232_connection = 0x0f,
        PC_out_of_mem = 0x10,
        write_only_param = 0x11,
        syst_config_unknown = 0x12,
        no_free_node_address = 0x13,
        wrong_iface_type = 0x14,
        serial_port_error = 0x15,
        serial_open_error = 0x16,
        com_error = 0x17,
        iface_busmaster_error = 0x18,
        timeout_ans = 0x19,
        no_start_char = 0x1a,
        first_digit_error = 0x1b,
        host_buffer_overflow = 0x1c,
        buffer_overflow = 0x1d,
        no_answer_found = 0x1e,
        error_closing_connection = 0x1f,
        synch_error = 0x20,
        send_error = 0x21,
        com_error_2 = 0x22,
        module_buffer_overflow = 0x23
    ),
    Byte("byte_index"),
    Terminator
)
class _Data(object):
class Byte(object):
def __init__(self, number, type="c", chained=False):
self.number = number
self.type = type
self.chained = chained
def __init__(self, process, param, param_type, chained=False):
self.process = _Data.Byte(process, chained=chained)
self.parameter = _Data.Byte(param, param_type)
class Reader(object):
    """Builder/parser for FLOW-BUS "read parameter" request messages."""

    # Parameter number placed in the index descriptor of every request.
    index = 1

    def __init__(self, node, process, param, param_type):
        self.node = node
        self.command = "read"
        self.index = _Data(process, Reader.index, param_type)
        self.data = _Data(process, param, param_type)
        self.string_length = 0
        # The length byte counts every byte after itself: build once with
        # a placeholder, then record the actual length.
        self.length = 0
        self.length = len(self.build()) - 1

    @classmethod
    def fromContext(cls, context):
        """Alternate constructor taking node/process/parameter info from a
        driver `context` object."""
        process = context.subsystem.process
        return cls(context.node, process, context.reader,
                   context._command.type)

    def build(self):
        """object to message"""
        return read_command.build(self)

    @staticmethod
    def parse(msg):
        """message to object"""
        return read_command.parse(msg)
class Writer(object):
    """Builder/parser for FLOW-BUS "write parameter" messages."""

    def __init__(self, node, process, param, param_type, secured, value):
        # Placeholder; replaced with the real length after a first build.
        self.length = 0
        self.node = node
        self.command = "write"
        # NOTE(review): `secured` is passed as the `chained` argument of
        # _Data.  The secured-write test vector in TestFlowBus does carry
        # the chained bit on the process byte, so this looks intentional --
        # confirm against manual 9.17.027.
        self.index = _Data(process, param, param_type, secured)
        self.param_type = param_type
        self.secured = secured
        self.value = value
        self.string_length = 0
        # The length byte counts every byte after itself.
        self.length = len(self.build()) - 1

    @classmethod
    def fromContext(cls, context):
        # Alternate constructor taking the write parameters from a driver
        # `context` object.
        # NOTE(review): compares the access attribute to the string
        # "Access.SEC" -- presumably the str() of an enum member; verify
        # against the driver's Access definition.
        process = context.subsystem.process
        return cls(context.node, process, context.writer,
                   context._command.type, context._command.access ==
                   "Access.SEC", context.value)

    def build(self):
        """object to message"""
        return write_command(self.param_type, self.secured).build(self)

    @staticmethod
    def parse(msg, type_, secured=False):
        """message to object"""
        return write_command(type_, secured).parse(msg)
class Status(object):
    """Helper extracting the status field of a FLOW-BUS status message."""

    @staticmethod
    def parse(msg):
        """Return the parsed ``status`` enum value of *msg*."""
        parsed = status_message.parse(msg)
        return parsed.status
class TestFlowBus(unittest.TestCase):
    """Round-trip (parse/build) tests for the FLOW-BUS message Structs
    against fixed reference byte strings."""

    def setUp(self):
        # Reference messages in hex for node 3: a status answer, a read
        # request, a plain write and a secured write.
        self.msg = dict(status=unhexlify("0403000005"),
                        read=unhexlify("06030401210121"),
                        write=unhexlify("06030101213E80"),
                        secwrite=unhexlify("0C0301800A40E121000A000A52"))

    def test_data_builder(self):
        # process 10 / int parameter 2 -> bytes 0x0a 0x22
        self.assertEqual(Data("").build(_Data(10, 2, "i")), unhexlify("0a22"))

    def test_reader_builder(self):
        self.assertEqual(read_command.build(Reader(3, 1, 1, "c")),
                         unhexlify("06030401010101"))

    def test_writer_builder(self):
        self.assertEqual(
            write_command("c", False).build(Writer(3, 1, 2, "c", False, 10)),
            unhexlify("05030101020a"))

    def test_status(self):
        msg = self.msg["status"]
        self.assertEqual(status_message.parse(msg).command, "status")

    def test_status_build(self):
        # build(parse(msg)) must reproduce the original bytes
        msg = self.msg["status"]
        self.assertEqual(status_message.build(status_message.parse(msg)), msg)

    def test_read(self):
        msg = self.msg["read"]
        self.assertEqual(read_command.parse(msg).command, "read")

    def test_read_build(self):
        msg = self.msg["read"]
        self.assertEqual(read_command.build(read_command.parse(msg)), msg)

    def test_write(self):
        msg = self.msg["write"]
        self.assertEqual(write_command("i", False).parse(msg).command, "write")

    def test_write_build(self):
        msg = self.msg["write"]
        self.assertEqual(
            write_command("i", False).build(write_command("i", False).parse(msg)),
            msg)

    def test_sec_write(self):
        # secured write must round-trip including prefix/suffix bytes
        msg = self.msg["secwrite"]
        self.assertEqual(
            write_command("i", True).build(write_command("i", True).parse(msg)),
            msg)
| Synss/pyhard2 | pyhard2/driver/_bronkhorst.py | Python | mit | 7,674 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.